From: heister Date: Tue, 20 Nov 2012 23:20:45 +0000 (+0000) Subject: merge indentation manually X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=4c5f6be90a9263f35a521d476cf9464ff659529e;p=dealii-svn.git merge indentation manually git-svn-id: https://svn.dealii.org/branches/branch_merge_mg_into_dof_handler@27632 0785d39b-7218-0410-832d-ea1e28bc413d --- 4c5f6be90a9263f35a521d476cf9464ff659529e diff --cc deal.II/examples/step-13/step-13.cc index e8182e5bd0,8e527273d0..63e62cc64b --- a/deal.II/examples/step-13/step-13.cc +++ b/deal.II/examples/step-13/step-13.cc @@@ -168,15 -168,15 +168,15 @@@ namespace Step1 template class EvaluationBase { - public: - virtual ~EvaluationBase (); + public: + virtual ~EvaluationBase (); - void set_refinement_cycle (const unsigned int refinement_cycle); + void set_refinement_cycle (const unsigned int refinement_cycle); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const = 0; - protected: - unsigned int refinement_cycle; + virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const = 0; ++ const Vector &solution) const = 0; + protected: + unsigned int refinement_cycle; }; @@@ -252,20 -252,20 +252,20 @@@ template class PointValueEvaluation : public EvaluationBase { - public: - PointValueEvaluation (const Point &evaluation_point, - TableHandler &results_table); - - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; - - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); - private: - const Point evaluation_point; - TableHandler &results_table; + public: + PointValueEvaluation (const Point &evaluation_point, + TableHandler &results_table); + + virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; ++ const Vector &solution) const; + + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); + private: + const Point evaluation_point; + TableHandler &results_table; }; @@@ -292,36 -292,36 +292,36 @@@ void PointValueEvaluation:: operator () (const DoFHandler &dof_handler, - const Vector &solution) const + const Vector &solution) const { - // First allocate a variable that - // will hold the point - // value. Initialize it with a - // value that is clearly bogus, - // so that if we fail to set it - // to a reasonable value, we will - // note at once. This may not be - // necessary in a function as - // small as this one, since we - // can easily see all possible - // paths of execution here, but - // it proved to be helpful for - // more complex cases, and so we - // employ this strategy here as - // well. + // First allocate a variable that + // will hold the point + // value. Initialize it with a + // value that is clearly bogus, + // so that if we fail to set it + // to a reasonable value, we will + // note at once. This may not be + // necessary in a function as + // small as this one, since we + // can easily see all possible + // paths of execution here, but + // it proved to be helpful for + // more complex cases, and so we + // employ this strategy here as + // well. double point_value = 1e20; - // Then loop over all cells and - // all their vertices, and check - // whether a vertex matches the - // evaluation point. 
If this is - // the case, then extract the - // point value, set a flag that - // we have found the point of - // interest, and exit the loop. + // Then loop over all cells and + // all their vertices, and check + // whether a vertex matches the + // evaluation point. If this is + // the case, then extract the + // point value, set a flag that + // we have found the point of + // interest, and exit the loop. typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); + cell = dof_handler.begin_active(), + endc = dof_handler.end(); bool evaluation_point_found = false; for (; (cell!=endc) && !evaluation_point_found; ++cell) for (unsigned int vertex=0; @@@ -610,15 -610,15 +610,15 @@@ template class SolutionOutput : public EvaluationBase { - public: - SolutionOutput (const std::string &output_name_base, - const typename DataOut::OutputFormat output_format); - - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; - private: - const std::string output_name_base; - const typename DataOut::OutputFormat output_format; + public: + SolutionOutput (const std::string &output_name_base, + const typename DataOut::OutputFormat output_format); + + virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; ++ const Vector &solution) const; + private: + const std::string output_name_base; + const typename DataOut::OutputFormat output_format; }; diff --cc deal.II/examples/step-14/step-14.cc index 4a6185ee26,9c58a26932..c1d4613872 --- a/deal.II/examples/step-14/step-14.cc +++ b/deal.II/examples/step-14/step-14.cc @@@ -72,15 -72,15 +72,15 @@@ namespace Step1 template class EvaluationBase { - public: - virtual ~EvaluationBase (); + public: + virtual ~EvaluationBase (); - void set_refinement_cycle (const unsigned int refinement_cycle); + void set_refinement_cycle (const unsigned int refinement_cycle); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const = 0; - protected: - unsigned int refinement_cycle; + virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const = 0; ++ const Vector &solution) const = 0; + protected: + unsigned int refinement_cycle; }; @@@ -102,18 -102,18 +102,18 @@@ template class PointValueEvaluation : public EvaluationBase { - public: - PointValueEvaluation (const Point &evaluation_point); + public: + PointValueEvaluation (const Point &evaluation_point); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; + virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; ++ const Vector &solution) const; - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); - private: - const Point evaluation_point; + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); + private: + const Point evaluation_point; }; @@@ -186,18 -186,18 +186,18 @@@ template class PointXDerivativeEvaluation : public EvaluationBase { - public: - PointXDerivativeEvaluation (const Point &evaluation_point); + public: + PointXDerivativeEvaluation (const Point &evaluation_point); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; + virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; ++ const Vector &solution) 
const; - DeclException1 (ExcEvaluationPointNotFound, - Point, - << "The evaluation point " << arg1 - << " was not found among the vertices of the present grid."); - private: - const Point evaluation_point; + DeclException1 (ExcEvaluationPointNotFound, + Point, + << "The evaluation point " << arg1 + << " was not found among the vertices of the present grid."); + private: + const Point evaluation_point; }; @@@ -216,19 -216,19 +216,19 @@@ void PointXDerivativeEvaluation:: operator () (const DoFHandler &dof_handler, - const Vector &solution) const + const Vector &solution) const { - // This time initialize the - // return value with something - // useful, since we will have to - // add up a number of - // contributions and take the - // mean value afterwards... + // This time initialize the + // return value with something + // useful, since we will have to + // add up a number of + // contributions and take the + // mean value afterwards... double point_derivative = 0; - // ...then have some objects of - // which the meaning wil become - // clear below... + // ...then have some objects of + // which the meaning wil become + // clear below... QTrapez vertex_quadrature; FEValues fe_values (dof_handler.get_fe(), vertex_quadrature, @@@ -399,13 -399,13 +399,13 @@@ template class GridOutput : public EvaluationBase { - public: - GridOutput (const std::string &output_name_base); + public: + GridOutput (const std::string &output_name_base); - virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; - private: - const std::string output_name_base; + virtual void operator () (const DoFHandler &dof_handler, - const Vector &solution) const; ++ const Vector &solution) const; + private: + const std::string output_name_base; }; diff --cc deal.II/examples/step-18/step-18.cc index e7e1cd304b,e1528caebb..c05759b764 --- a/deal.II/examples/step-18/step-18.cc +++ b/deal.II/examples/step-18/step-18.cc @@@ -845,11 -853,11 +853,11 @@@ namespace Step1 template inline void - BodyForce::vector_value (const Point & /*p*/, + BodyForce::vector_value (const Point &/*p*/, - Vector &values) const + Vector &values) const { Assert (values.size() == dim, - ExcDimensionMismatch (values.size(), dim)); + ExcDimensionMismatch (values.size(), dim)); const double g = 9.81; const double rho = 7700; @@@ -966,11 -974,11 +974,11 @@@ template void IncrementalBoundaryValues:: - vector_value (const Point & /*p*/, + vector_value (const Point &/*p*/, - Vector &values) const + Vector &values) const { Assert (values.size() == dim, - ExcDimensionMismatch (values.size(), dim)); + ExcDimensionMismatch (values.size(), dim)); values = 0; values(2) = -present_timestep * velocity; diff --cc deal.II/examples/step-20/step-20.cc index 8a3e1093f3,01303b4d7e..fd816b8be1 --- a/deal.II/examples/step-20/step-20.cc +++ b/deal.II/examples/step-20/step-20.cc @@@ -191,15 -191,15 +191,15 @@@ namespace Step2 }; - // And then we also have to define - // these respective functions, of - // course. Given our discussion in - // the introduction of how the - // solution should look like, the - // following computations should be - // straightforward: + // And then we also have to define + // these respective functions, of + // course. 
Given our discussion in + // the introduction of how the + // solution should look like, the + // following computations should be + // straightforward: template - double RightHandSide::value (const Point & /*p*/, + double RightHandSide::value (const Point &/*p*/, const unsigned int /*component*/) const { return 0; diff --cc deal.II/examples/step-28/step-28.cc index 64b0276c58,a3c477b375..0363c2dad6 --- a/deal.II/examples/step-28/step-28.cc +++ b/deal.II/examples/step-28/step-28.cc @@@ -1592,94 -1592,94 +1592,94 @@@ namespace Step2 template class NeutronDiffusionProblem { + public: + class Parameters + { public: - class Parameters - { - public: - Parameters (); - - static void declare_parameters (ParameterHandler &prm); - void get_parameters (ParameterHandler &prm); - - unsigned int n_groups; - unsigned int n_refinement_cycles; - - unsigned int fe_degree; - - double convergence_tolerance; - }; - - - - NeutronDiffusionProblem (const Parameters ¶meters); - ~NeutronDiffusionProblem (); - - void run (); - - private: - // @sect5{Private member functions} - - // There are not that many member - // functions in this class since - // most of the functionality has - // been moved into the - // EnergyGroup class - // and is simply called from the - // run() member - // function of this class. The - // ones that remain have - // self-explanatory names: - void initialize_problem(); - - void refine_grid (); - - double get_total_fission_source () const; - - - // @sect5{Private member variables} - - // Next, we have a few member - // variables. In particular, - // these are (i) a reference to - // the parameter object (owned by - // the main function of this - // program, and passed to the - // constructor of this class), - // (ii) an object describing the - // material parameters for the - // number of energy groups - // requested in the input file, - // and (iii) the finite element - // to be used by all energy - // groups: - const Parameters ¶meters; - const MaterialData material_data; - FE_Q fe; - - // Furthermore, we have (iv) the - // value of the computed - // eigenvalue at the present - // iteration. This is, in fact, - // the only part of the solution - // that is shared between all - // energy groups -- all other - // parts of the solution, such as - // neutron fluxes are particular - // to one or the other energy - // group, and are therefore - // stored in objects that - // describe a single energy - // group: - double k_eff; - - // Finally, (v), we have an array - // of pointers to the energy - // group objects. The length of - // this array is, of course, - // equal to the number of energy - // groups specified in the - // parameter file. - std::vector*> energy_groups; + Parameters (); + + static void declare_parameters (ParameterHandler &prm); + void get_parameters (ParameterHandler &prm); + + unsigned int n_groups; + unsigned int n_refinement_cycles; + + unsigned int fe_degree; + + double convergence_tolerance; + }; + + + + NeutronDiffusionProblem (const Parameters ¶meters); + ~NeutronDiffusionProblem (); + + void run (); + + private: + // @sect5{Private member functions} + + // There are not that many member + // functions in this class since + // most of the functionality has + // been moved into the + // EnergyGroup class + // and is simply called from the + // run() member + // function of this class. 
The + // ones that remain have + // self-explanatory names: + void initialize_problem(); + + void refine_grid (); + + double get_total_fission_source () const; + + + // @sect5{Private member variables} + + // Next, we have a few member + // variables. In particular, + // these are (i) a reference to + // the parameter object (owned by + // the main function of this + // program, and passed to the + // constructor of this class), + // (ii) an object describing the + // material parameters for the + // number of energy groups + // requested in the input file, + // and (iii) the finite element + // to be used by all energy + // groups: - const Parameters ¶meters; ++ const Parameters ¶meters; + const MaterialData material_data; + FE_Q fe; + + // Furthermore, we have (iv) the + // value of the computed + // eigenvalue at the present + // iteration. This is, in fact, + // the only part of the solution + // that is shared between all + // energy groups -- all other + // parts of the solution, such as + // neutron fluxes are particular + // to one or the other energy + // group, and are therefore + // stored in objects that + // describe a single energy + // group: + double k_eff; + + // Finally, (v), we have an array + // of pointers to the energy + // group objects. The length of + // this array is, of course, + // equal to the number of energy + // groups specified in the + // parameter file. + std::vector*> energy_groups; }; diff --cc deal.II/examples/step-29/step-29.cc index 183e15034a,4e0c83615e..4e4bff6dd9 --- a/deal.II/examples/step-29/step-29.cc +++ b/deal.II/examples/step-29/step-29.cc @@@ -572,20 -572,20 +572,20 @@@ namespace Step2 - // The constructor takes the - // ParameterHandler object and stores - // it in a reference. It also - // initializes the DoF-Handler and - // the finite element system, which - // consists of two copies of the - // scalar Q1 field, one for $v$ and - // one for $w$: + // The constructor takes the + // ParameterHandler object and stores + // it in a reference. 
It also + // initializes the DoF-Handler and + // the finite element system, which + // consists of two copies of the + // scalar Q1 field, one for $v$ and + // one for $w$: template - UltrasoundProblem::UltrasoundProblem (ParameterHandler& param) - : - prm(param), - dof_handler(triangulation), - fe(FE_Q(1), 2) - UltrasoundProblem::UltrasoundProblem (ParameterHandler ¶m) ++ UltrasoundProblem::UltrasoundProblem (ParameterHandler ¶m) + : + prm(param), + dof_handler(triangulation), + fe(FE_Q(1), 2) {} diff --cc deal.II/examples/step-31/step-31.cc index 1db324114d,39b6350bcf..033ff91776 --- a/deal.II/examples/step-31/step-31.cc +++ b/deal.II/examples/step-31/step-31.cc @@@ -192,11 -192,11 +192,11 @@@ namespace Step3 template double - TemperatureRightHandSide::value (const Point &p, + TemperatureRightHandSide::value (const Point &p, - const unsigned int component) const + const unsigned int component) const { Assert (component == 0, - ExcMessage ("Invalid operation for a scalar function.")); + ExcMessage ("Invalid operation for a scalar function.")); Assert ((dim==2) || (dim==3), ExcNotImplemented()); @@@ -488,15 -489,15 +489,15 @@@ template BlockSchurPreconditioner:: - BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S, + BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S, - const InverseMatrix &Mpinv, - const PreconditionerA &Apreconditioner) - : - stokes_matrix (&S), - m_inverse (&Mpinv), - a_preconditioner (Apreconditioner), - tmp (stokes_matrix->block(1,1).m()) + const InverseMatrix &Mpinv, + const PreconditionerA &Apreconditioner) + : + stokes_matrix (&S), + m_inverse (&Mpinv), + a_preconditioner (Apreconditioner), + tmp (stokes_matrix->block(1,1).m()) {} @@@ -581,80 -582,80 +582,80 @@@ template class BoussinesqFlowProblem { - public: - BoussinesqFlowProblem (); - void run (); + public: + BoussinesqFlowProblem (); + void run (); + + private: + void setup_dofs (); + void assemble_stokes_preconditioner (); + void build_stokes_preconditioner (); + void assemble_stokes_system (); + void assemble_temperature_system (const double maximal_velocity); + void assemble_temperature_matrix (); + double get_maximal_velocity () const; + std::pair get_extrapolated_temperature_range () const; + void solve (); + void output_results () const; + void refine_mesh (const unsigned int max_grid_level); - private: - void setup_dofs (); - void assemble_stokes_preconditioner (); - void build_stokes_preconditioner (); - void assemble_stokes_system (); - void assemble_temperature_system (const double maximal_velocity); - void assemble_temperature_matrix (); - double get_maximal_velocity () const; - std::pair get_extrapolated_temperature_range () const; - void solve (); - void output_results () const; - void refine_mesh (const unsigned int max_grid_level); - - double - compute_viscosity(const std::vector &old_temperature, - const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, - const std::vector &old_temperature_laplacians, - const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, - const std::vector &gamma_values, - const double global_u_infty, - const double global_T_variation, - const double cell_diameter) const; - - - Triangulation triangulation; - double global_Omega_diameter; - - const unsigned int stokes_degree; - FESystem stokes_fe; - DoFHandler stokes_dof_handler; - ConstraintMatrix stokes_constraints; - 
- std::vector stokes_block_sizes; - TrilinosWrappers::BlockSparseMatrix stokes_matrix; - TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix; - - TrilinosWrappers::BlockVector stokes_solution; - TrilinosWrappers::BlockVector old_stokes_solution; - TrilinosWrappers::BlockVector stokes_rhs; - - - const unsigned int temperature_degree; - FE_Q temperature_fe; - DoFHandler temperature_dof_handler; - ConstraintMatrix temperature_constraints; - - TrilinosWrappers::SparseMatrix temperature_mass_matrix; - TrilinosWrappers::SparseMatrix temperature_stiffness_matrix; - TrilinosWrappers::SparseMatrix temperature_matrix; - - TrilinosWrappers::Vector temperature_solution; - TrilinosWrappers::Vector old_temperature_solution; - TrilinosWrappers::Vector old_old_temperature_solution; - TrilinosWrappers::Vector temperature_rhs; - - - double time_step; - double old_time_step; - unsigned int timestep_number; - - std_cxx1x::shared_ptr Amg_preconditioner; - std_cxx1x::shared_ptr Mp_preconditioner; - - bool rebuild_stokes_matrix; - bool rebuild_temperature_matrices; - bool rebuild_stokes_preconditioner; + double + compute_viscosity(const std::vector &old_temperature, + const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, ++ const std::vector > &old_temperature_grads, ++ const std::vector > &old_old_temperature_grads, + const std::vector &old_temperature_laplacians, + const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, ++ const std::vector > &old_velocity_values, ++ const std::vector > &old_old_velocity_values, + const std::vector &gamma_values, + const double global_u_infty, + const double global_T_variation, + const double cell_diameter) const; + + + Triangulation triangulation; + double global_Omega_diameter; + + const unsigned int stokes_degree; + FESystem stokes_fe; + DoFHandler stokes_dof_handler; + ConstraintMatrix stokes_constraints; + + std::vector stokes_block_sizes; + TrilinosWrappers::BlockSparseMatrix stokes_matrix; + TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix; + + TrilinosWrappers::BlockVector stokes_solution; + TrilinosWrappers::BlockVector old_stokes_solution; + TrilinosWrappers::BlockVector stokes_rhs; + + + const unsigned int temperature_degree; + FE_Q temperature_fe; + DoFHandler temperature_dof_handler; + ConstraintMatrix temperature_constraints; + + TrilinosWrappers::SparseMatrix temperature_mass_matrix; + TrilinosWrappers::SparseMatrix temperature_stiffness_matrix; + TrilinosWrappers::SparseMatrix temperature_matrix; + + TrilinosWrappers::Vector temperature_solution; + TrilinosWrappers::Vector old_temperature_solution; + TrilinosWrappers::Vector old_old_temperature_solution; + TrilinosWrappers::Vector temperature_rhs; + + + double time_step; + double old_time_step; + unsigned int timestep_number; + + std_cxx1x::shared_ptr Amg_preconditioner; + std_cxx1x::shared_ptr Mp_preconditioner; + + bool rebuild_stokes_matrix; + bool rebuild_temperature_matrices; + bool rebuild_stokes_preconditioner; }; @@@ -961,17 -962,17 +962,17 @@@ double BoussinesqFlowProblem:: compute_viscosity (const std::vector &old_temperature, - const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, - const std::vector &old_temperature_laplacians, - const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - 
const std::vector > &old_old_velocity_values, - const std::vector &gamma_values, - const double global_u_infty, - const double global_T_variation, - const double cell_diameter) const + const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, ++ const std::vector > &old_temperature_grads, ++ const std::vector > &old_old_temperature_grads, + const std::vector &old_temperature_laplacians, + const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, ++ const std::vector > &old_velocity_values, ++ const std::vector > &old_old_velocity_values, + const std::vector &gamma_values, + const double global_u_infty, + const double global_T_variation, + const double cell_diameter) const { const double beta = 0.015 * dim; const double alpha = 1; diff --cc deal.II/examples/step-32/step-32.cc index f75422b4d8,db3c7123d8..b81ffa873d --- a/deal.II/examples/step-32/step-32.cc +++ b/deal.II/examples/step-32/step-32.cc @@@ -177,8 -177,8 +177,8 @@@ namespace Step3 template double - TemperatureInitialValues::value (const Point &p, + TemperatureInitialValues::value (const Point &p, - const unsigned int) const + const unsigned int) const { const double r = p.norm(); const double h = R1-R0; @@@ -268,60 -268,60 +268,60 @@@ template class BlockSchurPreconditioner : public Subscriptor { - public: - BlockSchurPreconditioner (const TrilinosWrappers::BlockSparseMatrix &S, - const TrilinosWrappers::BlockSparseMatrix &Spre, - const PreconditionerMp &Mppreconditioner, - const PreconditionerA &Apreconditioner, - const bool do_solve_A) - : - stokes_matrix (&S), - stokes_preconditioner_matrix (&Spre), - mp_preconditioner (Mppreconditioner), - a_preconditioner (Apreconditioner), - do_solve_A (do_solve_A) - {} - - void vmult (TrilinosWrappers::MPI::BlockVector &dst, - const TrilinosWrappers::MPI::BlockVector &src) const - { - TrilinosWrappers::MPI::Vector utmp(src.block(0)); - - { - SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm()); - - SolverCG solver(solver_control); - - solver.solve(stokes_preconditioner_matrix->block(1,1), - dst.block(1), src.block(1), - mp_preconditioner); - - dst.block(1) *= -1.0; - } - - { - stokes_matrix->block(0,1).vmult(utmp, dst.block(1)); - utmp*=-1.0; - utmp.add(src.block(0)); - } - - if (do_solve_A == true) - { - SolverControl solver_control(5000, utmp.l2_norm()*1e-2); - TrilinosWrappers::SolverCG solver(solver_control); - solver.solve(stokes_matrix->block(0,0), dst.block(0), utmp, - a_preconditioner); - } - else - a_preconditioner.vmult (dst.block(0), utmp); - } - - private: - const SmartPointer stokes_matrix; - const SmartPointer stokes_preconditioner_matrix; - const PreconditionerMp &mp_preconditioner; - const PreconditionerA &a_preconditioner; - const bool do_solve_A; + public: - BlockSchurPreconditioner (const TrilinosWrappers::BlockSparseMatrix &S, - const TrilinosWrappers::BlockSparseMatrix &Spre, ++ BlockSchurPreconditioner (const TrilinosWrappers::BlockSparseMatrix &S, ++ const TrilinosWrappers::BlockSparseMatrix &Spre, + const PreconditionerMp &Mppreconditioner, + const PreconditionerA &Apreconditioner, + const bool do_solve_A) + : + stokes_matrix (&S), + stokes_preconditioner_matrix (&Spre), + mp_preconditioner (Mppreconditioner), + a_preconditioner (Apreconditioner), + do_solve_A (do_solve_A) + {} + + void vmult (TrilinosWrappers::MPI::BlockVector &dst, + const TrilinosWrappers::MPI::BlockVector &src) const + { + 
TrilinosWrappers::MPI::Vector utmp(src.block(0)); + + { + SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm()); + + SolverCG solver(solver_control); + + solver.solve(stokes_preconditioner_matrix->block(1,1), + dst.block(1), src.block(1), + mp_preconditioner); + + dst.block(1) *= -1.0; + } + + { + stokes_matrix->block(0,1).vmult(utmp, dst.block(1)); + utmp*=-1.0; + utmp.add(src.block(0)); + } + + if (do_solve_A == true) + { + SolverControl solver_control(5000, utmp.l2_norm()*1e-2); + TrilinosWrappers::SolverCG solver(solver_control); + solver.solve(stokes_matrix->block(0,0), dst.block(0), utmp, + a_preconditioner); + } + else + a_preconditioner.vmult (dst.block(0), utmp); + } + + private: + const SmartPointer stokes_matrix; + const SmartPointer stokes_preconditioner_matrix; + const PreconditionerMp &mp_preconditioner; - const PreconditionerA &a_preconditioner; ++ const PreconditionerA &a_preconditioner; + const bool do_solve_A; }; } @@@ -893,352 -893,352 +893,352 @@@ template class BoussinesqFlowProblem { - public: - struct Parameters; - BoussinesqFlowProblem (Parameters ¶meters); - void run (); + public: + struct Parameters; + BoussinesqFlowProblem (Parameters ¶meters); + void run (); + + private: + void setup_dofs (); + void assemble_stokes_preconditioner (); + void build_stokes_preconditioner (); + void assemble_stokes_system (); + void assemble_temperature_matrix (); + void assemble_temperature_system (const double maximal_velocity); + void project_temperature_field (); + double get_maximal_velocity () const; + double get_cfl_number () const; + double get_entropy_variation (const double average_temperature) const; + std::pair get_extrapolated_temperature_range () const; + void solve (); + void output_results (); + void refine_mesh (const unsigned int max_grid_level); - private: - void setup_dofs (); - void assemble_stokes_preconditioner (); - void build_stokes_preconditioner (); - void assemble_stokes_system (); - void assemble_temperature_matrix (); - void assemble_temperature_system (const double maximal_velocity); - void project_temperature_field (); - double get_maximal_velocity () const; - double get_cfl_number () const; - double get_entropy_variation (const double average_temperature) const; - std::pair get_extrapolated_temperature_range () const; - void solve (); - void output_results (); - void refine_mesh (const unsigned int max_grid_level); - - double - compute_viscosity(const std::vector &old_temperature, - const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, - const std::vector &old_temperature_laplacians, - const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, - const std::vector > &old_strain_rates, - const std::vector > &old_old_strain_rates, - const double global_u_infty, - const double global_T_variation, - const double average_temperature, - const double global_entropy_variation, - const double cell_diameter) const; + double + compute_viscosity(const std::vector &old_temperature, + const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, ++ const std::vector > &old_temperature_grads, ++ const std::vector > &old_old_temperature_grads, + const std::vector &old_temperature_laplacians, + const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > 
&old_old_velocity_values, - const std::vector > &old_strain_rates, - const std::vector > &old_old_strain_rates, ++ const std::vector > &old_velocity_values, ++ const std::vector > &old_old_velocity_values, ++ const std::vector > &old_strain_rates, ++ const std::vector > &old_old_strain_rates, + const double global_u_infty, + const double global_T_variation, + const double average_temperature, + const double global_entropy_variation, + const double cell_diameter) const; + + public: + + // The first significant new + // component is the definition + // of a struct for the + // parameters according to the + // discussion in the + // introduction. This structure + // is initialized by reading + // from a parameter file during + // construction of this object. + struct Parameters + { + Parameters (const std::string ¶meter_filename); - public: + static void declare_parameters (ParameterHandler &prm); + void parse_parameters (ParameterHandler &prm); - // The first significant new - // component is the definition - // of a struct for the - // parameters according to the - // discussion in the - // introduction. This structure - // is initialized by reading - // from a parameter file during - // construction of this object. - struct Parameters - { - Parameters (const std::string ¶meter_filename); + double end_time; - static void declare_parameters (ParameterHandler &prm); - void parse_parameters (ParameterHandler &prm); + unsigned int initial_global_refinement; + unsigned int initial_adaptive_refinement; - double end_time; + bool generate_graphical_output; + unsigned int graphical_output_interval; - unsigned int initial_global_refinement; - unsigned int initial_adaptive_refinement; + unsigned int adaptive_refinement_interval; - bool generate_graphical_output; - unsigned int graphical_output_interval; + double stabilization_alpha; + double stabilization_c_R; + double stabilization_beta; - unsigned int adaptive_refinement_interval; + unsigned int stokes_velocity_degree; + bool use_locally_conservative_discretization; - double stabilization_alpha; - double stabilization_c_R; - double stabilization_beta; + unsigned int temperature_degree; + }; - unsigned int stokes_velocity_degree; - bool use_locally_conservative_discretization; + private: + Parameters ¶meters; + + // The pcout (for + // %parallel + // std::cout) + // object is used to simplify + // writing output: each MPI + // process can use this to + // generate output as usual, + // but since each of these + // processes will (hopefully) + // produce the same output it + // will just be replicated many + // times over; with the + // ConditionalOStream class, + // only the output generated by + // one MPI process will + // actually be printed to + // screen, whereas the output + // by all the other threads + // will simply be forgotten. + ConditionalOStream pcout; + + // The following member + // variables will then again be + // similar to those in step-31 + // (and to other tutorial + // programs). As mentioned in + // the introduction, we fully + // distribute computations, so + // we will have to use the + // parallel::distributed::Triangulation + // class (see step-40) but the + // remainder of these variables + // is rather standard with two + // exceptions: + // + // - The mapping + // variable is used to denote a + // higher-order polynomial + // mapping. 
As mentioned in the + // introduction, we use this + // mapping when forming + // integrals through quadrature + // for all cells that are + // adjacent to either the inner + // or outer boundaries of our + // domain where the boundary is + // curved. + // + // - In a bit of naming + // confusion, you will notice + // below that some of the + // variables from namespace + // TrilinosWrappers are taken + // from namespace + // TrilinosWrappers::MPI (such + // as the right hand side + // vectors) whereas others are + // not (such as the various + // matrices). For the matrices, + // we happen to use the same + // class names for %parallel + // and sequential data + // structures, i.e., all + // matrices will actually be + // considered %parallel + // below. On the other hand, + // for vectors, only those from + // namespace + // TrilinosWrappers::MPI are + // actually distributed. In + // particular, we will + // frequently have to query + // velocities and temperatures + // at arbitrary quadrature + // points; consequently, rather + // than importing ghost + // information of a vector + // whenever we need access to + // degrees of freedom that are + // relevant locally but owned + // by another processor, we + // solve linear systems in + // %parallel but then + // immediately initialize a + // vector including ghost + // entries of the solution for + // further processing. The + // various + // *_solution + // vectors are therefore filled + // immediately after solving + // their respective linear + // system in %parallel and will + // always contain values for + // all @ref + // GlossLocallyRelevantDof + // "locally relevant degrees of freedom"; + // the fully + // distributed vectors that we + // obtain from the solution + // process and that only ever + // contain the @ref + // GlossLocallyOwnedDof + // "locally owned degrees of freedom" + // are destroyed + // immediately after the + // solution process and after + // we have copied the relevant + // values into the member + // variable vectors. 
+ parallel::distributed::Triangulation triangulation; + double global_Omega_diameter; + + const MappingQ mapping; + + const FESystem stokes_fe; + DoFHandler stokes_dof_handler; + ConstraintMatrix stokes_constraints; + + TrilinosWrappers::BlockSparseMatrix stokes_matrix; + TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix; + + TrilinosWrappers::MPI::BlockVector stokes_solution; + TrilinosWrappers::MPI::BlockVector old_stokes_solution; + TrilinosWrappers::MPI::BlockVector stokes_rhs; + + + FE_Q temperature_fe; + DoFHandler temperature_dof_handler; + ConstraintMatrix temperature_constraints; + + TrilinosWrappers::SparseMatrix temperature_mass_matrix; + TrilinosWrappers::SparseMatrix temperature_stiffness_matrix; + TrilinosWrappers::SparseMatrix temperature_matrix; + + TrilinosWrappers::MPI::Vector temperature_solution; + TrilinosWrappers::MPI::Vector old_temperature_solution; + TrilinosWrappers::MPI::Vector old_old_temperature_solution; + TrilinosWrappers::MPI::Vector temperature_rhs; + + + double time_step; + double old_time_step; + unsigned int timestep_number; + + std_cxx1x::shared_ptr Amg_preconditioner; + std_cxx1x::shared_ptr Mp_preconditioner; + std_cxx1x::shared_ptr T_preconditioner; + + bool rebuild_stokes_matrix; + bool rebuild_stokes_preconditioner; + bool rebuild_temperature_matrices; + bool rebuild_temperature_preconditioner; + + // The next member variable, + // computing_timer + // is used to conveniently + // account for compute time + // spent in certain "sections" + // of the code that are + // repeatedly entered. For + // example, we will enter (and + // leave) sections for Stokes + // matrix assembly and would + // like to accumulate the run + // time spent in this section + // over all time steps. Every + // so many time steps as well + // as at the end of the program + // (through the destructor of + // the TimerOutput class) we + // will then produce a nice + // summary of the times spent + // in the different sections + // into which we categorize the + // run-time of this program. + TimerOutput computing_timer; + + // After these member variables + // we have a number of + // auxiliary functions that + // have been broken out of the + // ones listed + // above. Specifically, there + // are first three functions + // that we call from + // setup_dofs and + // then the ones that do the + // assembling of linear + // systems: + void setup_stokes_matrix (const std::vector &stokes_partitioning); + void setup_stokes_preconditioner (const std::vector &stokes_partitioning); + void setup_temperature_matrices (const IndexSet &temperature_partitioning); + + + // Following the @ref + // MTWorkStream + // "task-based parallelization" + // paradigm, + // we split all the assembly + // routines into two parts: a + // first part that can do all + // the calculations on a + // certain cell without taking + // care of other threads, and a + // second part (which is + // writing the local data into + // the global matrices and + // vectors) which can be + // entered by only one thread + // at a time. In order to + // implement that, we provide + // functions for each of those + // two steps for all the four + // assembly routines that we + // use in this program. 
The + // following eight functions do + // exactly this: + void + local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::StokesPreconditioner &scratch, + Assembly::CopyData::StokesPreconditioner &data); - unsigned int temperature_degree; - }; + void + copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner &data); - private: - Parameters ¶meters; - - // The pcout (for - // %parallel - // std::cout) - // object is used to simplify - // writing output: each MPI - // process can use this to - // generate output as usual, - // but since each of these - // processes will (hopefully) - // produce the same output it - // will just be replicated many - // times over; with the - // ConditionalOStream class, - // only the output generated by - // one MPI process will - // actually be printed to - // screen, whereas the output - // by all the other threads - // will simply be forgotten. - ConditionalOStream pcout; - - // The following member - // variables will then again be - // similar to those in step-31 - // (and to other tutorial - // programs). As mentioned in - // the introduction, we fully - // distribute computations, so - // we will have to use the - // parallel::distributed::Triangulation - // class (see step-40) but the - // remainder of these variables - // is rather standard with two - // exceptions: - // - // - The mapping - // variable is used to denote a - // higher-order polynomial - // mapping. As mentioned in the - // introduction, we use this - // mapping when forming - // integrals through quadrature - // for all cells that are - // adjacent to either the inner - // or outer boundaries of our - // domain where the boundary is - // curved. - // - // - In a bit of naming - // confusion, you will notice - // below that some of the - // variables from namespace - // TrilinosWrappers are taken - // from namespace - // TrilinosWrappers::MPI (such - // as the right hand side - // vectors) whereas others are - // not (such as the various - // matrices). For the matrices, - // we happen to use the same - // class names for %parallel - // and sequential data - // structures, i.e., all - // matrices will actually be - // considered %parallel - // below. On the other hand, - // for vectors, only those from - // namespace - // TrilinosWrappers::MPI are - // actually distributed. In - // particular, we will - // frequently have to query - // velocities and temperatures - // at arbitrary quadrature - // points; consequently, rather - // than importing ghost - // information of a vector - // whenever we need access to - // degrees of freedom that are - // relevant locally but owned - // by another processor, we - // solve linear systems in - // %parallel but then - // immediately initialize a - // vector including ghost - // entries of the solution for - // further processing. 
The - // various - // *_solution - // vectors are therefore filled - // immediately after solving - // their respective linear - // system in %parallel and will - // always contain values for - // all @ref - // GlossLocallyRelevantDof - // "locally relevant degrees of freedom"; - // the fully - // distributed vectors that we - // obtain from the solution - // process and that only ever - // contain the @ref - // GlossLocallyOwnedDof - // "locally owned degrees of freedom" - // are destroyed - // immediately after the - // solution process and after - // we have copied the relevant - // values into the member - // variable vectors. - parallel::distributed::Triangulation triangulation; - double global_Omega_diameter; - - const MappingQ mapping; - - const FESystem stokes_fe; - DoFHandler stokes_dof_handler; - ConstraintMatrix stokes_constraints; - - TrilinosWrappers::BlockSparseMatrix stokes_matrix; - TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix; - - TrilinosWrappers::MPI::BlockVector stokes_solution; - TrilinosWrappers::MPI::BlockVector old_stokes_solution; - TrilinosWrappers::MPI::BlockVector stokes_rhs; - - - FE_Q temperature_fe; - DoFHandler temperature_dof_handler; - ConstraintMatrix temperature_constraints; - - TrilinosWrappers::SparseMatrix temperature_mass_matrix; - TrilinosWrappers::SparseMatrix temperature_stiffness_matrix; - TrilinosWrappers::SparseMatrix temperature_matrix; - - TrilinosWrappers::MPI::Vector temperature_solution; - TrilinosWrappers::MPI::Vector old_temperature_solution; - TrilinosWrappers::MPI::Vector old_old_temperature_solution; - TrilinosWrappers::MPI::Vector temperature_rhs; - - - double time_step; - double old_time_step; - unsigned int timestep_number; - - std_cxx1x::shared_ptr Amg_preconditioner; - std_cxx1x::shared_ptr Mp_preconditioner; - std_cxx1x::shared_ptr T_preconditioner; - - bool rebuild_stokes_matrix; - bool rebuild_stokes_preconditioner; - bool rebuild_temperature_matrices; - bool rebuild_temperature_preconditioner; - - // The next member variable, - // computing_timer - // is used to conveniently - // account for compute time - // spent in certain "sections" - // of the code that are - // repeatedly entered. For - // example, we will enter (and - // leave) sections for Stokes - // matrix assembly and would - // like to accumulate the run - // time spent in this section - // over all time steps. Every - // so many time steps as well - // as at the end of the program - // (through the destructor of - // the TimerOutput class) we - // will then produce a nice - // summary of the times spent - // in the different sections - // into which we categorize the - // run-time of this program. - TimerOutput computing_timer; - - // After these member variables - // we have a number of - // auxiliary functions that - // have been broken out of the - // ones listed - // above. 
Specifically, there - // are first three functions - // that we call from - // setup_dofs and - // then the ones that do the - // assembling of linear - // systems: - void setup_stokes_matrix (const std::vector &stokes_partitioning); - void setup_stokes_preconditioner (const std::vector &stokes_partitioning); - void setup_temperature_matrices (const IndexSet &temperature_partitioning); - - - // Following the @ref - // MTWorkStream - // "task-based parallelization" - // paradigm, - // we split all the assembly - // routines into two parts: a - // first part that can do all - // the calculations on a - // certain cell without taking - // care of other threads, and a - // second part (which is - // writing the local data into - // the global matrices and - // vectors) which can be - // entered by only one thread - // at a time. In order to - // implement that, we provide - // functions for each of those - // two steps for all the four - // assembly routines that we - // use in this program. The - // following eight functions do - // exactly this: - void - local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesPreconditioner &scratch, - Assembly::CopyData::StokesPreconditioner &data); - - void - copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner &data); - - - void - local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesSystem &scratch, - Assembly::CopyData::StokesSystem &data); - - void - copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem &data); - - - void - local_assemble_temperature_matrix (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureMatrix &scratch, - Assembly::CopyData::TemperatureMatrix &data); - - void - copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix &data); - - - - void - local_assemble_temperature_rhs (const std::pair global_T_range, - const double global_max_velocity, - const double global_entropy_variation, - const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureRHS &scratch, - Assembly::CopyData::TemperatureRHS &data); - - void - copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS &data); - - // Finally, we forward declare - // a member class that we will - // define later on and that - // will be used to compute a - // number of quantities from - // our solution vectors that - // we'd like to put into the - // output files for - // visualization. 
- class Postprocessor; + + void + local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::StokesSystem &scratch, ++ Assembly::Scratch::StokesSystem &scratch, + Assembly::CopyData::StokesSystem &data); + + void + copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem &data); + + + void + local_assemble_temperature_matrix (const typename DoFHandler::active_cell_iterator &cell, - Assembly::Scratch::TemperatureMatrix &scratch, ++ Assembly::Scratch::TemperatureMatrix &scratch, + Assembly::CopyData::TemperatureMatrix &data); + + void + copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix &data); + + + + void + local_assemble_temperature_rhs (const std::pair global_T_range, + const double global_max_velocity, + const double global_entropy_variation, + const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureRHS &scratch, + Assembly::CopyData::TemperatureRHS &data); + + void + copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS &data); + + // Finally, we forward declare + // a member class that we will + // define later on and that + // will be used to compute a + // number of quantities from + // our solution vectors that + // we'd like to put into the + // output files for + // visualization. + class Postprocessor; }; @@@ -1967,20 -1968,20 +1968,20 @@@ double BoussinesqFlowProblem:: compute_viscosity (const std::vector &old_temperature, - const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, - const std::vector &old_temperature_laplacians, - const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, - const std::vector > &old_strain_rates, - const std::vector > &old_old_strain_rates, - const double global_u_infty, - const double global_T_variation, - const double average_temperature, - const double global_entropy_variation, - const double cell_diameter) const + const std::vector &old_old_temperature, - const std::vector > &old_temperature_grads, - const std::vector > &old_old_temperature_grads, ++ const std::vector > &old_temperature_grads, ++ const std::vector > &old_old_temperature_grads, + const std::vector &old_temperature_laplacians, + const std::vector &old_old_temperature_laplacians, - const std::vector > &old_velocity_values, - const std::vector > &old_old_velocity_values, - const std::vector > &old_strain_rates, - const std::vector > &old_old_strain_rates, ++ const std::vector > &old_velocity_values, ++ const std::vector > &old_old_velocity_values, ++ const std::vector > &old_strain_rates, ++ const std::vector > &old_old_strain_rates, + const double global_u_infty, + const double global_T_variation, + const double average_temperature, + const double global_entropy_variation, + const double cell_diameter) const { if (global_u_infty == 0) return 5e-3 * cell_diameter; diff --cc deal.II/examples/step-33/step-33.cc index dccad4abe0,e402227987..1721b8b098 --- a/deal.II/examples/step-33/step-33.cc +++ b/deal.II/examples/step-33/step-33.cc @@@ -107,532 -107,533 +107,533 @@@ namespace Step3 template struct EulerEquations { - // @sect4{Component description} - - // First a few variables that - // describe the various components of our - // solution vector in a generic way. 
This - // includes the number of components in the - // system (Euler's equations have one entry - // for momenta in each spatial direction, - // plus the energy and density components, - // for a total of dim+2 - // components), as well as functions that - // describe the index within the solution - // vector of the first momentum component, - // the density component, and the energy - // density component. Note that all these - // %numbers depend on the space dimension; - // defining them in a generic way (rather - // than by implicit convention) makes our - // code more flexible and makes it easier - // to later extend it, for example by - // adding more components to the equations. - static const unsigned int n_components = dim + 2; - static const unsigned int first_momentum_component = 0; - static const unsigned int density_component = dim; - static const unsigned int energy_component = dim+1; - - // When generating graphical - // output way down in this - // program, we need to specify - // the names of the solution - // variables as well as how the - // various components group into - // vector and scalar fields. We - // could describe this there, but - // in order to keep things that - // have to do with the Euler - // equation localized here and - // the rest of the program as - // generic as possible, we - // provide this sort of - // information in the following - // two functions: - static - std::vector - component_names () - { - std::vector names (dim, "momentum"); - names.push_back ("density"); - names.push_back ("energy_density"); + // @sect4{Component description} + + // First a few variables that + // describe the various components of our + // solution vector in a generic way. This + // includes the number of components in the + // system (Euler's equations have one entry + // for momenta in each spatial direction, + // plus the energy and density components, + // for a total of dim+2 + // components), as well as functions that + // describe the index within the solution + // vector of the first momentum component, + // the density component, and the energy + // density component. Note that all these + // %numbers depend on the space dimension; + // defining them in a generic way (rather + // than by implicit convention) makes our + // code more flexible and makes it easier + // to later extend it, for example by + // adding more components to the equations. + static const unsigned int n_components = dim + 2; + static const unsigned int first_momentum_component = 0; + static const unsigned int density_component = dim; + static const unsigned int energy_component = dim+1; + + // When generating graphical + // output way down in this + // program, we need to specify + // the names of the solution + // variables as well as how the + // various components group into + // vector and scalar fields. 
We + // could describe this there, but + // in order to keep things that + // have to do with the Euler + // equation localized here and + // the rest of the program as + // generic as possible, we + // provide this sort of + // information in the following + // two functions: + static + std::vector + component_names () + { + std::vector names (dim, "momentum"); + names.push_back ("density"); + names.push_back ("energy_density"); - return names; - } + return names; + } - static + static + std::vector + component_interpretation () + { std::vector - component_interpretation () - { - std::vector - data_component_interpretation - (dim, DataComponentInterpretation::component_is_part_of_vector); - data_component_interpretation - .push_back (DataComponentInterpretation::component_is_scalar); - data_component_interpretation - .push_back (DataComponentInterpretation::component_is_scalar); - - return data_component_interpretation; - } + data_component_interpretation + (dim, DataComponentInterpretation::component_is_part_of_vector); + data_component_interpretation + .push_back (DataComponentInterpretation::component_is_scalar); + data_component_interpretation + .push_back (DataComponentInterpretation::component_is_scalar); + + return data_component_interpretation; + } - // @sect4{Transformations between variables} - - // Next, we define the gas - // constant. We will set it to 1.4 - // in its definition immediately - // following the declaration of - // this class (unlike integer - // variables, like the ones above, - // static const floating point - // member variables cannot be - // initialized within the class - // declaration in C++). This value - // of 1.4 is representative of a - // gas that consists of molecules - // composed of two atoms, such as - // air which consists up to small - // traces almost entirely of $N_2$ - // and $O_2$. - static const double gas_gamma; - - - // In the following, we will need to - // compute the kinetic energy and the - // pressure from a vector of conserved - // variables. This we can do based on the - // energy density and the kinetic energy - // $\frac 12 \rho |\mathbf v|^2 = - // \frac{|\rho \mathbf v|^2}{2\rho}$ - // (note that the independent variables - // contain the momentum components $\rho - // v_i$, not the velocities $v_i$). - // - // There is one slight problem: We will - // need to call the following functions - // with input arguments of type - // std::vector@ and - // Vector@. The - // problem is that the former has an - // access operator - // operator[] whereas the - // latter, for historical reasons, has - // operator(). We wouldn't - // be able to write the function in a - // generic way if we were to use one or - // the other of these. Fortunately, we - // can use the following trick: instead - // of writing v[i] or - // v(i), we can use - // *(v.begin() + i), i.e. we - // generate an iterator that points to - // the ith element, and then - // dereference it. This works for both - // kinds of vectors -- not the prettiest - // solution, but one that works. - template - static - number - compute_kinetic_energy (const InputVector &W) - { - number kinetic_energy = 0; - for (unsigned int d=0; dstd::vector@ and + // Vector@. The + // problem is that the former has an + // access operator + // operator[] whereas the + // latter, for historical reasons, has + // operator(). We wouldn't + // be able to write the function in a + // generic way if we were to use one or + // the other of these. 
Fortunately, we + // can use the following trick: instead + // of writing v[i] or + // v(i), we can use + // *(v.begin() + i), i.e. we + // generate an iterator that points to + // the ith element, and then + // dereference it. This works for both + // kinds of vectors -- not the prettiest + // solution, but one that works. + template + static + number + compute_kinetic_energy (const InputVector &W) + { + number kinetic_energy = 0; + for (unsigned int d=0; d - static - number - compute_pressure (const InputVector &W) - { - return ((gas_gamma-1.0) * - (*(W.begin() + energy_component) - - compute_kinetic_energy(W))); - } + template + static + number + compute_pressure (const InputVector &W) + { + return ((gas_gamma-1.0) * + (*(W.begin() + energy_component) - + compute_kinetic_energy(W))); + } - // @sect4{EulerEquations::compute_flux_matrix} - - // We define the flux function - // $F(W)$ as one large matrix. - // Each row of this matrix - // represents a scalar - // conservation law for the - // component in that row. The - // exact form of this matrix is - // given in the - // introduction. Note that we - // know the size of the matrix: - // it has as many rows as the - // system has components, and - // dim columns; - // rather than using a FullMatrix - // object for such a matrix - // (which has a variable number - // of rows and columns and must - // therefore allocate memory on - // the heap each time such a - // matrix is created), we use a - // rectangular array of numbers - // right away. - // - // We templatize the numerical type of - // the flux function so that we may use - // the automatic differentiation type - // here. Similarly, we will call the - // function with different input vector - // data types, so we templatize on it as - // well: - template - static - void compute_flux_matrix (const InputVector &W, - number (&flux)[n_components][dim]) + // @sect4{EulerEquations::compute_flux_matrix} + + // We define the flux function + // $F(W)$ as one large matrix. + // Each row of this matrix + // represents a scalar + // conservation law for the + // component in that row. The + // exact form of this matrix is + // given in the + // introduction. Note that we + // know the size of the matrix: + // it has as many rows as the + // system has components, and + // dim columns; + // rather than using a FullMatrix + // object for such a matrix + // (which has a variable number + // of rows and columns and must + // therefore allocate memory on + // the heap each time such a + // matrix is created), we use a + // rectangular array of numbers + // right away. + // + // We templatize the numerical type of + // the flux function so that we may use + // the automatic differentiation type + // here. 
Similarly, we will call the + // function with different input vector + // data types, so we templatize on it as + // well: + template + static + void compute_flux_matrix (const InputVector &W, + number (&flux)[n_components][dim]) + { + // First compute the pressure that + // appears in the flux matrix, and + // then compute the first + // dim columns of the + // matrix that correspond to the + // momentum terms: + const number pressure = compute_pressure (W); + + for (unsigned int d=0; ddim columns of the - // matrix that correspond to the - // momentum terms: - const number pressure = compute_pressure (W); - - for (unsigned int d=0; d - static - void numerical_normal_flux (const Point &normal, - const InputVector &Wplus, - const InputVector &Wminus, - const double alpha, - Sacado::Fad::DFad (&normal_flux)[n_components]) - { - Sacado::Fad::DFad iflux[n_components][dim]; - Sacado::Fad::DFad oflux[n_components][dim]; - - compute_flux_matrix (Wplus, iflux); - compute_flux_matrix (Wminus, oflux); - for (unsigned int di=0; di + static + void numerical_normal_flux (const Point &normal, + const InputVector &Wplus, + const InputVector &Wminus, + const double alpha, + Sacado::Fad::DFad (&normal_flux)[n_components]) + { + Sacado::Fad::DFad iflux[n_components][dim]; + Sacado::Fad::DFad oflux[n_components][dim]; - normal_flux[di] += 0.5*alpha*(Wplus[di] - Wminus[di]); - } - } + compute_flux_matrix (Wplus, iflux); + compute_flux_matrix (Wminus, oflux); - // @sect4{EulerEquations::compute_forcing_vector} - - // In the same way as describing the flux - // function $\mathbf F(\mathbf w)$, we - // also need to have a way to describe - // the right hand side forcing term. As - // mentioned in the introduction, we - // consider only gravity here, which - // leads to the specific form $\mathbf - // G(\mathbf w) = \left( - // g_1\rho, g_2\rho, g_3\rho, 0, - // \rho \mathbf g \cdot \mathbf v - // \right)^T$, shown here for - // the 3d case. More specifically, we - // will consider only $\mathbf - // g=(0,0,-1)^T$ in 3d, or $\mathbf - // g=(0,-1)^T$ in 2d. This naturally - // leads to the following function: - template - static - void compute_forcing_vector (const InputVector &W, - number (&forcing)[n_components]) + for (unsigned int di=0; di + static + void compute_forcing_vector (const InputVector &W, + number (&forcing)[n_components]) + { + const double gravity = -1.0; - // @sect4{Dealing with boundary conditions} + for (unsigned int c=0; cWminus will of course be - // modified, so it shouldn't be a - // const argument. Yet it is - // in the implementation below, and needs - // to be in order to allow the code to - // compile. The reason is that we call - // this function at a place where - // Wminus is of type - // Table@<2,Sacado::Fad::DFad@ - // @>, this being 2d table with - // indices representing the quadrature - // point and the vector component, - // respectively. We call this function - // with Wminus[q] as last - // argument; subscripting a 2d table - // yields a temporary accessor object - // representing a 1d vector, just what we - // want here. The problem is that a - // temporary accessor object can't be - // bound to a non-const reference - // argument of a function, as we would - // like here, according to the C++ 1998 - // and 2003 standards (something that - // will be fixed with the next standard - // in the form of rvalue references). 
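For reference, written out as a formula, the loop in numerical_normal_flux() above assembles a Lax-Friedrichs type flux; this is a direct transcription of that code, with $\alpha$ denoting the alpha argument: $\mathbf H(\mathbf{w}^+, \mathbf{w}^-, \mathbf{n}) = \frac{1}{2} \left( \mathbf F(\mathbf{w}^+)\cdot \mathbf{n} + \mathbf F(\mathbf{w}^-)\cdot \mathbf{n} \right) + \frac{\alpha}{2} \left( \mathbf{w}^+ - \mathbf{w}^- \right)$, where $\mathbf F$ is the flux matrix computed by compute_flux_matrix() and $\mathbf{w}^\pm$ are the states on the two sides of the face.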
We - // get away with making the output - // argument here a constant because it is - // the accessor object that's - // constant, not the table it points to: - // that one can still be written to. The - // hack is unpleasant nevertheless - // because it restricts the kind of data - // types that may be used as template - // argument to this function: a regular - // vector isn't going to do because that - // one can not be written to when marked - // const. With no good - // solution around at the moment, we'll - // go with the pragmatic, even if not - // pretty, solution shown here: - template - static - void - compute_Wminus (const BoundaryKind (&boundary_kind)[n_components], - const Point &normal_vector, - const DataVector &Wplus, - const Vector &boundary_values, - const DataVector &Wminus) - { - for (unsigned int c = 0; c < n_components; c++) - switch (boundary_kind[c]) - { - case inflow_boundary: - { - Wminus[c] = boundary_values(c); - break; - } + // Another thing we have to deal with is + // boundary conditions. To this end, let + // us first define the kinds of boundary + // conditions we currently know how to + // deal with: + enum BoundaryKind + { + inflow_boundary, + outflow_boundary, + no_penetration_boundary, + pressure_boundary + }; - case outflow_boundary: - { - Wminus[c] = Wplus[c]; - break; - } - // Prescribed pressure boundary - // conditions are a bit more - // complicated by the fact that - // even though the pressure is - // prescribed, we really are - // setting the energy component - // here, which will depend on - // velocity and pressure. So - // even though this seems like - // a Dirichlet type boundary - // condition, we get - // sensitivities of energy to - // velocity and density (unless - // these are also prescribed): - case pressure_boundary: - { - const typename DataVector::value_type - density = (boundary_kind[density_component] == - inflow_boundary - ? - boundary_values(density_component) - : - Wplus[density_component]); + // The next part is to actually decide + // what to do at each kind of + // boundary. To this end, remember from + // the introduction that boundary + // conditions are specified by choosing a + // value $\mathbf w^-$ on the outside of + // a boundary given an inhomogeneity + // $\mathbf j$ and possibly the + // solution's value $\mathbf w^+$ on the + // inside. Both are then passed to the + // numerical flux $\mathbf + // H(\mathbf{w}^+, \mathbf{w}^-, + // \mathbf{n})$ to define boundary + // contributions to the bilinear form. + // + // Boundary conditions can in some cases + // be specified for each component of the + // solution vector independently. For + // example, if component $c$ is marked + // for inflow, then $w^-_c = j_c$. If it + // is an outflow, then $w^-_c = + // w^+_c$. These two simple cases are + // handled first in the function below. + // + // There is a little snag that makes this + // function unpleasant from a C++ + // language viewpoint: The output vector + // Wminus will of course be + // modified, so it shouldn't be a + // const argument. Yet it is + // in the implementation below, and needs + // to be in order to allow the code to + // compile. The reason is that we call + // this function at a place where + // Wminus is of type + // Table@<2,Sacado::Fad::DFad@ + // @>, this being 2d table with + // indices representing the quadrature + // point and the vector component, + // respectively. 
We call this function + // with Wminus[q] as last + // argument; subscripting a 2d table + // yields a temporary accessor object + // representing a 1d vector, just what we + // want here. The problem is that a + // temporary accessor object can't be + // bound to a non-const reference + // argument of a function, as we would + // like here, according to the C++ 1998 + // and 2003 standards (something that + // will be fixed with the next standard + // in the form of rvalue references). We + // get away with making the output + // argument here a constant because it is + // the accessor object that's + // constant, not the table it points to: + // that one can still be written to. The + // hack is unpleasant nevertheless + // because it restricts the kind of data + // types that may be used as template + // argument to this function: a regular + // vector isn't going to do because that + // one can not be written to when marked + // const. With no good + // solution around at the moment, we'll + // go with the pragmatic, even if not + // pretty, solution shown here: + template + static + void + compute_Wminus (const BoundaryKind (&boundary_kind)[n_components], + const Point &normal_vector, + const DataVector &Wplus, + const Vector &boundary_values, + const DataVector &Wminus) + { + for (unsigned int c = 0; c < n_components; c++) + switch (boundary_kind[c]) + { + case inflow_boundary: + { + Wminus[c] = boundary_values(c); + break; + } - typename DataVector::value_type kinetic_energy = 0; - for (unsigned int d=0; d vdotn = 0; + for (unsigned int d = 0; d < dim; d++) + { + vdotn += Wplus[d]*normal_vector[d]; + } - case no_penetration_boundary: - { - // We prescribe the - // velocity (we are dealing with a - // particular component here so - // that the average of the - // velocities is orthogonal to the - // surface normal. This creates - // sensitivies of across the - // velocity components. - Sacado::Fad::DFad vdotn = 0; - for (unsigned int d = 0; d < dim; d++) { - vdotn += Wplus[d]*normal_vector[d]; - } - - Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c]; - break; - } + Wminus[c] = Wplus[c] - 2.0*vdotn*normal_vector[c]; + break; + } - default: - Assert (false, ExcNotImplemented()); - } - } + default: + Assert (false, ExcNotImplemented()); + } + } - // @sect4{EulerEquations::compute_refinement_indicators} - - // In this class, we also want to specify - // how to refine the mesh. The class - // ConservationLaw that will - // use all the information we provide - // here in the EulerEquation - // class is pretty agnostic about the - // particular conservation law it solves: - // as doesn't even really care how many - // components a solution vector - // has. Consequently, it can't know what - // a reasonable refinement indicator - // would be. On the other hand, here we - // do, or at least we can come up with a - // reasonable choice: we simply look at - // the gradient of the density, and - // compute - // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$, - // where $x_K$ is the center of cell $K$. 
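To make the constness discussion around compute_Wminus() a bit more tangible, here is a small self-contained sketch. The RowProxy and Table2 types below are stand-ins invented for illustration (they are not deal.II's Table class), but they reproduce the relevant behavior: the temporary proxy returned by subscripting can only bind to a reference-to-const function argument, and yet writing through that const proxy still modifies the underlying storage:

@code
#include <cassert>
#include <vector>

struct RowProxy
{
  double *row;                                     // points into the table's storage

  double &operator[] (const unsigned int c) const  // writable even though *this is const
  {
    return row[c];
  }
};

struct Table2
{
  std::vector<double> data;
  unsigned int        n_cols;

  Table2 (const unsigned int n_rows, const unsigned int n_cols)
    : data (n_rows*n_cols, 0.), n_cols (n_cols)
  {}

  RowProxy operator[] (const unsigned int r)       // returns a temporary proxy object
  {
    RowProxy p = { &data[r*n_cols] };
    return p;
  }
};

// The parameter has to be a reference-to-const so that the temporary proxy
// returned by table[q] can bind to it; we can nevertheless write through it:
template <typename DataVector>
void set_first_component (const DataVector &W, const double value)
{
  W[0] = value;
}

int main ()
{
  Table2 table (3, 4);
  set_first_component (table[1], 42.);
  assert (table.data[1*4 + 0] == 42.);
}
@endcode

A plain std::vector<double> would indeed not work as the DataVector argument here, for exactly the reason stated above: once marked const, it cannot be written to.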
- // - // There are certainly a number of - // equally reasonable refinement - // indicators, but this one does, and it - // is easy to compute: - static - void - compute_refinement_indicators (const DoFHandler &dof_handler, - const Mapping &mapping, - const Vector &solution, - Vector &refinement_indicators) - { - const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell; - std::vector dofs (dofs_per_cell); + // @sect4{EulerEquations::compute_refinement_indicators} + + // In this class, we also want to specify + // how to refine the mesh. The class + // ConservationLaw that will + // use all the information we provide + // here in the EulerEquation + // class is pretty agnostic about the + // particular conservation law it solves: + // as doesn't even really care how many + // components a solution vector + // has. Consequently, it can't know what + // a reasonable refinement indicator + // would be. On the other hand, here we + // do, or at least we can come up with a + // reasonable choice: we simply look at + // the gradient of the density, and + // compute + // $\eta_K=\log\left(1+|\nabla\rho(x_K)|\right)$, + // where $x_K$ is the center of cell $K$. + // + // There are certainly a number of + // equally reasonable refinement + // indicators, but this one does, and it + // is easy to compute: + static + void + compute_refinement_indicators (const DoFHandler &dof_handler, + const Mapping &mapping, - const Vector &solution, ++ const Vector &solution, + Vector &refinement_indicators) + { + const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell; + std::vector dofs (dofs_per_cell); - const QMidpoint quadrature_formula; - const UpdateFlags update_flags = update_gradients; - FEValues fe_v (mapping, dof_handler.get_fe(), - quadrature_formula, update_flags); + const QMidpoint quadrature_formula; + const UpdateFlags update_flags = update_gradients; + FEValues fe_v (mapping, dof_handler.get_fe(), + quadrature_formula, update_flags); - std::vector > > - dU (1, std::vector >(n_components)); + std::vector > > + dU (1, std::vector >(n_components)); - typename DoFHandler::active_cell_iterator - cell = dof_handler.begin_active(), - endc = dof_handler.end(); - for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) - { - fe_v.reinit(cell); - fe_v.get_function_grads (solution, dU); + typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(), + endc = dof_handler.end(); + for (unsigned int cell_no=0; cell!=endc; ++cell, ++cell_no) + { + fe_v.reinit(cell); + fe_v.get_function_grads (solution, dU); - refinement_indicators(cell_no) - = std::log(1+ - std::sqrt(dU[0][density_component] * - dU[0][density_component])); - } + refinement_indicators(cell_no) + = std::log(1+ + std::sqrt(dU[0][density_component] * + dU[0][density_component])); } + } @@@ -764,26 -765,26 +765,26 @@@ EulerEquations::Postprocessor:: compute_derived_quantities_vector (const std::vector > &uh, const std::vector > > &duh, - const std::vector > > & /*dduh*/, - const std::vector > & /*normals*/, - const std::vector > & /*evaluation_points*/, + const std::vector > > &/*dduh*/, + const std::vector > &/*normals*/, + const std::vector > &/*evaluation_points*/, std::vector > &computed_quantities) const { - // At the beginning of the function, let us - // make sure that all variables have the - // correct sizes, so that we can access - // individual vector elements without - // having to wonder whether we might read - // or write invalid elements; we also check - // that the duh vector only - // 
contains data if we really need it (the - // system knows about this because we say - // so in the - // get_needed_update_flags() - // function below). For the inner vectors, - // we check that at least the first element - // of the outer vector has the correct - // inner size: + // At the beginning of the function, let us + // make sure that all variables have the + // correct sizes, so that we can access + // individual vector elements without + // having to wonder whether we might read + // or write invalid elements; we also check + // that the duh vector only + // contains data if we really need it (the + // system knows about this because we say + // so in the + // get_needed_update_flags() + // function below). For the inner vectors, + // we check that at least the first element + // of the outer vector has the correct + // inner size: const unsigned int n_quadrature_points = uh.size(); if (do_schlieren_plot == true) diff --cc deal.II/examples/step-35/step-35.cc index cd57ab99b7,cb37555fe7..109da8c0dd --- a/deal.II/examples/step-35/step-35.cc +++ b/deal.II/examples/step-35/step-35.cc @@@ -419,267 -419,267 +419,267 @@@ namespace Step3 template class NavierStokesProjection { - public: - NavierStokesProjection (const RunTimeParameters::Data_Storage &data); - - void run (const bool verbose = false, - const unsigned int n_plots = 10); - protected: - RunTimeParameters::MethodFormulation type; - - const unsigned int deg; - const double dt; - const double t_0, T, Re; - - EquationData::Velocity vel_exact; - std::map boundary_values; - std::vector boundary_indicators; - - Triangulation triangulation; - - FE_Q fe_velocity; - FE_Q fe_pressure; - - DoFHandler dof_handler_velocity; - DoFHandler dof_handler_pressure; - - QGauss quadrature_pressure; - QGauss quadrature_velocity; - - SparsityPattern sparsity_pattern_velocity; - SparsityPattern sparsity_pattern_pressure; - SparsityPattern sparsity_pattern_pres_vel; - - SparseMatrix vel_Laplace_plus_Mass; - SparseMatrix vel_it_matrix[dim]; - SparseMatrix vel_Mass; - SparseMatrix vel_Laplace; - SparseMatrix vel_Advection; - SparseMatrix pres_Laplace; - SparseMatrix pres_Mass; - SparseMatrix pres_Diff[dim]; - SparseMatrix pres_iterative; - - Vector pres_n; - Vector pres_n_minus_1; - Vector phi_n; - Vector phi_n_minus_1; - Vector u_n[dim]; - Vector u_n_minus_1[dim]; - Vector u_star[dim]; - Vector force[dim]; - Vector v_tmp; - Vector pres_tmp; - Vector rot_u; - - SparseILU prec_velocity[dim]; - SparseILU prec_pres_Laplace; - SparseDirectUMFPACK prec_mass; - SparseDirectUMFPACK prec_vel_mass; - - DeclException2 (ExcInvalidTimeStep, - double, double, - << " The time step " << arg1 << " is out of range." - << std::endl - << " The permitted range is (0," << arg2 << "]"); - - void create_triangulation_and_dofs (const unsigned int n_refines); - - void initialize(); - - void interpolate_velocity (); - - void diffusion_step (const bool reinit_prec); - - void projection_step (const bool reinit_prec); - - void update_pressure (const bool reinit_prec); - - private: - unsigned int vel_max_its; - unsigned int vel_Krylov_size; - unsigned int vel_off_diagonals; - unsigned int vel_update_prec; - double vel_eps; - double vel_diag_strength; - - void initialize_velocity_matrices(); - - void initialize_pressure_matrices(); - - // The next few structures and functions - // are for doing various things in - // parallel. They follow the scheme laid - // out in @ref threads, using the - // WorkStream class. 
As explained there, - // this requires us to declare two - // structures for each of the assemblers, - // a per-task data and a scratch data - // structure. These are then handed over - // to functions that assemble local - // contributions and that copy these - // local contributions to the global - // objects. - // - // One of the things that are specific to - // this program is that we don't just - // have a single DoFHandler object that - // represents both the velocities and the - // pressure, but we use individual - // DoFHandler objects for these two kinds - // of variables. We pay for this - // optimization when we want to assemble - // terms that involve both variables, - // such as the divergence of the velocity - // and the gradient of the pressure, - // times the respective test - // functions. When doing so, we can't - // just anymore use a single FEValues - // object, but rather we need two, and - // they need to be initialized with cell - // iterators that point to the same cell - // in the triangulation but different - // DoFHandlers. - // - // To do this in practice, we declare a - // "synchronous" iterator -- an object - // that internally consists of several - // (in our case two) iterators, and each - // time the synchronous iteration is - // moved up one step, each of the - // iterators stored internally is moved - // up one step as well, thereby always - // staying in sync. As it so happens, - // there is a deal.II class that - // facilitates this sort of thing. - typedef std_cxx1x::tuple< typename DoFHandler::active_cell_iterator, - typename DoFHandler::active_cell_iterator - > IteratorTuple; - - typedef SynchronousIterators IteratorPair; - - void initialize_gradient_operator(); - - struct InitGradPerTaskData - { - unsigned int d; - unsigned int vel_dpc; - unsigned int pres_dpc; - FullMatrix local_grad; - std::vector vel_local_dof_indices; - std::vector pres_local_dof_indices; - - InitGradPerTaskData (const unsigned int dd, - const unsigned int vdpc, - const unsigned int pdpc) - : - d(dd), - vel_dpc (vdpc), - pres_dpc (pdpc), - local_grad (vdpc, pdpc), - vel_local_dof_indices (vdpc), - pres_local_dof_indices (pdpc) - {} - }; + public: + NavierStokesProjection (const RunTimeParameters::Data_Storage &data); + + void run (const bool verbose = false, + const unsigned int n_plots = 10); + protected: + RunTimeParameters::MethodFormulation type; + + const unsigned int deg; + const double dt; + const double t_0, T, Re; + + EquationData::Velocity vel_exact; + std::map boundary_values; + std::vector boundary_indicators; + + Triangulation triangulation; + + FE_Q fe_velocity; + FE_Q fe_pressure; + + DoFHandler dof_handler_velocity; + DoFHandler dof_handler_pressure; + + QGauss quadrature_pressure; + QGauss quadrature_velocity; + + SparsityPattern sparsity_pattern_velocity; + SparsityPattern sparsity_pattern_pressure; + SparsityPattern sparsity_pattern_pres_vel; + + SparseMatrix vel_Laplace_plus_Mass; + SparseMatrix vel_it_matrix[dim]; + SparseMatrix vel_Mass; + SparseMatrix vel_Laplace; + SparseMatrix vel_Advection; + SparseMatrix pres_Laplace; + SparseMatrix pres_Mass; + SparseMatrix pres_Diff[dim]; + SparseMatrix pres_iterative; + + Vector pres_n; + Vector pres_n_minus_1; + Vector phi_n; + Vector phi_n_minus_1; + Vector u_n[dim]; + Vector u_n_minus_1[dim]; + Vector u_star[dim]; + Vector force[dim]; + Vector v_tmp; + Vector pres_tmp; + Vector rot_u; + + SparseILU prec_velocity[dim]; + SparseILU prec_pres_Laplace; + SparseDirectUMFPACK prec_mass; + SparseDirectUMFPACK 
prec_vel_mass; + + DeclException2 (ExcInvalidTimeStep, + double, double, + << " The time step " << arg1 << " is out of range." + << std::endl + << " The permitted range is (0," << arg2 << "]"); + + void create_triangulation_and_dofs (const unsigned int n_refines); + + void initialize(); + + void interpolate_velocity (); + + void diffusion_step (const bool reinit_prec); + + void projection_step (const bool reinit_prec); + + void update_pressure (const bool reinit_prec); + + private: + unsigned int vel_max_its; + unsigned int vel_Krylov_size; + unsigned int vel_off_diagonals; + unsigned int vel_update_prec; + double vel_eps; + double vel_diag_strength; + + void initialize_velocity_matrices(); + + void initialize_pressure_matrices(); + + // The next few structures and functions + // are for doing various things in + // parallel. They follow the scheme laid + // out in @ref threads, using the + // WorkStream class. As explained there, + // this requires us to declare two + // structures for each of the assemblers, + // a per-task data and a scratch data + // structure. These are then handed over + // to functions that assemble local + // contributions and that copy these + // local contributions to the global + // objects. + // + // One of the things that are specific to + // this program is that we don't just + // have a single DoFHandler object that + // represents both the velocities and the + // pressure, but we use individual + // DoFHandler objects for these two kinds + // of variables. We pay for this + // optimization when we want to assemble + // terms that involve both variables, + // such as the divergence of the velocity + // and the gradient of the pressure, + // times the respective test + // functions. When doing so, we can't + // just anymore use a single FEValues + // object, but rather we need two, and + // they need to be initialized with cell + // iterators that point to the same cell + // in the triangulation but different + // DoFHandlers. + // + // To do this in practice, we declare a + // "synchronous" iterator -- an object + // that internally consists of several + // (in our case two) iterators, and each + // time the synchronous iteration is + // moved up one step, each of the + // iterators stored internally is moved + // up one step as well, thereby always + // staying in sync. As it so happens, + // there is a deal.II class that + // facilitates this sort of thing. 
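To see how the pieces declared in this class fit together, the following sketch shows one way of driving the assembly of the gradient operator with WorkStream and the synchronous iterator pair. It is meant as an illustration only: it assumes it sits inside a member function of NavierStokesProjection<dim> (say, initialize_gradient_operator()); the component index and the update flags are chosen here for the sake of the example, and the actual code in step-35 may differ in detail:

@code
// Sample per-task and scratch objects; WorkStream copies them once per task.
InitGradPerTaskData per_task_data (0,   // component d; the real code presumably loops over 0..dim-1
                                   fe_velocity.dofs_per_cell,
                                   fe_pressure.dofs_per_cell);
InitGradScratchData scratch_data (fe_velocity, fe_pressure,
                                  quadrature_velocity,
                                  update_gradients | update_JxW_values,  // velocity flags (illustrative)
                                  update_values);                        // pressure flags (illustrative)

// Two DoFHandlers, one pair of cell iterators kept in lock-step:
IteratorPair begin (IteratorTuple (dof_handler_velocity.begin_active(),
                                   dof_handler_pressure.begin_active()));
IteratorPair end   (IteratorTuple (dof_handler_velocity.end(),
                                   dof_handler_pressure.end()));

WorkStream::run (begin, end,
                 std_cxx1x::bind (&NavierStokesProjection<dim>::assemble_one_cell_of_gradient,
                                  this,
                                  std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3),
                 std_cxx1x::bind (&NavierStokesProjection<dim>::copy_gradient_local_to_global,
                                  this,
                                  std_cxx1x::_1),
                 scratch_data,
                 per_task_data);
@endcode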
+ typedef std_cxx1x::tuple< typename DoFHandler::active_cell_iterator, + typename DoFHandler::active_cell_iterator + > IteratorTuple; + + typedef SynchronousIterators IteratorPair; + + void initialize_gradient_operator(); + + struct InitGradPerTaskData + { + unsigned int d; + unsigned int vel_dpc; + unsigned int pres_dpc; + FullMatrix local_grad; + std::vector vel_local_dof_indices; + std::vector pres_local_dof_indices; + + InitGradPerTaskData (const unsigned int dd, + const unsigned int vdpc, + const unsigned int pdpc) + : + d(dd), + vel_dpc (vdpc), + pres_dpc (pdpc), + local_grad (vdpc, pdpc), + vel_local_dof_indices (vdpc), + pres_local_dof_indices (pdpc) + {} + }; - struct InitGradScratchData - { - unsigned int nqp; - FEValues fe_val_vel; - FEValues fe_val_pres; - InitGradScratchData (const FE_Q &fe_v, - const FE_Q &fe_p, - const QGauss &quad, - const UpdateFlags flags_v, - const UpdateFlags flags_p) - : - nqp (quad.size()), - fe_val_vel (fe_v, quad, flags_v), - fe_val_pres (fe_p, quad, flags_p) - {} - InitGradScratchData (const InitGradScratchData &data) - : - nqp (data.nqp), - fe_val_vel (data.fe_val_vel.get_fe(), - data.fe_val_vel.get_quadrature(), - data.fe_val_vel.get_update_flags()), - fe_val_pres (data.fe_val_pres.get_fe(), - data.fe_val_pres.get_quadrature(), - data.fe_val_pres.get_update_flags()) - {} - }; + struct InitGradScratchData + { + unsigned int nqp; + FEValues fe_val_vel; + FEValues fe_val_pres; + InitGradScratchData (const FE_Q &fe_v, + const FE_Q &fe_p, + const QGauss &quad, + const UpdateFlags flags_v, + const UpdateFlags flags_p) + : + nqp (quad.size()), + fe_val_vel (fe_v, quad, flags_v), + fe_val_pres (fe_p, quad, flags_p) + {} + InitGradScratchData (const InitGradScratchData &data) + : + nqp (data.nqp), + fe_val_vel (data.fe_val_vel.get_fe(), + data.fe_val_vel.get_quadrature(), + data.fe_val_vel.get_update_flags()), + fe_val_pres (data.fe_val_pres.get_fe(), + data.fe_val_pres.get_quadrature(), + data.fe_val_pres.get_update_flags()) + {} + }; - void assemble_one_cell_of_gradient (const IteratorPair &SI, - InitGradScratchData &scratch, - InitGradPerTaskData &data); - void assemble_one_cell_of_gradient (const IteratorPair &SI, ++ void assemble_one_cell_of_gradient (const IteratorPair &SI, + InitGradScratchData &scratch, + InitGradPerTaskData &data); - void copy_gradient_local_to_global (const InitGradPerTaskData &data); + void copy_gradient_local_to_global (const InitGradPerTaskData &data); - // The same general layout also applies - // to the following classes and functions - // implementing the assembly of the - // advection term: - void assemble_advection_term(); + // The same general layout also applies + // to the following classes and functions + // implementing the assembly of the + // advection term: + void assemble_advection_term(); - struct AdvectionPerTaskData - { - FullMatrix local_advection; - std::vector local_dof_indices; - AdvectionPerTaskData (const unsigned int dpc) - : - local_advection (dpc, dpc), - local_dof_indices (dpc) - {} - }; + struct AdvectionPerTaskData + { + FullMatrix local_advection; + std::vector local_dof_indices; + AdvectionPerTaskData (const unsigned int dpc) + : + local_advection (dpc, dpc), + local_dof_indices (dpc) + {} + }; - struct AdvectionScratchData - { - unsigned int nqp; - unsigned int dpc; - std::vector< Point > u_star_local; - std::vector< Tensor<1,dim> > grad_u_star; - std::vector u_star_tmp; - FEValues fe_val; - AdvectionScratchData (const FE_Q &fe, - const QGauss &quad, - const UpdateFlags flags) - : - nqp 
(quad.size()), - dpc (fe.dofs_per_cell), - u_star_local (nqp), - grad_u_star (nqp), - u_star_tmp (nqp), - fe_val (fe, quad, flags) - {} - - AdvectionScratchData (const AdvectionScratchData &data) - : - nqp (data.nqp), - dpc (data.dpc), - u_star_local (nqp), - grad_u_star (nqp), - u_star_tmp (nqp), - fe_val (data.fe_val.get_fe(), - data.fe_val.get_quadrature(), - data.fe_val.get_update_flags()) - {} - }; + struct AdvectionScratchData + { + unsigned int nqp; + unsigned int dpc; + std::vector< Point > u_star_local; + std::vector< Tensor<1,dim> > grad_u_star; + std::vector u_star_tmp; + FEValues fe_val; + AdvectionScratchData (const FE_Q &fe, + const QGauss &quad, + const UpdateFlags flags) + : + nqp (quad.size()), + dpc (fe.dofs_per_cell), + u_star_local (nqp), + grad_u_star (nqp), + u_star_tmp (nqp), + fe_val (fe, quad, flags) + {} + + AdvectionScratchData (const AdvectionScratchData &data) + : + nqp (data.nqp), + dpc (data.dpc), + u_star_local (nqp), + grad_u_star (nqp), + u_star_tmp (nqp), + fe_val (data.fe_val.get_fe(), + data.fe_val.get_quadrature(), + data.fe_val.get_update_flags()) + {} + }; - void assemble_one_cell_of_advection (const typename DoFHandler::active_cell_iterator &cell, - AdvectionScratchData &scratch, - AdvectionPerTaskData &data); + void assemble_one_cell_of_advection (const typename DoFHandler::active_cell_iterator &cell, + AdvectionScratchData &scratch, + AdvectionPerTaskData &data); - void copy_advection_local_to_global (const AdvectionPerTaskData &data); + void copy_advection_local_to_global (const AdvectionPerTaskData &data); - // The final few functions implement the - // diffusion solve as well as - // postprocessing the output, including - // computing the curl of the velocity: - void diffusion_component_solve (const unsigned int d); + // The final few functions implement the + // diffusion solve as well as + // postprocessing the output, including + // computing the curl of the velocity: + void diffusion_component_solve (const unsigned int d); - void output_results (const unsigned int step); + void output_results (const unsigned int step); - void assemble_vorticity (const bool reinit_prec); + void assemble_vorticity (const bool reinit_prec); }; diff --cc deal.II/examples/step-37/step-37.cc index a664f42dcc,807b710c8e..080c0d9277 --- a/deal.II/examples/step-37/step-37.cc +++ b/deal.II/examples/step-37/step-37.cc @@@ -389,46 -389,46 +389,46 @@@ namespace Step3 template class LaplaceOperator : public Subscriptor { - public: - LaplaceOperator (); + public: + LaplaceOperator (); - void clear(); + void clear(); - void reinit (const MGDoFHandler &dof_handler, - const ConstraintMatrix &constraints, - const unsigned int level = numbers::invalid_unsigned_int); + void reinit (const MGDoFHandler &dof_handler, - const ConstraintMatrix &constraints, ++ const ConstraintMatrix &constraints, + const unsigned int level = numbers::invalid_unsigned_int); - unsigned int m () const; - unsigned int n () const; + unsigned int m () const; + unsigned int n () const; - void vmult (Vector &dst, - const Vector &src) const; - void Tvmult (Vector &dst, - const Vector &src) const; - void vmult_add (Vector &dst, - const Vector &src) const; - void Tvmult_add (Vector &dst, - const Vector &src) const; + void vmult (Vector &dst, + const Vector &src) const; + void Tvmult (Vector &dst, + const Vector &src) const; + void vmult_add (Vector &dst, + const Vector &src) const; + void Tvmult_add (Vector &dst, + const Vector &src) const; - number el (const unsigned int row, - const unsigned int col) const; 
- void set_diagonal (const Vector &diagonal); + number el (const unsigned int row, + const unsigned int col) const; + void set_diagonal (const Vector &diagonal); - std::size_t memory_consumption () const; + std::size_t memory_consumption () const; - private: - void local_apply (const MatrixFree &data, - Vector &dst, - const Vector &src, - const std::pair &cell_range) const; + private: + void local_apply (const MatrixFree &data, + Vector &dst, + const Vector &src, + const std::pair &cell_range) const; - void evaluate_coefficient(const Coefficient &function); + void evaluate_coefficient(const Coefficient &function); - MatrixFree data; - AlignedVector > coefficient; + MatrixFree data; + AlignedVector > coefficient; - Vector diagonal_values; - bool diagonal_is_available; + Vector diagonal_values; + bool diagonal_is_available; }; @@@ -572,8 -572,8 +572,8 @@@ template void LaplaceOperator::reinit (const MGDoFHandler &dof_handler, - const ConstraintMatrix &constraints, - const unsigned int level) - const ConstraintMatrix &constraints, ++ const ConstraintMatrix &constraints, + const unsigned int level) { typename MatrixFree::AdditionalData additional_data; additional_data.tasks_parallel_scheme = diff --cc deal.II/examples/step-42/step-42.cc index c1cb541fae,7bbab130b1..cceb2afb67 --- a/deal.II/examples/step-42/step-42.cc +++ b/deal.II/examples/step-42/step-42.cc @@@ -170,14 -170,14 +170,14 @@@ namespace Step4 MPI_Comm _mpi_communicator, ConditionalOStream _pcout); - void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor, - SymmetricTensor<2,dim> &strain_tensor, + void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor, + SymmetricTensor<2,dim> &strain_tensor, - unsigned int &elast_points, - unsigned int &plast_points, + unsigned int &elast_points, + unsigned int &plast_points, double &yield); - void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized, - SymmetricTensor<4,dim> &stress_strain_tensor, - SymmetricTensor<2,dim> &strain_tensor); + void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized, + SymmetricTensor<4,dim> &stress_strain_tensor, + SymmetricTensor<2,dim> &strain_tensor); inline SymmetricTensor<2,dim> get_strain (const FEValues &fe_values, const unsigned int shape_func, const unsigned int q_point) const; @@@ -260,36 -260,36 +260,36 @@@ } template - void ConstitutiveLaw::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized, - SymmetricTensor<4,dim> &stress_strain_tensor, - SymmetricTensor<2,dim> &strain_tensor) + void ConstitutiveLaw::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized, - SymmetricTensor<4,dim> &stress_strain_tensor, - SymmetricTensor<2,dim> &strain_tensor) ++ SymmetricTensor<4,dim> &stress_strain_tensor, ++ SymmetricTensor<2,dim> &strain_tensor) { if (dim == 3) - { - SymmetricTensor<2,dim> stress_tensor; - stress_tensor = (stress_strain_tensor_kappa + stress_strain_tensor_mu)*strain_tensor; - double tmp = E/((1+nu)*(1-2*nu)); + { + SymmetricTensor<2,dim> stress_tensor; + stress_tensor = (stress_strain_tensor_kappa + stress_strain_tensor_mu)*strain_tensor; + double tmp = E/((1+nu)*(1-2*nu)); - stress_strain_tensor = stress_strain_tensor_mu; - stress_strain_tensor_linearized = stress_strain_tensor_mu; + stress_strain_tensor = stress_strain_tensor_mu; + stress_strain_tensor_linearized = stress_strain_tensor_mu; - SymmetricTensor<2,dim> deviator_stress_tensor = 
deviator(stress_tensor); + SymmetricTensor<2,dim> deviator_stress_tensor = deviator(stress_tensor); - double deviator_stress_tensor_norm = deviator_stress_tensor.norm (); + double deviator_stress_tensor_norm = deviator_stress_tensor.norm (); - double beta = 1.0; - if (deviator_stress_tensor_norm >= sigma_0) - { - beta = (sigma_0 + gamma)/deviator_stress_tensor_norm; - stress_strain_tensor *= beta; - stress_strain_tensor_linearized *= beta; - deviator_stress_tensor /= deviator_stress_tensor_norm; - stress_strain_tensor_linearized -= beta*2*mu*outer_product(deviator_stress_tensor, deviator_stress_tensor); - } + double beta = 1.0; + if (deviator_stress_tensor_norm >= sigma_0) + { + beta = (sigma_0 + gamma)/deviator_stress_tensor_norm; + stress_strain_tensor *= beta; + stress_strain_tensor_linearized *= beta; + deviator_stress_tensor /= deviator_stress_tensor_norm; + stress_strain_tensor_linearized -= beta*2*mu*outer_product(deviator_stress_tensor, deviator_stress_tensor); + } - stress_strain_tensor += stress_strain_tensor_kappa; - stress_strain_tensor_linearized += stress_strain_tensor_kappa; - } + stress_strain_tensor += stress_strain_tensor_kappa; + stress_strain_tensor_linearized += stress_strain_tensor_kappa; + } } namespace EquationData diff --cc deal.II/examples/step-43/step-43.cc index 6876372425,d24bb00104..ec3d2f2fa3 --- a/deal.II/examples/step-43/step-43.cc +++ b/deal.II/examples/step-43/step-43.cc @@@ -460,15 -460,15 +460,15 @@@ namespace Step4 template BlockSchurPreconditioner:: - BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S, + BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S, const InverseMatrix &Mpinv, + PreconditionerMp> &Mpinv, const PreconditionerA &Apreconditioner) - : - darcy_matrix (&S), - m_inverse (&Mpinv), - a_preconditioner (Apreconditioner), - tmp (darcy_matrix->block(1,1).m()) + : + darcy_matrix (&S), + m_inverse (&Mpinv), + a_preconditioner (Apreconditioner), + tmp (darcy_matrix->block(1,1).m()) {} @@@ -548,130 -548,130 +548,130 @@@ template class TwoPhaseFlowProblem { - public: - TwoPhaseFlowProblem (const unsigned int degree); - void run (); - - private: - void setup_dofs (); - void assemble_darcy_preconditioner (); - void build_darcy_preconditioner (); - void assemble_darcy_system (); - void assemble_saturation_system (); - void assemble_saturation_matrix (); - void assemble_saturation_rhs (); - void assemble_saturation_rhs_cell_term (const FEValues &saturation_fe_values, - const FEValues &darcy_fe_values, - const double global_max_u_F_prime, - const double global_S_variation, - const std::vector &local_dof_indices); - void assemble_saturation_rhs_boundary_term (const FEFaceValues &saturation_fe_face_values, - const FEFaceValues &darcy_fe_face_values, - const std::vector &local_dof_indices); - void solve (); - void refine_mesh (const unsigned int min_grid_level, - const unsigned int max_grid_level); - void output_results () const; - - // We follow with a number of - // helper functions that are - // used in a variety of places - // throughout the program: - double get_max_u_F_prime () const; - std::pair get_extrapolated_saturation_range () const; - bool determine_whether_to_solve_for_pressure_and_velocity () const; - void project_back_saturation (); - double compute_viscosity (const std::vector &old_saturation, - const std::vector &old_old_saturation, - const std::vector > &old_saturation_grads, - const std::vector > &old_old_saturation_grads, - const std::vector > &present_darcy_values, - const double 
global_max_u_F_prime, - const double global_S_variation, - const double cell_diameter) const; - - - // This all is followed by the - // member variables, most of - // which are similar to the - // ones in step-31, with the - // exception of the ones that - // pertain to the macro time - // stepping for the - // velocity/pressure system: - Triangulation triangulation; - double global_Omega_diameter; - - const unsigned int degree; - - const unsigned int darcy_degree; - FESystem darcy_fe; - DoFHandler darcy_dof_handler; - ConstraintMatrix darcy_constraints; - - ConstraintMatrix darcy_preconditioner_constraints; - - TrilinosWrappers::BlockSparseMatrix darcy_matrix; - TrilinosWrappers::BlockSparseMatrix darcy_preconditioner_matrix; - - TrilinosWrappers::BlockVector darcy_solution; - TrilinosWrappers::BlockVector darcy_rhs; - - TrilinosWrappers::BlockVector last_computed_darcy_solution; - TrilinosWrappers::BlockVector second_last_computed_darcy_solution; - - - const unsigned int saturation_degree; - FE_Q saturation_fe; - DoFHandler saturation_dof_handler; - ConstraintMatrix saturation_constraints; - - TrilinosWrappers::SparseMatrix saturation_matrix; - - - TrilinosWrappers::Vector saturation_solution; - TrilinosWrappers::Vector old_saturation_solution; - TrilinosWrappers::Vector old_old_saturation_solution; - TrilinosWrappers::Vector saturation_rhs; - - TrilinosWrappers::Vector saturation_matching_last_computed_darcy_solution; - - const double saturation_refinement_threshold; - - double time; - const double end_time; - - double current_macro_time_step; - double old_macro_time_step; - - double time_step; - double old_time_step; - unsigned int timestep_number; - - const double viscosity; - const double porosity; - const double AOS_threshold; - - std_cxx1x::shared_ptr Amg_preconditioner; - std_cxx1x::shared_ptr Mp_preconditioner; - - bool rebuild_saturation_matrix; - - // At the very end we declare a - // variable that denotes the - // material model. Compared to - // step-21, we do this here as - // a member variable since we - // will want to use it in a - // variety of places and so - // having a central place where - // such a variable is declared - // will make it simpler to - // replace one class by another - // (e.g. replace - // RandomMedium::KInverse by - // SingleCurvingCrack::KInverse). 
- const RandomMedium::KInverse k_inverse; + public: + TwoPhaseFlowProblem (const unsigned int degree); + void run (); + + private: + void setup_dofs (); + void assemble_darcy_preconditioner (); + void build_darcy_preconditioner (); + void assemble_darcy_system (); + void assemble_saturation_system (); + void assemble_saturation_matrix (); + void assemble_saturation_rhs (); + void assemble_saturation_rhs_cell_term (const FEValues &saturation_fe_values, + const FEValues &darcy_fe_values, + const double global_max_u_F_prime, + const double global_S_variation, + const std::vector &local_dof_indices); + void assemble_saturation_rhs_boundary_term (const FEFaceValues &saturation_fe_face_values, + const FEFaceValues &darcy_fe_face_values, + const std::vector &local_dof_indices); + void solve (); + void refine_mesh (const unsigned int min_grid_level, + const unsigned int max_grid_level); + void output_results () const; + + // We follow with a number of + // helper functions that are + // used in a variety of places + // throughout the program: + double get_max_u_F_prime () const; + std::pair get_extrapolated_saturation_range () const; + bool determine_whether_to_solve_for_pressure_and_velocity () const; + void project_back_saturation (); + double compute_viscosity (const std::vector &old_saturation, + const std::vector &old_old_saturation, - const std::vector > &old_saturation_grads, - const std::vector > &old_old_saturation_grads, ++ const std::vector > &old_saturation_grads, ++ const std::vector > &old_old_saturation_grads, + const std::vector > &present_darcy_values, + const double global_max_u_F_prime, + const double global_S_variation, + const double cell_diameter) const; + + + // This all is followed by the + // member variables, most of + // which are similar to the + // ones in step-31, with the + // exception of the ones that + // pertain to the macro time + // stepping for the + // velocity/pressure system: + Triangulation triangulation; + double global_Omega_diameter; + + const unsigned int degree; + + const unsigned int darcy_degree; + FESystem darcy_fe; + DoFHandler darcy_dof_handler; + ConstraintMatrix darcy_constraints; + + ConstraintMatrix darcy_preconditioner_constraints; + + TrilinosWrappers::BlockSparseMatrix darcy_matrix; + TrilinosWrappers::BlockSparseMatrix darcy_preconditioner_matrix; + + TrilinosWrappers::BlockVector darcy_solution; + TrilinosWrappers::BlockVector darcy_rhs; + + TrilinosWrappers::BlockVector last_computed_darcy_solution; + TrilinosWrappers::BlockVector second_last_computed_darcy_solution; + + + const unsigned int saturation_degree; + FE_Q saturation_fe; + DoFHandler saturation_dof_handler; + ConstraintMatrix saturation_constraints; + + TrilinosWrappers::SparseMatrix saturation_matrix; + + + TrilinosWrappers::Vector saturation_solution; + TrilinosWrappers::Vector old_saturation_solution; + TrilinosWrappers::Vector old_old_saturation_solution; + TrilinosWrappers::Vector saturation_rhs; + + TrilinosWrappers::Vector saturation_matching_last_computed_darcy_solution; + + const double saturation_refinement_threshold; + + double time; + const double end_time; + + double current_macro_time_step; + double old_macro_time_step; + + double time_step; + double old_time_step; + unsigned int timestep_number; + + const double viscosity; + const double porosity; + const double AOS_threshold; + + std_cxx1x::shared_ptr Amg_preconditioner; + std_cxx1x::shared_ptr Mp_preconditioner; + + bool rebuild_saturation_matrix; + + // At the very end we declare a + // variable that denotes 
the + // material model. Compared to + // step-21, we do this here as + // a member variable since we + // will want to use it in a + // variety of places and so + // having a central place where + // such a variable is declared + // will make it simpler to + // replace one class by another + // (e.g. replace + // RandomMedium::KInverse by + // SingleCurvingCrack::KInverse). + const RandomMedium::KInverse k_inverse; }; diff --cc deal.II/examples/step-9/step-9.cc index 40d12c9b88,ba43fa9e49..3ae105a770 --- a/deal.II/examples/step-9/step-9.cc +++ b/deal.II/examples/step-9/step-9.cc @@@ -510,129 -510,129 +510,129 @@@ namespace Step - // @sect3{GradientEstimation class declaration} - - // Now, finally, here comes the class - // that will compute the difference - // approximation of the gradient on - // each cell and weighs that with a - // power of the mesh size, as - // described in the introduction. - // This class is a simple version of - // the DerivativeApproximation - // class in the library, that uses - // similar techniques to obtain - // finite difference approximations - // of the gradient of a finite - // element field, or if higher - // derivatives. - // - // The - // class has one public static - // function estimate that is - // called to compute a vector of - // error indicators, and one private - // function that does the actual work - // on an interval of all active - // cells. The latter is called by the - // first one in order to be able to - // do the computations in parallel if - // your computer has more than one - // processor. While the first - // function accepts as parameter a - // vector into which the error - // indicator is written for each - // cell. This vector is passed on to - // the second function that actually - // computes the error indicators on - // some cells, and the respective - // elements of the vector are - // written. By the way, we made it - // somewhat of a convention to use - // vectors of floats for error - // indicators rather than the common - // vectors of doubles, as the - // additional accuracy is not - // necessary for estimated values. - // - // In addition to these two - // functions, the class declares to - // exceptions which are raised when a - // cell has no neighbors in each of - // the space directions (in which - // case the matrix described in the - // introduction would be singular and - // can't be inverted), while the - // other one is used in the more - // common case of invalid parameters - // to a function, namely a vector of - // wrong size. - // - // Two annotations to this class are - // still in order: the first is that - // the class has no non-static member - // functions or variables, so this is - // not really a class, but rather - // serves the purpose of a - // namespace in C++. The reason - // that we chose a class over a - // namespace is that this way we can - // declare functions that are - // private, i.e. visible to the - // outside world but not - // callable. This can be done with - // namespaces as well, if one - // declares some functions in header - // files in the namespace and - // implements these and other - // functions in the implementation - // file. The functions not declared - // in the header file are still in - // the namespace but are not callable - // from outside. However, as we have - // only one file here, it is not - // possible to hide functions in the - // present case. 
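The "class used as a namespace" pattern described in the first annotation above boils down to something like the following minimal illustration (the names are made up for the example): a single public static entry point, plus private static helpers that client code cannot call directly.

@code
class EstimatorLike
{
public:
  static void estimate ();           // the only function callable by users

private:
  static void estimate_interval ();  // helper; not callable from outside the class
};
@endcode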
- // - // The second is that the dimension - // template parameter is attached to - // the function rather than to the - // class itself. This way, you don't - // have to specify the template - // parameter yourself as in most - // other cases, but the compiler can - // figure its value out itself from - // the dimension of the DoF handler - // object that one passes as first - // argument. - // - // Finally note that the - // IndexInterval typedef is - // introduced as a convenient - // abbreviation for an otherwise - // lengthy type name. + // @sect3{GradientEstimation class declaration} + + // Now, finally, here comes the class + // that will compute the difference + // approximation of the gradient on + // each cell and weighs that with a + // power of the mesh size, as + // described in the introduction. + // This class is a simple version of + // the DerivativeApproximation + // class in the library, that uses + // similar techniques to obtain + // finite difference approximations + // of the gradient of a finite + // element field, or if higher + // derivatives. + // + // The + // class has one public static + // function estimate that is + // called to compute a vector of + // error indicators, and one private + // function that does the actual work + // on an interval of all active + // cells. The latter is called by the + // first one in order to be able to + // do the computations in parallel if + // your computer has more than one + // processor. While the first + // function accepts as parameter a + // vector into which the error + // indicator is written for each + // cell. This vector is passed on to + // the second function that actually + // computes the error indicators on + // some cells, and the respective + // elements of the vector are + // written. By the way, we made it + // somewhat of a convention to use + // vectors of floats for error + // indicators rather than the common + // vectors of doubles, as the + // additional accuracy is not + // necessary for estimated values. + // + // In addition to these two + // functions, the class declares to + // exceptions which are raised when a + // cell has no neighbors in each of + // the space directions (in which + // case the matrix described in the + // introduction would be singular and + // can't be inverted), while the + // other one is used in the more + // common case of invalid parameters + // to a function, namely a vector of + // wrong size. + // + // Two annotations to this class are + // still in order: the first is that + // the class has no non-static member + // functions or variables, so this is + // not really a class, but rather + // serves the purpose of a + // namespace in C++. The reason + // that we chose a class over a + // namespace is that this way we can + // declare functions that are + // private, i.e. visible to the + // outside world but not + // callable. This can be done with + // namespaces as well, if one + // declares some functions in header + // files in the namespace and + // implements these and other + // functions in the implementation + // file. The functions not declared + // in the header file are still in + // the namespace but are not callable + // from outside. However, as we have + // only one file here, it is not + // possible to hide functions in the + // present case. + // + // The second is that the dimension + // template parameter is attached to + // the function rather than to the + // class itself. 
This way, you don't + // have to specify the template + // parameter yourself as in most + // other cases, but the compiler can + // figure its value out itself from + // the dimension of the DoF handler + // object that one passes as first + // argument. + // + // Finally note that the + // IndexInterval typedef is + // introduced as a convenient + // abbreviation for an otherwise + // lengthy type name. class GradientEstimation { - public: - template - static void estimate (const DoFHandler &dof, - const Vector &solution, - Vector &error_per_cell); - - DeclException2 (ExcInvalidVectorLength, - int, int, - << "Vector has length " << arg1 << ", but should have " - << arg2); - DeclException0 (ExcInsufficientDirections); - - private: - typedef std::pair IndexInterval; - - template - static void estimate_interval (const DoFHandler &dof, - const Vector &solution, - const IndexInterval &index_interval, - Vector &error_per_cell); + public: + template + static void estimate (const DoFHandler &dof, - const Vector &solution, ++ const Vector &solution, + Vector &error_per_cell); + + DeclException2 (ExcInvalidVectorLength, + int, int, + << "Vector has length " << arg1 << ", but should have " + << arg2); + DeclException0 (ExcInsufficientDirections); + + private: + typedef std::pair IndexInterval; + + template + static void estimate_interval (const DoFHandler &dof, - const Vector &solution, ++ const Vector &solution, + const IndexInterval &index_interval, + Vector &error_per_cell); }; @@@ -1459,24 -1459,24 +1459,24 @@@ template void GradientEstimation::estimate (const DoFHandler &dof_handler, - const Vector &solution, + const Vector &solution, Vector &error_per_cell) { - // Before starting with the work, - // we check that the vector into - // which the results are written, - // has the right size. It is a - // common error that such - // parameters have the wrong size, - // but the resulting damage by not - // catching these errors are very - // subtle as they are usually - // corruption of data somewhere in - // memory. Often, the problems - // emerging from this are not - // reproducible, and we found that - // it is well worth the effort to - // check for such things. + // Before starting with the work, + // we check that the vector into + // which the results are written, + // has the right size. It is a + // common error that such + // parameters have the wrong size, + // but the resulting damage by not + // catching these errors are very + // subtle as they are usually + // corruption of data somewhere in + // memory. Often, the problems + // emerging from this are not + // reproducible, and we found that + // it is well worth the effort to + // check for such things. Assert (error_per_cell.size() == dof_handler.get_tria().n_active_cells(), ExcInvalidVectorLength (error_per_cell.size(), dof_handler.get_tria().n_active_cells())); diff --cc deal.II/include/deal.II/base/conditional_ostream.h index a4c799f48b,d786f5d02e..753ade963e --- a/deal.II/include/deal.II/base/conditional_ostream.h +++ b/deal.II/include/deal.II/base/conditional_ostream.h @@@ -80,81 -80,81 +80,81 @@@ DEAL_II_NAMESPACE_OPE */ class ConditionalOStream { - public: - /** - * Constructor. Set the stream to which - * we want to write, and the condition - * based on which writes are actually - * forwarded. Per default the condition - * of an object is active. 
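As a usage note on the GradientEstimation class above: because the dimension template parameter is attached to estimate() rather than to the class, a call needs no explicit template argument; the compiler deduces it from the DoFHandler. In step-9 the call looks essentially like this (variable names are illustrative):

@code
Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
GradientEstimation::estimate (dof_handler,
                              solution,
                              estimated_error_per_cell);
@endcode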
- */ - ConditionalOStream (std::ostream &stream, - const bool active = true); - - /** - * Depending on the - * active flag set the - * condition of this stream to - * active (true) or non-active - * (false). An object of this - * class prints to cout - * if and only if its condition - * is active. - */ - void set_condition (const bool active); - - /** - * Return the condition of the object. - */ - bool is_active() const; - - /** - * Return a reference to the stream - * currently in use. - */ - std::ostream & get_stream () const; - - /** - * Output a constant something through - * this stream. This function must be @p - * const so that member objects of this - * type can also be used from @p const - * member functions of the surrounding - * class. - */ - template - const ConditionalOStream & - operator << (const T &t) const; - - /** - * Treat ostream manipulators. This - * function must be @p const so that - * member objects of this type can also - * be used from @p const member functions - * of the surrounding class. - * - * Note that compilers want to see this - * treated differently from the general - * template above since functions like @p - * std::endl are actually overloaded and - * can't be bound directly to a template - * type. - */ - const ConditionalOStream & - operator<< (std::ostream& (*p) (std::ostream&)) const; - - private: - /** - * Reference to the stream we - * want to write to. - */ - std::ostream &output_stream; - - /** - * Stores the actual condition - * the object is in. - */ - bool active_flag; + public: + /** + * Constructor. Set the stream to which + * we want to write, and the condition + * based on which writes are actually + * forwarded. Per default the condition + * of an object is active. + */ + ConditionalOStream (std::ostream &stream, + const bool active = true); + + /** + * Depending on the + * active flag set the + * condition of this stream to + * active (true) or non-active + * (false). An object of this + * class prints to cout + * if and only if its condition + * is active. + */ + void set_condition (const bool active); + + /** + * Return the condition of the object. + */ + bool is_active() const; + + /** + * Return a reference to the stream + * currently in use. + */ + std::ostream &get_stream () const; + + /** + * Output a constant something through + * this stream. This function must be @p + * const so that member objects of this + * type can also be used from @p const + * member functions of the surrounding + * class. + */ + template + const ConditionalOStream & + operator << (const T &t) const; + + /** + * Treat ostream manipulators. This + * function must be @p const so that + * member objects of this type can also + * be used from @p const member functions + * of the surrounding class. + * + * Note that compilers want to see this + * treated differently from the general + * template above since functions like @p + * std::endl are actually overloaded and + * can't be bound directly to a template + * type. + */ + const ConditionalOStream & + operator<< (std::ostream& (*p) (std::ostream &)) const; + + private: + /** + * Reference to the stream we + * want to write to. + */ - std::ostream &output_stream; ++ std::ostream &output_stream; + + /** + * Stores the actual condition + * the object is in. 
+ */ + bool active_flag; }; diff --cc deal.II/include/deal.II/base/data_out_base.h index b42e689aa9,f1ff018f5c..a8124528fc --- a/deal.II/include/deal.II/base/data_out_base.h +++ b/deal.II/include/deal.II/base/data_out_base.h @@@ -2178,640 -2179,640 +2179,640 @@@ private template class DataOutInterface : private DataOutBase { - public: - /* - * Import a few names that were - * previously in this class and have then - * moved to the base class. Since the - * base class is inherited from - * privately, we need to re-import these - * symbols to make sure that references - * to DataOutInterface::XXX - * remain valid. - */ - using DataOutBase::OutputFormat; - using DataOutBase::default_format; - using DataOutBase::dx; - using DataOutBase::gnuplot; - using DataOutBase::povray; - using DataOutBase::eps; - using DataOutBase::tecplot; - using DataOutBase::tecplot_binary; - using DataOutBase::vtk; - using DataOutBase::vtu; - using DataOutBase::deal_II_intermediate; - using DataOutBase::parse_output_format; - using DataOutBase::get_output_format_names; - using DataOutBase::determine_intermediate_format_dimensions; - - /** - * Constructor. - */ - DataOutInterface (); - - /** - * Destructor. Does nothing, but is - * declared virtual since this class has - * virtual functions. - */ - virtual ~DataOutInterface (); - - /** - * Obtain data through get_patches() - * and write it to out - * in OpenDX format. See - * DataOutBase::write_dx. - */ - void write_dx (std::ostream &out) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in EPS format. See - * DataOutBase::write_eps. - */ - void write_eps (std::ostream &out) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in GMV format. See - * DataOutBase::write_gmv. - */ - void write_gmv (std::ostream &out) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in GNUPLOT format. See - * DataOutBase::write_gnuplot. - */ - void write_gnuplot (std::ostream &out) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in POVRAY format. See - * DataOutBase::write_povray. - */ - void write_povray (std::ostream &out) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in Tecplot format. See - * DataOutBase::write_tecplot. - */ - void write_tecplot (std::ostream &out) const; - - /** - * Obtain data through - * get_patches() and write it in - * the Tecplot binary output - * format. Note that the name of - * the output file must be - * specified through the - * TecplotFlags interface. - */ - void write_tecplot_binary (std::ostream &out) const; - - /** - * Obtain data through - * get_patches() and write it to - * out in UCD format for - * AVS. See - * DataOutBase::write_ucd. - */ - void write_ucd (std::ostream &out) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in Vtk format. See - * DataOutBase::write_vtk. - */ - void write_vtk (std::ostream &out) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in Vtu (VTK's XML) format. See - * DataOutBase::write_vtu. - * - * Some visualization programs, - * such as ParaView, can read - * several separate VTU files to - * parallelize visualization. In - * that case, you need a - * .pvtu file that - * describes which VTU files form - * a group. The - * DataOutInterface::write_pvtu_record() - * function can generate such a - * master record. Likewise, - * DataOutInterface::write_visit_record() - * does the same for VisIt. 
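To make the master-record idea concrete before the detailed documentation of write_pvtu_record() further below, a .pvtu file for pieces written by several parallel processes could be generated along the following lines (a sketch: data_out, n_mpi_processes and the file naming scheme are placeholders chosen for illustration):

@code
std::vector<std::string> piece_names;
for (unsigned int p = 0; p < n_mpi_processes; ++p)
  piece_names.push_back ("solution-" +
                         Utilities::int_to_string (p, 4) +
                         ".vtu");

std::ofstream pvtu_output ("solution.pvtu");
data_out.write_pvtu_record (pvtu_output, piece_names);
@endcode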
Finally, - * DataOutInterface::write_pvd_record() - * can be used to group together - * the files that jointly make up - * a time dependent simulation. - */ - void write_vtu (std::ostream &out) const; - - /** - * Collective MPI call to write the - * solution from all participating nodes - * (those in the given communicator) to a - * single compressed .vtu file on a - * shared file system. The communicator - * can be a sub communicator of the one - * used by the computation. This routine - * uses MPI I/O to achieve high - * performance on parallel filesystems. - * Also see - * DataOutInterface::write_vtu(). - */ - void write_vtu_in_parallel (const char* filename, MPI_Comm comm) const; - - /** - * Some visualization programs, such as - * ParaView, can read several separate - * VTU files to parallelize - * visualization. In that case, you need - * a .pvtu file that - * describes which VTU files (written, - * for example, through the write_vtu() - * function) form a group. The current - * function can generate such a master - * record. - * - * The file so written contains a list of - * (scalar or vector) fields whose values - * are described by the individual files - * that comprise the set of parallel VTU - * files along with the names of these - * files. This function gets the names - * and types of fields through the - * get_patches() function of this class - * like all the other write_xxx() - * functions. The second argument to this - * function specifies the names of the - * files that form the parallel set. - * - * @note See DataOutBase::write_vtu for - * writing each piece. Also note that - * only one parallel process needs to - * call the current function, listing the - * names of the files written by all - * parallel processes. - * - * @note The use of this function is - * explained in step-40. - * - * @note In order to tell Paraview to - * group together multiple pvtu - * files that each describe one time - * step of a time dependent simulation, - * see the - * DataOutInterface::write_pvd_record() - * function. - * - * @note At the time of writing, - * the other big VTK-based - * visualization program, VisIt, - * can not read pvtu - * records. However, it can read - * visit records as written by - * the write_visit_record() - * function. - */ - void write_pvtu_record (std::ostream &out, - const std::vector &piece_names) const; - - /** - * In ParaView it is possible to visualize time-dependent - * data tagged with the current - * integration time of a time dependent simulation. To use this - * feature you need a .pvd - * file that describes which VTU or PVTU file - * belongs to which timestep. This function writes a file that - * provides this mapping, i.e., it takes a list of pairs each of - * which indicates a particular time instant and the corresponding - * file that contains the graphical data for this time instant. 
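As an illustration of the parallel VTU writer declared above, it would be called along these lines; data_out and mpi_communicator are assumed to exist in the calling program:

  // collective call: every process in the communicator participates,
  // and a single compressed .vtu file is produced via MPI I/O
  data_out.write_vtu_in_parallel ("solution.vtu", mpi_communicator);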
- * - * A typical use case, in program that computes a time dependent - * solution, would be the following (time and - * time_step are member variables of the class with types - * double and unsigned int, respectively; - * the variable times_and_names is of type - * std::vector@ @>): - * - * @code - * template - * void MyEquation::output_results () const - * { - * DataOut data_out; - * - * data_out.attach_dof_handler (dof_handler); - * data_out.add_data_vector (solution, "U"); - * data_out.build_patches (); - * - * const std::string filename = "solution-" + - * Utilities::int_to_string (timestep_number, 3) + - * ".vtu"; - * std::ofstream output (filename.c_str()); - * data_out.write_vtu (output); - * - * times_and_names.push_back (std::pair (time, filename)); - * std::ofstream pvd_output ("solution.pvd"); - * data_out.write_pvd_record (pvd_output, times_and_names); - * } - * @endcode - * - * @note See DataOutBase::write_vtu or - * DataOutInterface::write_pvtu_record for - * writing solutions at each timestep. - * - * @note The second element of each pair, i.e., the file in which - * the graphical data for each time is stored, may itself be again - * a file that references other files. For example, it could be - * the name for a .pvtu file that references multiple - * parts of a parallel computation. - * - * @author Marco Engelhard, 2012 - */ - void write_pvd_record (std::ostream &out, - const std::vector > ×_and_names) const; - - /** - * This function is the exact - * equivalent of the - * write_pvtu_record() function - * but for the VisIt - * visualization program. See - * there for the purpose of this - * function. - * - * This function is documented - * in the "Creating a master file - * for parallel" section (section 5.7) - * of the "Getting data into VisIt" - * report that can be found here: - * https://wci.llnl.gov/codes/visit/2.0.0/GettingDataIntoVisIt2.0.0.pdf - */ - void write_visit_record (std::ostream &out, - const std::vector &piece_names) const; - - /** - * Obtain data through get_patches() - * and write it to out - * in deal.II intermediate - * format. See - * DataOutBase::write_deal_II_intermediate. - * - * Note that the intermediate - * format is what its name - * suggests: a direct - * representation of internal - * data. It isn't standardized - * and will change whenever we - * change our internal - * representation. You can only - * expect to process files - * written in this format using - * the same version of deal.II - * that was used for writing. - */ - void write_deal_II_intermediate (std::ostream &out) const; - - XDMFEntry create_xdmf_entry (const char *h5_filename, - const double cur_time, - MPI_Comm comm) const; - - void write_xdmf_file (const std::vector &entries, - const char *filename, - MPI_Comm comm) const; - - void write_hdf5_parallel (const char* filename, MPI_Comm comm) const; - /** - * Write data and grid to out - * according to the given data - * format. This function simply - * calls the appropriate - * write_* function. If no - * output format is requested, - * the default_format is - * written. - * - * An error occurs if no format - * is provided and the default - * format is default_format. - */ - void write (std::ostream &out, - const OutputFormat output_format = default_format) const; - - /** - * Set the default format. The - * value set here is used - * anytime, output for format - * default_format is - * requested. 
- */ - void set_default_format (const OutputFormat default_format); - - /** - * Set the flags to be used for - * output in OpenDX format. - */ - void set_flags (const DXFlags &dx_flags); - - /** - * Set the flags to be used for - * output in UCD format. - */ - void set_flags (const UcdFlags &ucd_flags); - - /** - * Set the flags to be used for - * output in GNUPLOT format. - */ - void set_flags (const GnuplotFlags &gnuplot_flags); - - /** - * Set the flags to be used for - * output in POVRAY format. - */ - void set_flags (const PovrayFlags &povray_flags); - - /** - * Set the flags to be used for - * output in EPS output. - */ - void set_flags (const EpsFlags &eps_flags); - - /** - * Set the flags to be used for - * output in GMV format. - */ - void set_flags (const GmvFlags &gmv_flags); - - /** - * Set the flags to be used for - * output in Tecplot format. - */ - void set_flags (const TecplotFlags &tecplot_flags); - - /** - * Set the flags to be used for - * output in VTK format. - */ - void set_flags (const VtkFlags &vtk_flags); - - /** - * Set the flags to be used for output in - * deal.II intermediate format. - */ - void set_flags (const Deal_II_IntermediateFlags &deal_II_intermediate_flags); - - /** - * A function that returns the same - * string as the respective function in - * the base class does; the only - * exception being that if the parameter - * is omitted, then the value for the - * present default format is returned, - * i.e. the correct suffix for the format - * that was set through - * set_default_format() or - * parse_parameters() before calling this - * function. - */ - std::string - default_suffix (const OutputFormat output_format = default_format) const; - - /** - * Declare parameters for all - * output formats by declaring - * subsections within the - * parameter file for each output - * format and call the respective - * declare_parameters - * functions of the flag classes - * for each output format. - * - * Some of the declared - * subsections may not contain - * entries, if the respective - * format does not export any - * flags. - * - * Note that the top-level - * parameters denoting the number - * of subdivisions per patch and - * the output format are not - * declared, since they are only - * passed to virtual functions - * and are not stored inside - * objects of this type. You have - * to declare them yourself. - */ - static void declare_parameters (ParameterHandler &prm); - - /** - * Read the parameters declared - * in declare_parameters and - * set the flags for the output - * formats accordingly. - * - * The flags thus obtained - * overwrite all previous - * contents of the flag objects - * as default-constructed or set - * by the set_flags() function. - */ - void parse_parameters (ParameterHandler &prm); - - /** - * Determine an estimate for - * the memory consumption (in - * bytes) of this - * object. Since sometimes - * the size of objects can - * not be determined exactly - * (for example: what is the - * memory consumption of an - * STL std::map type with a - * certain number of - * elements?), this is only - * an estimate. however often - * quite close to the true - * value. - */ - std::size_t memory_consumption () const; - - protected: - /** - * This is the abstract function - * through which derived classes - * propagate preprocessed data in - * the form of Patch - * structures (declared in the - * base class DataOutBase) to - * the actual output - * function. 
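To show how the default-format handling above fits together, a small sketch; data_out is assumed to be a DataOut object whose patches have already been built:

  #include <fstream>

  // pick a format once, then let write() and default_suffix() follow it
  data_out.set_default_format (DataOutBase::vtk);

  const std::string filename = "solution" + data_out.default_suffix (); // "solution.vtk"
  std::ofstream output (filename.c_str ());
  data_out.write (output);                  // dispatches to write_vtk()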
You need to overload - * this function to allow the - * output functions to know what - * they shall print. - */ - virtual - const std::vector > & - get_patches () const = 0; - - /** - * Abstract virtual function - * through which the names of - * data sets are obtained by the - * output functions of the base - * class. - */ - virtual - std::vector - get_dataset_names () const = 0; - - /** - * This functions returns - * information about how the - * individual components of - * output files that consist of - * more than one data set are to - * be interpreted. - * - * It returns a list of index - * pairs and corresponding name - * indicating which components of - * the output are to be - * considered vector-valued - * rather than just a collection - * of scalar data. The index - * pairs are inclusive; for - * example, if we have a Stokes - * problem in 2d with components - * (u,v,p), then the - * corresponding vector data - * range should be (0,1), and the - * returned list would consist of - * only a single element with a - * tuple such as (0,1,"velocity"). - * - * Since some of the derived - * classes do not know about - * vector data, this function has - * a default implementation that - * simply returns an empty - * string, meaning that all data - * is to be considered a - * collection of scalar fields. - */ - virtual - std::vector > - get_vector_data_ranges () const; - - /** - * The default number of - * subdivisions for patches. This - * is filled by parse_parameters() - * and should be obeyed by - * build_patches() in derived - * classes. - */ - unsigned int default_subdivisions; + public: + /* + * Import a few names that were + * previously in this class and have then + * moved to the base class. Since the + * base class is inherited from + * privately, we need to re-import these + * symbols to make sure that references + * to DataOutInterface::XXX + * remain valid. + */ + using DataOutBase::OutputFormat; + using DataOutBase::default_format; + using DataOutBase::dx; + using DataOutBase::gnuplot; + using DataOutBase::povray; + using DataOutBase::eps; + using DataOutBase::tecplot; + using DataOutBase::tecplot_binary; + using DataOutBase::vtk; + using DataOutBase::vtu; + using DataOutBase::deal_II_intermediate; + using DataOutBase::parse_output_format; + using DataOutBase::get_output_format_names; + using DataOutBase::determine_intermediate_format_dimensions; + + /** + * Constructor. + */ + DataOutInterface (); + + /** + * Destructor. Does nothing, but is + * declared virtual since this class has + * virtual functions. + */ + virtual ~DataOutInterface (); + + /** + * Obtain data through get_patches() + * and write it to out + * in OpenDX format. See + * DataOutBase::write_dx. + */ + void write_dx (std::ostream &out) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in EPS format. See + * DataOutBase::write_eps. + */ + void write_eps (std::ostream &out) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in GMV format. See + * DataOutBase::write_gmv. + */ + void write_gmv (std::ostream &out) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in GNUPLOT format. See + * DataOutBase::write_gnuplot. + */ + void write_gnuplot (std::ostream &out) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in POVRAY format. See + * DataOutBase::write_povray. 
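The get_vector_data_ranges() hook mentioned above can be overridden in a derived class. The following is only a sketch; in particular, MyDataOut is a hypothetical class and the tuple type is spelled std_cxx1x::tuple on the assumption that this matches the return type used by this header:

  // hypothetical DataOut-like class for a 2d Stokes problem with components (u,v,p):
  // components 0 and 1 form one vector field called "velocity"
  template <int dim>
  std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> >
  MyDataOut<dim>::get_vector_data_ranges () const
  {
    std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> > ranges;
    ranges.push_back (std_cxx1x::tuple<unsigned int, unsigned int, std::string>
                      (0, 1, "velocity"));
    return ranges;
  }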
+ */ + void write_povray (std::ostream &out) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in Tecplot format. See + * DataOutBase::write_tecplot. + */ + void write_tecplot (std::ostream &out) const; + + /** + * Obtain data through + * get_patches() and write it in + * the Tecplot binary output + * format. Note that the name of + * the output file must be + * specified through the + * TecplotFlags interface. + */ + void write_tecplot_binary (std::ostream &out) const; + + /** + * Obtain data through + * get_patches() and write it to + * out in UCD format for + * AVS. See + * DataOutBase::write_ucd. + */ + void write_ucd (std::ostream &out) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in Vtk format. See + * DataOutBase::write_vtk. + */ + void write_vtk (std::ostream &out) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in Vtu (VTK's XML) format. See + * DataOutBase::write_vtu. + * + * Some visualization programs, + * such as ParaView, can read + * several separate VTU files to + * parallelize visualization. In + * that case, you need a + * .pvtu file that + * describes which VTU files form + * a group. The + * DataOutInterface::write_pvtu_record() + * function can generate such a + * master record. Likewise, + * DataOutInterface::write_visit_record() + * does the same for VisIt. Finally, + * DataOutInterface::write_pvd_record() + * can be used to group together + * the files that jointly make up + * a time dependent simulation. + */ + void write_vtu (std::ostream &out) const; + + /** + * Collective MPI call to write the + * solution from all participating nodes + * (those in the given communicator) to a + * single compressed .vtu file on a + * shared file system. The communicator + * can be a sub communicator of the one + * used by the computation. This routine + * uses MPI I/O to achieve high + * performance on parallel filesystems. + * Also see + * DataOutInterface::write_vtu(). + */ + void write_vtu_in_parallel (const char *filename, MPI_Comm comm) const; + + /** + * Some visualization programs, such as + * ParaView, can read several separate + * VTU files to parallelize + * visualization. In that case, you need + * a .pvtu file that + * describes which VTU files (written, + * for example, through the write_vtu() + * function) form a group. The current + * function can generate such a master + * record. + * + * The file so written contains a list of + * (scalar or vector) fields whose values + * are described by the individual files + * that comprise the set of parallel VTU + * files along with the names of these + * files. This function gets the names + * and types of fields through the + * get_patches() function of this class + * like all the other write_xxx() + * functions. The second argument to this + * function specifies the names of the + * files that form the parallel set. + * + * @note See DataOutBase::write_vtu for + * writing each piece. Also note that + * only one parallel process needs to + * call the current function, listing the + * names of the files written by all + * parallel processes. + * + * @note The use of this function is + * explained in step-40. + * + * @note In order to tell Paraview to + * group together multiple pvtu + * files that each describe one time + * step of a time dependent simulation, + * see the + * DataOutInterface::write_pvd_record() + * function. 
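A sketch of the piece and master-record workflow for the function documented above; my_rank, n_mpi_processes and data_out are assumptions of this example:

  #include <deal.II/base/utilities.h>
  #include <fstream>

  // each process writes its own piece ...
  const std::string piece_name =
    "solution-" + Utilities::int_to_string (my_rank, 4) + ".vtu";
  {
    std::ofstream out (piece_name.c_str ());
    data_out.write_vtu (out);
  }

  // ... and one process writes the master record that groups them
  if (my_rank == 0)
    {
      std::vector<std::string> piece_names;
      for (unsigned int p = 0; p < n_mpi_processes; ++p)
        piece_names.push_back ("solution-" + Utilities::int_to_string (p, 4) + ".vtu");

      std::ofstream master ("solution.pvtu");
      data_out.write_pvtu_record (master, piece_names);
    }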
+ * + * @note At the time of writing, + * the other big VTK-based + * visualization program, VisIt, + * can not read pvtu + * records. However, it can read + * visit records as written by + * the write_visit_record() + * function. + */ + void write_pvtu_record (std::ostream &out, + const std::vector &piece_names) const; + + /** + * In ParaView it is possible to visualize time-dependent + * data tagged with the current + * integration time of a time dependent simulation. To use this + * feature you need a .pvd + * file that describes which VTU or PVTU file + * belongs to which timestep. This function writes a file that + * provides this mapping, i.e., it takes a list of pairs each of + * which indicates a particular time instant and the corresponding + * file that contains the graphical data for this time instant. + * + * A typical use case, in program that computes a time dependent + * solution, would be the following (time and + * time_step are member variables of the class with types + * double and unsigned int, respectively; + * the variable times_and_names is of type + * std::vector@ @>): + * + * @code + * template + * void MyEquation::output_results () const + * { + * DataOut data_out; + * + * data_out.attach_dof_handler (dof_handler); + * data_out.add_data_vector (solution, "U"); + * data_out.build_patches (); + * + * const std::string filename = "solution-" + + * Utilities::int_to_string (timestep_number, 3) + + * ".vtu"; + * std::ofstream output (filename.c_str()); + * data_out.write_vtu (output); + * + * times_and_names.push_back (std::pair (time, filename)); + * std::ofstream pvd_output ("solution.pvd"); + * data_out.write_pvd_record (pvd_output, times_and_names); + * } + * @endcode + * + * @note See DataOutBase::write_vtu or + * DataOutInterface::write_pvtu_record for + * writing solutions at each timestep. + * + * @note The second element of each pair, i.e., the file in which + * the graphical data for each time is stored, may itself be again + * a file that references other files. For example, it could be + * the name for a .pvtu file that references multiple + * parts of a parallel computation. + * + * @author Marco Engelhard, 2012 + */ + void write_pvd_record (std::ostream &out, - const std::vector > ×_and_names) const; ++ const std::vector > ×_and_names) const; + + /** + * This function is the exact + * equivalent of the + * write_pvtu_record() function + * but for the VisIt + * visualization program. See + * there for the purpose of this + * function. + * + * This function is documented + * in the "Creating a master file + * for parallel" section (section 5.7) + * of the "Getting data into VisIt" + * report that can be found here: + * https://wci.llnl.gov/codes/visit/2.0.0/GettingDataIntoVisIt2.0.0.pdf + */ + void write_visit_record (std::ostream &out, + const std::vector &piece_names) const; + + /** + * Obtain data through get_patches() + * and write it to out + * in deal.II intermediate + * format. See + * DataOutBase::write_deal_II_intermediate. + * + * Note that the intermediate + * format is what its name + * suggests: a direct + * representation of internal + * data. It isn't standardized + * and will change whenever we + * change our internal + * representation. You can only + * expect to process files + * written in this format using + * the same version of deal.II + * that was used for writing. 
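For VisIt, the corresponding master record can be written in the same spirit; again a sketch with the assumed variables n_mpi_processes and data_out:

  std::vector<std::string> piece_names;
  for (unsigned int p = 0; p < n_mpi_processes; ++p)
    piece_names.push_back ("solution-" + Utilities::int_to_string (p, 4) + ".vtu");

  std::ofstream visit_master ("solution.visit");
  data_out.write_visit_record (visit_master, piece_names);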
+ */ + void write_deal_II_intermediate (std::ostream &out) const; + + XDMFEntry create_xdmf_entry (const char *h5_filename, + const double cur_time, + MPI_Comm comm) const; + + void write_xdmf_file (const std::vector &entries, + const char *filename, + MPI_Comm comm) const; + + void write_hdf5_parallel (const char *filename, MPI_Comm comm) const; + /** + * Write data and grid to out + * according to the given data + * format. This function simply + * calls the appropriate + * write_* function. If no + * output format is requested, + * the default_format is + * written. + * + * An error occurs if no format + * is provided and the default + * format is default_format. + */ + void write (std::ostream &out, + const OutputFormat output_format = default_format) const; + + /** + * Set the default format. The + * value set here is used + * anytime, output for format + * default_format is + * requested. + */ + void set_default_format (const OutputFormat default_format); + + /** + * Set the flags to be used for + * output in OpenDX format. + */ + void set_flags (const DXFlags &dx_flags); + + /** + * Set the flags to be used for + * output in UCD format. + */ + void set_flags (const UcdFlags &ucd_flags); + + /** + * Set the flags to be used for + * output in GNUPLOT format. + */ + void set_flags (const GnuplotFlags &gnuplot_flags); + + /** + * Set the flags to be used for + * output in POVRAY format. + */ + void set_flags (const PovrayFlags &povray_flags); + + /** + * Set the flags to be used for + * output in EPS output. + */ + void set_flags (const EpsFlags &eps_flags); + + /** + * Set the flags to be used for + * output in GMV format. + */ + void set_flags (const GmvFlags &gmv_flags); + + /** + * Set the flags to be used for + * output in Tecplot format. + */ + void set_flags (const TecplotFlags &tecplot_flags); + + /** + * Set the flags to be used for + * output in VTK format. + */ + void set_flags (const VtkFlags &vtk_flags); + + /** + * Set the flags to be used for output in + * deal.II intermediate format. + */ + void set_flags (const Deal_II_IntermediateFlags &deal_II_intermediate_flags); + + /** + * A function that returns the same + * string as the respective function in + * the base class does; the only + * exception being that if the parameter + * is omitted, then the value for the + * present default format is returned, + * i.e. the correct suffix for the format + * that was set through + * set_default_format() or + * parse_parameters() before calling this + * function. + */ + std::string + default_suffix (const OutputFormat output_format = default_format) const; + + /** + * Declare parameters for all + * output formats by declaring + * subsections within the + * parameter file for each output + * format and call the respective + * declare_parameters + * functions of the flag classes + * for each output format. + * + * Some of the declared + * subsections may not contain + * entries, if the respective + * format does not export any + * flags. + * + * Note that the top-level + * parameters denoting the number + * of subdivisions per patch and + * the output format are not + * declared, since they are only + * passed to virtual functions + * and are not stored inside + * objects of this type. You have + * to declare them yourself. + */ + static void declare_parameters (ParameterHandler &prm); + + /** + * Read the parameters declared + * in declare_parameters and + * set the flags for the output + * formats accordingly. 
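The three HDF5/XDMF calls declared above are typically chained once per time step. This is a sketch; mpi_communicator and current_time are supplied by the caller:

  // all three calls are collective over the given communicator
  data_out.write_hdf5_parallel ("solution.h5", mpi_communicator);

  // keep the entries around so the .xdmf file accumulates all time steps
  static std::vector<XDMFEntry> xdmf_entries;
  xdmf_entries.push_back (data_out.create_xdmf_entry ("solution.h5",
                                                      current_time,
                                                      mpi_communicator));
  data_out.write_xdmf_file (xdmf_entries, "solution.xdmf", mpi_communicator);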
+ * + * The flags thus obtained + * overwrite all previous + * contents of the flag objects + * as default-constructed or set + * by the set_flags() function. + */ + void parse_parameters (ParameterHandler &prm); + + /** + * Determine an estimate for + * the memory consumption (in + * bytes) of this + * object. Since sometimes + * the size of objects can + * not be determined exactly + * (for example: what is the + * memory consumption of an + * STL std::map type with a + * certain number of + * elements?), this is only + * an estimate. however often + * quite close to the true + * value. + */ + std::size_t memory_consumption () const; + + protected: + /** + * This is the abstract function + * through which derived classes + * propagate preprocessed data in + * the form of Patch + * structures (declared in the + * base class DataOutBase) to + * the actual output + * function. You need to overload + * this function to allow the + * output functions to know what + * they shall print. + */ + virtual + const std::vector > & + get_patches () const = 0; + + /** + * Abstract virtual function + * through which the names of + * data sets are obtained by the + * output functions of the base + * class. + */ + virtual + std::vector + get_dataset_names () const = 0; + + /** + * This functions returns + * information about how the + * individual components of + * output files that consist of + * more than one data set are to + * be interpreted. + * + * It returns a list of index + * pairs and corresponding name + * indicating which components of + * the output are to be + * considered vector-valued + * rather than just a collection + * of scalar data. The index + * pairs are inclusive; for + * example, if we have a Stokes + * problem in 2d with components + * (u,v,p), then the + * corresponding vector data + * range should be (0,1), and the + * returned list would consist of + * only a single element with a + * tuple such as (0,1,"velocity"). + * + * Since some of the derived + * classes do not know about + * vector data, this function has + * a default implementation that + * simply returns an empty + * string, meaning that all data + * is to be considered a + * collection of scalar fields. + */ + virtual + std::vector > + get_vector_data_ranges () const; + + /** + * The default number of + * subdivisions for patches. This + * is filled by parse_parameters() + * and should be obeyed by + * build_patches() in derived + * classes. + */ + unsigned int default_subdivisions; - private: - /** - * Standard output format. Use - * this format, if output format - * default_format is - * requested. It can be changed - * by the set_format function - * or in a parameter file. - */ - OutputFormat default_fmt; - - /** - * Flags to be used upon output - * of OpenDX data. Can be changed by - * using the set_flags - * function. - */ - DXFlags dx_flags; - - /** - * Flags to be used upon output - * of UCD data. Can be changed by - * using the set_flags - * function. - */ - UcdFlags ucd_flags; - - /** - * Flags to be used upon output - * of GNUPLOT data. Can be - * changed by using the - * set_flags function. - */ - GnuplotFlags gnuplot_flags; - - /** - * Flags to be used upon output - * of POVRAY data. Can be changed - * by using the set_flags - * function. - */ - PovrayFlags povray_flags; - - /** - * Flags to be used upon output - * of EPS data in one space - * dimension. Can be changed by - * using the set_flags - * function. 
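One way to drive the flag classes from a parameter file, as a sketch; the subsection name "Output" and the file parameters.prm are choices of this example, not requirements of the interface:

  ParameterHandler prm;
  prm.enter_subsection ("Output");
  DataOut<dim>::declare_parameters (prm);   // one subsection per output format
  prm.leave_subsection ();

  std::ifstream parameter_file ("parameters.prm");
  prm.read_input (parameter_file);

  prm.enter_subsection ("Output");
  data_out.parse_parameters (prm);          // copies the flags into data_out
  prm.leave_subsection ();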
- */ - EpsFlags eps_flags; - - /** - * Flags to be used upon output - * of gmv data in one space - * dimension. Can be changed by - * using the set_flags - * function. - */ - GmvFlags gmv_flags; - - /** - * Flags to be used upon output - * of Tecplot data in one space - * dimension. Can be changed by - * using the set_flags - * function. - */ - TecplotFlags tecplot_flags; - - /** - * Flags to be used upon output - * of vtk data in one space - * dimension. Can be changed by - * using the set_flags - * function. - */ - VtkFlags vtk_flags; - - /** - * Flags to be used upon output of - * deal.II intermediate data in one space - * dimension. Can be changed by using the - * set_flags function. - */ - Deal_II_IntermediateFlags deal_II_intermediate_flags; + private: + /** + * Standard output format. Use + * this format, if output format + * default_format is + * requested. It can be changed + * by the set_format function + * or in a parameter file. + */ + OutputFormat default_fmt; + + /** + * Flags to be used upon output + * of OpenDX data. Can be changed by + * using the set_flags + * function. + */ + DXFlags dx_flags; + + /** + * Flags to be used upon output + * of UCD data. Can be changed by + * using the set_flags + * function. + */ + UcdFlags ucd_flags; + + /** + * Flags to be used upon output + * of GNUPLOT data. Can be + * changed by using the + * set_flags function. + */ + GnuplotFlags gnuplot_flags; + + /** + * Flags to be used upon output + * of POVRAY data. Can be changed + * by using the set_flags + * function. + */ + PovrayFlags povray_flags; + + /** + * Flags to be used upon output + * of EPS data in one space + * dimension. Can be changed by + * using the set_flags + * function. + */ + EpsFlags eps_flags; + + /** + * Flags to be used upon output + * of gmv data in one space + * dimension. Can be changed by + * using the set_flags + * function. + */ + GmvFlags gmv_flags; + + /** + * Flags to be used upon output + * of Tecplot data in one space + * dimension. Can be changed by + * using the set_flags + * function. + */ + TecplotFlags tecplot_flags; + + /** + * Flags to be used upon output + * of vtk data in one space + * dimension. Can be changed by + * using the set_flags + * function. + */ + VtkFlags vtk_flags; + + /** + * Flags to be used upon output of + * deal.II intermediate data in one space + * dimension. Can be changed by using the + * set_flags function. + */ + Deal_II_IntermediateFlags deal_II_intermediate_flags; }; diff --cc deal.II/include/deal.II/base/exceptions.h index 87c3c8fc09,002ddae58f..ed9b1615e1 --- a/deal.II/include/deal.II/base/exceptions.h +++ b/deal.II/include/deal.II/base/exceptions.h @@@ -47,131 -47,131 +47,131 @@@ DEAL_II_NAMESPACE_OPE */ class ExceptionBase : public std::exception { - public: - /** - * Default constructor. - */ - ExceptionBase (); - - /** - * The constructor takes the file in which the - * error happened, the line and the violated - * condition as well as the name of the - * exception class as a char* as arguments. - */ - ExceptionBase (const char* f, const int l, const char *func, - const char* c, const char *e); - - /** - * Copy constructor. - */ - ExceptionBase (const ExceptionBase &exc); - - /** - * Destructor. Empty, but needed - * for the sake of exception - * specification, since the base - * class has this exception - * specification and the - * automatically generated - * destructor would have a - * different one due to member - * objects. 
- */ - virtual ~ExceptionBase () throw(); - - /** - * Set the file name and line of where the - * exception appeared as well as the violated - * condition and the name of the exception as - * a char pointer. - */ - void set_fields (const char *f, - const int l, - const char *func, - const char *c, - const char *e); - - /** - * Print out the general part of the error - * information. - */ - void print_exc_data (std::ostream &out) const; - - /** - * Print more specific information about the - * exception which occured. Overload this - * function in your own exception classes. - */ - virtual void print_info (std::ostream &out) const; - - - /** - * Function derived from the base class - * which allows to pass information like - * the line and name of the file where the - * exception occurred as well as user - * information. - * - * This function is mainly used - * when using exceptions - * declared by the - * DeclException* - * macros with the - * throw mechanism or - * the AssertThrow - * macro. - */ - virtual const char * what () const throw (); - - /** - * Print a stacktrace, if one has - * been recorded previously, to - * the given stream. - */ - void print_stack_trace (std::ostream &out) const; - - protected: - /** - * Name of the file this exception happen in. - */ - const char *file; - - /** - * Line number in this file. - */ - unsigned int line; - - /** - * Name of the function, pretty printed. - */ - const char *function; - - /** - * The violated condition, as a string. - */ - const char *cond; - - /** - * Name of the exception and call sequence. - */ - const char *exc; - - /** - * A backtrace to the position - * where the problem happened, if - * the system supports this. - */ - char ** stacktrace; - - /** - * The number of stacktrace - * frames that are stored in the - * previous variable. Zero if the - * system does not support stack - * traces. - */ - int n_stacktrace_frames; + public: + /** + * Default constructor. + */ + ExceptionBase (); + + /** + * The constructor takes the file in which the + * error happened, the line and the violated + * condition as well as the name of the + * exception class as a char* as arguments. + */ + ExceptionBase (const char *f, const int l, const char *func, + const char *c, const char *e); + + /** + * Copy constructor. + */ + ExceptionBase (const ExceptionBase &exc); + + /** + * Destructor. Empty, but needed + * for the sake of exception + * specification, since the base + * class has this exception + * specification and the + * automatically generated + * destructor would have a + * different one due to member + * objects. + */ + virtual ~ExceptionBase () throw(); + + /** + * Set the file name and line of where the + * exception appeared as well as the violated + * condition and the name of the exception as + * a char pointer. + */ + void set_fields (const char *f, + const int l, + const char *func, + const char *c, + const char *e); + + /** + * Print out the general part of the error + * information. + */ + void print_exc_data (std::ostream &out) const; + + /** + * Print more specific information about the + * exception which occured. Overload this + * function in your own exception classes. + */ + virtual void print_info (std::ostream &out) const; + + + /** + * Function derived from the base class + * which allows to pass information like + * the line and name of the file where the + * exception occurred as well as user + * information. 
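In user code, classes derived from ExceptionBase are normally not written by hand but generated with the DeclExceptionN macros and raised through Assert or AssertThrow; a sketch with made-up names:

  // declares an exception class that ultimately derives from ExceptionBase
  DeclException1 (ExcTooManyIterations,
                  unsigned int,
                  << "The solver did not converge within " << arg1 << " iterations.");

  // ...

  AssertThrow (iteration < max_iterations,
               ExcTooManyIterations (max_iterations));
  // if the condition fails, file, line, function and condition text are
  // filled in by the macro and what() returns the assembled message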
+ * + * This function is mainly used + * when using exceptions + * declared by the + * DeclException* + * macros with the + * throw mechanism or + * the AssertThrow + * macro. + */ + virtual const char *what () const throw (); + + /** + * Print a stacktrace, if one has + * been recorded previously, to + * the given stream. + */ + void print_stack_trace (std::ostream &out) const; + + protected: + /** + * Name of the file this exception happen in. + */ - const char *file; ++ const char *file; + + /** + * Line number in this file. + */ + unsigned int line; + + /** + * Name of the function, pretty printed. + */ - const char *function; ++ const char *function; + + /** + * The violated condition, as a string. + */ - const char *cond; ++ const char *cond; + + /** + * Name of the exception and call sequence. + */ - const char *exc; ++ const char *exc; + + /** + * A backtrace to the position + * where the problem happened, if + * the system supports this. + */ + char **stacktrace; + + /** + * The number of stacktrace + * frames that are stored in the + * previous variable. Zero if the + * system does not support stack + * traces. + */ + int n_stacktrace_frames; }; diff --cc deal.II/include/deal.II/base/logstream.h index e8223227c5,d4d47ef84b..cb50979e38 --- a/deal.II/include/deal.II/base/logstream.h +++ b/deal.II/include/deal.II/base/logstream.h @@@ -85,530 -85,530 +85,530 @@@ DEAL_II_NAMESPACE_OPE */ class LogStream : public Subscriptor { + public: + /** + * A subclass allowing for the + * safe generation and removal of + * prefices. + * + * Somewhere at the beginning of + * a block, create one of these + * objects, and it will appear as + * a prefix in LogStream output + * like @p deallog. At the end of + * the block, the prefix will + * automatically be removed, when + * this object is destroyed. + */ + class Prefix + { public: - /** - * A subclass allowing for the - * safe generation and removal of - * prefices. - * - * Somewhere at the beginning of - * a block, create one of these - * objects, and it will appear as - * a prefix in LogStream output - * like @p deallog. At the end of - * the block, the prefix will - * automatically be removed, when - * this object is destroyed. - */ - class Prefix - { - public: - /** - * Set a new prefix for - * @p deallog, which will be - * removed when the variable - * is destroyed . - */ - Prefix(const std::string& text); - - /** - * Set a new prefix for the - * given stream, which will - * be removed when the - * variable is destroyed . - */ - Prefix(const std::string& text, LogStream& stream); - - /** - * Remove the prefix - * associated with this - * variable. - */ - ~Prefix (); - - private: - SmartPointer stream; - }; - - /** - * Standard constructor, since we - * intend to provide an object - * deallog in the library. Set the - * standard output stream to std::cerr. - */ - LogStream (); - - /** - * Destructor. - */ - ~LogStream(); - - /** - * Enable output to a second - * stream o. - */ - void attach (std::ostream& o); - - /** - * Disable output to the second - * stream. You may want to call - * close on the stream that was - * previously attached to this object. - */ - void detach (); - - /** - * Setup the logstream for - * regression test mode. - * - * This sets the parameters - * #double_threshold, - * #float_threshold, and #offset - * to nonzero values. The exact - * values being used have been - * determined experimentally and - * can be found in the source - * code. 
- * - * Called with an argument - * false, switches off - * test mode and sets all - * involved parameters to zero. - */ - void test_mode (bool on=true); - - /** - * Gives the default stream (std_out). - */ - std::ostream& get_console (); - - /** - * Gives the file stream. - */ - std::ostream& get_file_stream (); - - /** - * @return true, if file stream - * has already been attached. - */ - bool has_file () const; - - /** - * Reroutes cerr to LogStream. - * Works as a switch, turning - * logging of cerr on - * and off alternatingly with - * every call. - */ - void log_cerr (); - - /** - * Return the prefix string. - */ - const std::string& get_prefix () const; - - /** - * @deprecated Use Prefix instead - * - * Push another prefix on the - * stack. Prefixes are - * automatically separated by a - * colon and there is a double - * colon after the last prefix. - */ - void push (const std::string& text); - - /** - * @deprecated Use Prefix instead - * - * Remove the last prefix. - */ - void pop (); - - /** - * Maximum number of levels to be - * printed on the console. This - * function allows to restrict - * console output to the upmost - * levels of iterations. Only - * output with less than n - * prefixes is printed. By calling - * this function with n=0, no - * console output will be written. - * - * The previous value of this - * parameter is returned. - */ - unsigned int depth_console (const unsigned int n); - - /** - * Maximum number of levels to be - * written to the log file. The - * functionality is the same as - * depth_console, nevertheless, - * this function should be used - * with care, since it may spoile - * the value of a log file. - * - * The previous value of this - * parameter is returned. - */ - unsigned int depth_file (const unsigned int n); - - /** - * Set time printing flag. If this flag - * is true, each output line will - * be prepended by the user time used - * by the running program so far. - * - * The previous value of this - * parameter is returned. - */ - bool log_execution_time (const bool flag); - - /** - * Output time differences - * between consecutive logs. If - * this function is invoked with - * true, the time difference - * between the previous log line - * and the recent one is - * printed. If it is invoked with - * false, the accumulated - * time since start of the - * program is printed (default - * behavior). - * - * The measurement of times is - * not changed by this function, - * just the output. - * - * The previous value of this - * parameter is returned. - */ - bool log_time_differences (const bool flag); - - /** - * Write detailed timing - * information. - * - * - */ - void timestamp(); - - /** - * Log the thread id. - */ - bool log_thread_id (const bool flag); - - /** - * Set a threshold for the - * minimal absolute value of - * double values. All numbers - * with a smaller absolute value - * will be printed as zero. - * - * The default value for this - * threshold is zero, - * i.e. numbers are printed - * according to their real value. - * - * This feature is mostly useful - * for automated tests: there, - * one would like to reproduce - * the exact same solution in - * each run of a - * testsuite. However, subtle - * difference in processor, - * operating system, or compiler - * version can lead to - * differences in the last few - * digits of numbers, due to - * different rounding. 
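A short sketch of how the prefix stack and the console depth documented above interact; deallog is the LogStream object provided by the library:

  deallog.depth_console (2);             // show at most two prefix levels on screen

  {
    LogStream::Prefix prefix ("Solve");  // every line now starts with "Solve:"
    deallog << "starting CG iteration" << std::endl;
  }                                      // the prefix is removed again here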
While one - * can avoid trouble for most - * numbers when comparing with - * stored results by simply - * limiting the accuracy of - * output, this does not hold for - * numbers very close to zero, - * i.e. zero plus accumulated - * round-off. For these numbers, - * already the first digit is - * tainted by round-off. Using - * the present function, it is - * possible to eliminate this - * source of problems, by simply - * writing zero to the output in - * this case. - */ - void threshold_double(const double t); - /** - * The same as - * threshold_double(), but for - * float values. - */ - void threshold_float(const float t); - - /** - * Output a constant something - * through this stream. - */ - template - LogStream & operator << (const T &t); - - /** - * Output double precision - * numbers through this - * stream. - * - * If they are set, this function - * applies the methods for making - * floating point output - * reproducible as discussed in - * the introduction. - */ - LogStream & operator << (const double t); - - /** - * Output single precision - * numbers through this - * stream. - * - * If they are set, this function - * applies the methods for making - * floating point output - * reproducible as discussed in - * the introduction. - */ - LogStream & operator << (const float t); - - /** - * Treat ostream - * manipulators. This passes on - * the whole thing to the - * template function with the - * exception of the - * std::endl - * manipulator, for which special - * action is performed: write the - * temporary stream buffer - * including a header to the file - * and std::cout and - * empty the buffer. - * - * An overload of this function is needed - * anyway, since the compiler can't bind - * manipulators like @p std::endl - * directly to template arguments @p T - * like in the previous general - * template. This is due to the fact that - * @p std::endl is actually an overloaded - * set of functions for @p std::ostream, - * @p std::wostream, and potentially more - * of this kind. This function is - * therefore necessary to pick one - * element from this overload set. - */ - LogStream & operator<< (std::ostream& (*p) (std::ostream&)); - - /** - * Determine an estimate for - * the memory consumption (in - * bytes) of this - * object. Since sometimes - * the size of objects can - * not be determined exactly - * (for example: what is the - * memory consumption of an - * STL std::map type with a - * certain number of - * elements?), this is only - * an estimate. however often - * quite close to the true - * value. - */ - std::size_t memory_consumption () const; - - /** - * Exception. - */ - DeclException0(ExcNoFileStreamGiven); + /** + * Set a new prefix for + * @p deallog, which will be + * removed when the variable + * is destroyed . + */ + Prefix(const std::string &text); + + /** + * Set a new prefix for the + * given stream, which will + * be removed when the + * variable is destroyed . + */ + Prefix(const std::string &text, LogStream &stream); + + /** + * Remove the prefix + * associated with this + * variable. + */ + ~Prefix (); private: - - /** - * Stack of strings which are printed - * at the beginning of each line to - * allow identification where the - * output was generated. - */ - std::stack prefixes; - - /** - * Default stream, where the output - * is to go to. This stream defaults - * to std::cerr, but can be set to another - * stream through the constructor. - */ - std::ostream *std_out; - - /** - * Pointer to a stream, where a copy of - * the output is to go to. 
Usually, this - * will be a file stream. - * - * You can set and reset this stream - * by the attach function. - */ - std::ostream *file; - - /** - * Value denoting the number of - * prefixes to be printed to the - * standard output. If more than - * this number of prefixes is - * pushed to the stack, then no - * output will be generated until - * the number of prefixes shrinks - * back below this number. - */ - unsigned int std_depth; - - /** - * Same for the maximum depth of - * prefixes for output to a file. - */ - unsigned int file_depth; - - /** - * Flag for printing execution time. - */ - bool print_utime; - - /** - * Flag for printing time differences. - */ - bool diff_utime; - - /** - * Time of last output line. - */ - double last_time; - - /** - * Threshold for printing double - * values. Every number with - * absolute value less than this - * is printed as zero. - */ - double double_threshold; - - /** - * Threshold for printing float - * values. Every number with - * absolute value less than this - * is printed as zero. - */ - float float_threshold; - - /** - * An offset added to every float - * or double number upon - * output. This is done after the - * number is compared to - * #double_threshold or #float_threshold, - * but before rounding. - * - * This functionality was - * introduced to produce more - * reproducible floating point - * output for regression - * tests. The rationale is, that - * an exact output value is much - * more likely to be 1/8 than - * 0.124997. If we round to two - * digits though, 1/8 becomes - * unreliably either .12 or .13 - * due to machine accuracy. On - * the other hand, if we add a - * something above machine - * accuracy first, we will always - * get .13. - * - * It is safe to leave this - * value equal to zero. For - * regression tests, the function - * test_mode() sets it to a - * reasonable value. - * - * The offset is relative to the - * magnitude of the number. - */ - double offset; - - /** - * Flag for printing thread id. - */ - bool print_thread_id; - - /** - * The value times() returned - * on initialization. - */ - double reference_time_val; - - /** - * The tms structure times() - * filled on initialization. - */ - struct tms reference_tms; - - /** - * Original buffer of - * std::cerr. We store - * the address of that buffer - * when #log_cerr is called, and - * reset it to this value if - * #log_cerr is called a second - * time, or when the destructor - * of this class is run. - */ - std::streambuf *old_cerr; - - /** - * Print head of line. This prints - * optional time information and - * the contents of the prefix stack. - */ - void print_line_head (); - - /** - * Actually do the work of - * writing output. This function - * unifies the work that is - * common to the two - * operator<< functions. - */ - template - void print (const T &t); - /** - * Check if we are on a new line - * and print the header before - * the data. - */ - std::ostringstream& get_stream(); - - /** - * Type of the stream map - */ - typedef std::map > stream_map_type; - - /** - * We generate a stringstream for - * every process that sends log - * messages. - */ - stream_map_type outstreams; + SmartPointer stream; + }; + + /** + * Standard constructor, since we + * intend to provide an object + * deallog in the library. Set the + * standard output stream to std::cerr. + */ + LogStream (); + + /** + * Destructor. + */ + ~LogStream(); + + /** + * Enable output to a second + * stream o. 
+ */ + void attach (std::ostream &o); + + /** + * Disable output to the second + * stream. You may want to call + * close on the stream that was + * previously attached to this object. + */ + void detach (); + + /** + * Setup the logstream for + * regression test mode. + * + * This sets the parameters + * #double_threshold, + * #float_threshold, and #offset + * to nonzero values. The exact + * values being used have been + * determined experimentally and + * can be found in the source + * code. + * + * Called with an argument + * false, switches off + * test mode and sets all + * involved parameters to zero. + */ + void test_mode (bool on=true); + + /** + * Gives the default stream (std_out). + */ + std::ostream &get_console (); + + /** + * Gives the file stream. + */ + std::ostream &get_file_stream (); + + /** + * @return true, if file stream + * has already been attached. + */ + bool has_file () const; + + /** + * Reroutes cerr to LogStream. + * Works as a switch, turning + * logging of cerr on + * and off alternatingly with + * every call. + */ + void log_cerr (); + + /** + * Return the prefix string. + */ + const std::string &get_prefix () const; + + /** + * @deprecated Use Prefix instead + * + * Push another prefix on the + * stack. Prefixes are + * automatically separated by a + * colon and there is a double + * colon after the last prefix. + */ + void push (const std::string &text); + + /** + * @deprecated Use Prefix instead + * + * Remove the last prefix. + */ + void pop (); + + /** + * Maximum number of levels to be + * printed on the console. This + * function allows to restrict + * console output to the upmost + * levels of iterations. Only + * output with less than n + * prefixes is printed. By calling + * this function with n=0, no + * console output will be written. + * + * The previous value of this + * parameter is returned. + */ + unsigned int depth_console (const unsigned int n); + + /** + * Maximum number of levels to be + * written to the log file. The + * functionality is the same as + * depth_console, nevertheless, + * this function should be used + * with care, since it may spoile + * the value of a log file. + * + * The previous value of this + * parameter is returned. + */ + unsigned int depth_file (const unsigned int n); + + /** + * Set time printing flag. If this flag + * is true, each output line will + * be prepended by the user time used + * by the running program so far. + * + * The previous value of this + * parameter is returned. + */ + bool log_execution_time (const bool flag); + + /** + * Output time differences + * between consecutive logs. If + * this function is invoked with + * true, the time difference + * between the previous log line + * and the recent one is + * printed. If it is invoked with + * false, the accumulated + * time since start of the + * program is printed (default + * behavior). + * + * The measurement of times is + * not changed by this function, + * just the output. + * + * The previous value of this + * parameter is returned. + */ + bool log_time_differences (const bool flag); + + /** + * Write detailed timing + * information. + * + * + */ + void timestamp(); + + /** + * Log the thread id. + */ + bool log_thread_id (const bool flag); + + /** + * Set a threshold for the + * minimal absolute value of + * double values. All numbers + * with a smaller absolute value + * will be printed as zero. + * + * The default value for this + * threshold is zero, + * i.e. numbers are printed + * according to their real value. 
+ * + * This feature is mostly useful + * for automated tests: there, + * one would like to reproduce + * the exact same solution in + * each run of a + * testsuite. However, subtle + * difference in processor, + * operating system, or compiler + * version can lead to + * differences in the last few + * digits of numbers, due to + * different rounding. While one + * can avoid trouble for most + * numbers when comparing with + * stored results by simply + * limiting the accuracy of + * output, this does not hold for + * numbers very close to zero, + * i.e. zero plus accumulated + * round-off. For these numbers, + * already the first digit is + * tainted by round-off. Using + * the present function, it is + * possible to eliminate this + * source of problems, by simply + * writing zero to the output in + * this case. + */ + void threshold_double(const double t); + /** + * The same as + * threshold_double(), but for + * float values. + */ + void threshold_float(const float t); + + /** + * Output a constant something + * through this stream. + */ + template + LogStream &operator << (const T &t); + + /** + * Output double precision + * numbers through this + * stream. + * + * If they are set, this function + * applies the methods for making + * floating point output + * reproducible as discussed in + * the introduction. + */ + LogStream &operator << (const double t); + + /** + * Output single precision + * numbers through this + * stream. + * + * If they are set, this function + * applies the methods for making + * floating point output + * reproducible as discussed in + * the introduction. + */ + LogStream &operator << (const float t); + + /** + * Treat ostream + * manipulators. This passes on + * the whole thing to the + * template function with the + * exception of the + * std::endl + * manipulator, for which special + * action is performed: write the + * temporary stream buffer + * including a header to the file + * and std::cout and + * empty the buffer. + * + * An overload of this function is needed + * anyway, since the compiler can't bind + * manipulators like @p std::endl + * directly to template arguments @p T + * like in the previous general + * template. This is due to the fact that + * @p std::endl is actually an overloaded + * set of functions for @p std::ostream, + * @p std::wostream, and potentially more + * of this kind. This function is + * therefore necessary to pick one + * element from this overload set. + */ + LogStream &operator<< (std::ostream& (*p) (std::ostream &)); + + /** + * Determine an estimate for + * the memory consumption (in + * bytes) of this + * object. Since sometimes + * the size of objects can + * not be determined exactly + * (for example: what is the + * memory consumption of an + * STL std::map type with a + * certain number of + * elements?), this is only + * an estimate. however often + * quite close to the true + * value. + */ + std::size_t memory_consumption () const; + + /** + * Exception. + */ + DeclException0(ExcNoFileStreamGiven); + + private: + + /** + * Stack of strings which are printed + * at the beginning of each line to + * allow identification where the + * output was generated. + */ + std::stack prefixes; + + /** + * Default stream, where the output + * is to go to. This stream defaults + * to std::cerr, but can be set to another + * stream through the constructor. + */ - std::ostream *std_out; ++ std::ostream *std_out; + + /** + * Pointer to a stream, where a copy of + * the output is to go to. Usually, this + * will be a file stream. 
+ * + * You can set and reset this stream + * by the attach function. + */ - std::ostream *file; ++ std::ostream *file; + + /** + * Value denoting the number of + * prefixes to be printed to the + * standard output. If more than + * this number of prefixes is + * pushed to the stack, then no + * output will be generated until + * the number of prefixes shrinks + * back below this number. + */ + unsigned int std_depth; + + /** + * Same for the maximum depth of + * prefixes for output to a file. + */ + unsigned int file_depth; + + /** + * Flag for printing execution time. + */ + bool print_utime; + + /** + * Flag for printing time differences. + */ + bool diff_utime; + + /** + * Time of last output line. + */ + double last_time; + + /** + * Threshold for printing double + * values. Every number with + * absolute value less than this + * is printed as zero. + */ + double double_threshold; + + /** + * Threshold for printing float + * values. Every number with + * absolute value less than this + * is printed as zero. + */ + float float_threshold; + + /** + * An offset added to every float + * or double number upon + * output. This is done after the + * number is compared to + * #double_threshold or #float_threshold, + * but before rounding. + * + * This functionality was + * introduced to produce more + * reproducible floating point + * output for regression + * tests. The rationale is, that + * an exact output value is much + * more likely to be 1/8 than + * 0.124997. If we round to two + * digits though, 1/8 becomes + * unreliably either .12 or .13 + * due to machine accuracy. On + * the other hand, if we add a + * something above machine + * accuracy first, we will always + * get .13. + * + * It is safe to leave this + * value equal to zero. For + * regression tests, the function + * test_mode() sets it to a + * reasonable value. + * + * The offset is relative to the + * magnitude of the number. + */ + double offset; + + /** + * Flag for printing thread id. + */ + bool print_thread_id; + + /** + * The value times() returned + * on initialization. + */ + double reference_time_val; + + /** + * The tms structure times() + * filled on initialization. + */ + struct tms reference_tms; + + /** + * Original buffer of + * std::cerr. We store + * the address of that buffer + * when #log_cerr is called, and + * reset it to this value if + * #log_cerr is called a second + * time, or when the destructor + * of this class is run. + */ + std::streambuf *old_cerr; + + /** + * Print head of line. This prints + * optional time information and + * the contents of the prefix stack. + */ + void print_line_head (); + + /** + * Actually do the work of + * writing output. This function + * unifies the work that is + * common to the two + * operator<< functions. + */ + template + void print (const T &t); + /** + * Check if we are on a new line + * and print the header before + * the data. + */ + std::ostringstream &get_stream(); + + /** + * Type of the stream map + */ + typedef std::map > stream_map_type; + + /** + * We generate a stringstream for + * every process that sends log + * messages. + */ + stream_map_type outstreams; }; diff --cc deal.II/include/deal.II/base/parallel.h index 285322472f,2036053859..0afd44ae30 --- a/deal.II/include/deal.II/base/parallel.h +++ b/deal.II/include/deal.II/base/parallel.h @@@ -341,13 -341,13 +341,13 @@@ namespace paralle namespace internal { #if DEAL_II_USE_MT == 1 - /** - * Take a range argument and call the - * given function with its begin and end. 
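The helper above supports the public parallel::apply_to_subranges() function. A sketch of that public interface, assuming the std_cxx1x bind facilities shipped with the library and a vector v declared by the caller:

  // worker acting on the half-open index range [begin, end)
  void scale_range (const std::size_t     begin,
                    const std::size_t     end,
                    std::vector<double>  &v)
  {
    for (std::size_t i = begin; i < end; ++i)
      v[i] *= 2.;
  }

  // ...

  parallel::apply_to_subranges (0U, v.size (),
                                std_cxx1x::bind (&scale_range,
                                                 std_cxx1x::_1,
                                                 std_cxx1x::_2,
                                                 std_cxx1x::ref (v)),
                                256);    // grainsize: at least 256 elements per task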
- */ + /** + * Take a range argument and call the + * given function with its begin and end. + */ template void apply_to_subranges (const tbb::blocked_range &range, - const Function &f) + const Function &f) { f (range.begin(), range.end()); } diff --cc deal.II/include/deal.II/base/parameter_handler.h index 9a81d2a33a,8d706cba21..f19849ef96 --- a/deal.II/include/deal.II/base/parameter_handler.h +++ b/deal.II/include/deal.II/base/parameter_handler.h @@@ -54,611 -54,611 +54,611 @@@ class LogStream namespace Patterns { - /** - * Base class to declare common - * interface. The purpose of this - * class is mostly to define the - * interface of patterns, and to - * force derived classes to have a - * clone function. It is thus, - * in the languages of the "Design - * Patterns" book (Gamma et al.), a - * "prototype". - */ + /** + * Base class to declare common + * interface. The purpose of this + * class is mostly to define the + * interface of patterns, and to + * force derived classes to have a + * clone function. It is thus, + * in the languages of the "Design + * Patterns" book (Gamma et al.), a + * "prototype". + */ class PatternBase { - public: - /** - * Make destructor of this and all - * derived classes virtual. - */ - virtual ~PatternBase (); - - /** - * Return true if the given string - * matches the pattern. - */ - virtual bool match (const std::string &test_string) const = 0; - - /** - * Return a string describing the - * pattern. - */ - virtual std::string description () const = 0; - - /** - * Return a pointer to an - * exact copy of the - * object. This is necessary - * since we want to store - * objects of this type in - * containers, were we need - * to copy objects without - * knowledge of their actual - * data type (we only have - * pointers to the base - * class). - * - * Ownership of the objects - * returned by this function - * is passed to the caller of - * this function. - */ - virtual PatternBase * clone () const = 0; - - /** - * Determine an estimate for - * the memory consumption (in - * bytes) of this object. To - * avoid unnecessary - * overhead, we do not force - * derived classes to provide - * this function as a virtual - * overloaded one, but rather - * try to cast the present - * object to one of the known - * derived classes and if - * that fails then take the - * size of this base class - * instead and add 32 byte - * (this value is arbitrary, - * it should account for - * virtual function tables, - * and some possible data - * elements). Since there are - * usually not many thousands - * of objects of this type - * around, and since the - * memory_consumption - * mechanism is used to find - * out where memory in the - * range of many megabytes - * is, this seems like a - * reasonable approximation. - * - * On the other hand, if you - * know that your class - * deviates from this - * assumption significantly, - * you can still overload - * this function. - */ - virtual std::size_t memory_consumption () const; + public: + /** + * Make destructor of this and all + * derived classes virtual. + */ + virtual ~PatternBase (); + + /** + * Return true if the given string + * matches the pattern. + */ + virtual bool match (const std::string &test_string) const = 0; + + /** + * Return a string describing the + * pattern. + */ + virtual std::string description () const = 0; + + /** + * Return a pointer to an + * exact copy of the + * object. 
This is necessary + * since we want to store + * objects of this type in + * containers, were we need + * to copy objects without + * knowledge of their actual + * data type (we only have + * pointers to the base + * class). + * + * Ownership of the objects + * returned by this function + * is passed to the caller of + * this function. + */ + virtual PatternBase *clone () const = 0; + + /** + * Determine an estimate for + * the memory consumption (in + * bytes) of this object. To + * avoid unnecessary + * overhead, we do not force + * derived classes to provide + * this function as a virtual + * overloaded one, but rather + * try to cast the present + * object to one of the known + * derived classes and if + * that fails then take the + * size of this base class + * instead and add 32 byte + * (this value is arbitrary, + * it should account for + * virtual function tables, + * and some possible data + * elements). Since there are + * usually not many thousands + * of objects of this type + * around, and since the + * memory_consumption + * mechanism is used to find + * out where memory in the + * range of many megabytes + * is, this seems like a + * reasonable approximation. + * + * On the other hand, if you + * know that your class + * deviates from this + * assumption significantly, + * you can still overload + * this function. + */ + virtual std::size_t memory_consumption () const; }; - /** - * Returns pointer to the correct - * derived class based on description. - */ - PatternBase * pattern_factory (const std::string& description); - - /** - * Test for the string being an - * integer. If bounds are given - * to the constructor, then the - * integer given also needs to be - * within the interval specified - * by these bounds. Note that - * unlike common convention in - * the C++ standard library, both - * bounds of this interval are - * inclusive; the reason is that - * in practice in most cases, one - * needs closed intervals, but - * these can only be realized - * with inclusive bounds for - * non-integer values. We thus - * stay consistent by always - * using closed intervals. - * - * If the upper bound given to - * the constructor is smaller - * than the lower bound, then the - * infinite interval is implied, - * i.e. every integer is allowed. - * - * Giving bounds may be useful if - * for example a value can only - * be positive and less than a - * reasonable upper bound (for - * example the number of - * refinement steps to be - * performed), or in many other - * cases. - */ + /** + * Returns pointer to the correct + * derived class based on description. + */ + PatternBase *pattern_factory (const std::string &description); + + /** + * Test for the string being an + * integer. If bounds are given + * to the constructor, then the + * integer given also needs to be + * within the interval specified + * by these bounds. Note that + * unlike common convention in + * the C++ standard library, both + * bounds of this interval are + * inclusive; the reason is that + * in practice in most cases, one + * needs closed intervals, but + * these can only be realized + * with inclusive bounds for + * non-integer values. We thus + * stay consistent by always + * using closed intervals. + * + * If the upper bound given to + * the constructor is smaller + * than the lower bound, then the + * infinite interval is implied, + * i.e. every integer is allowed. 
+ * + * Giving bounds may be useful if + * for example a value can only + * be positive and less than a + * reasonable upper bound (for + * example the number of + * refinement steps to be + * performed), or in many other + * cases. + */ class Integer : public PatternBase { - public: - /** - * Minimal integer value. If - * the numeric_limits class - * is available use this - * information to obtain the - * extremal values, otherwise - * set it so that this class - * understands that all values - * are allowed. - */ - static const int min_int_value; - - /** - * Maximal integer value. If - * the numeric_limits class - * is available use this - * information to obtain the - * extremal values, otherwise - * set it so that this class - * understands that all values - * are allowed. - */ - static const int max_int_value; - - /** - * Constructor. Bounds can be - * specified within which a - * valid parameter has to - * be. If the upper bound is - * smaller than the lower - * bound, then the infinite - * interval is meant. The - * default values are chosen - * such that no bounds are - * enforced on parameters. - */ - Integer (const int lower_bound = min_int_value, - const int upper_bound = max_int_value); - - /** - * Return true if the - * string is an integer and - * its value is within the - * specified range. - */ - virtual bool match (const std::string &test_string) const; - - /** - * Return a description of - * the pattern that valid - * strings are expected to - * match. If bounds were - * specified to the - * constructor, then include - * them into this - * description. - */ - virtual std::string description () const; - - /** - * Return a copy of the - * present object, which is - * newly allocated on the - * heap. Ownership of that - * object is transferred to - * the caller of this - * function. - */ - virtual PatternBase * clone () const; - - /** - * Creates new object if the start of - * description matches - * description_init. Ownership of that - * object is transferred to the caller - * of this function. - */ - static Integer* create (const std::string& description); - - private: - /** - * Value of the lower - * bound. A number that - * satisfies the @ref match - * operation of this class - * must be equal to this - * value or larger, if the - * bounds of the interval for - * a valid range. - */ - const int lower_bound; - - /** - * Value of the upper - * bound. A number that - * satisfies the @ref match - * operation of this class - * must be equal to this - * value or less, if the - * bounds of the interval for - * a valid range. - */ - const int upper_bound; - - /** - * Initial part of description - */ - static const char* description_init; + public: + /** + * Minimal integer value. If + * the numeric_limits class + * is available use this + * information to obtain the + * extremal values, otherwise + * set it so that this class + * understands that all values + * are allowed. + */ + static const int min_int_value; + + /** + * Maximal integer value. If + * the numeric_limits class + * is available use this + * information to obtain the + * extremal values, otherwise + * set it so that this class + * understands that all values + * are allowed. + */ + static const int max_int_value; + + /** + * Constructor. Bounds can be + * specified within which a + * valid parameter has to + * be. If the upper bound is + * smaller than the lower + * bound, then the infinite + * interval is meant. The + * default values are chosen + * such that no bounds are + * enforced on parameters. 
+ */ + Integer (const int lower_bound = min_int_value, + const int upper_bound = max_int_value); + + /** + * Return true if the + * string is an integer and + * its value is within the + * specified range. + */ + virtual bool match (const std::string &test_string) const; + + /** + * Return a description of + * the pattern that valid + * strings are expected to + * match. If bounds were + * specified to the + * constructor, then include + * them into this + * description. + */ + virtual std::string description () const; + + /** + * Return a copy of the + * present object, which is + * newly allocated on the + * heap. Ownership of that + * object is transferred to + * the caller of this + * function. + */ + virtual PatternBase *clone () const; + + /** + * Creates new object if the start of + * description matches + * description_init. Ownership of that + * object is transferred to the caller + * of this function. + */ + static Integer *create (const std::string &description); + + private: + /** + * Value of the lower + * bound. A number that + * satisfies the @ref match + * operation of this class + * must be equal to this + * value or larger, if the + * bounds of the interval for + * a valid range. + */ + const int lower_bound; + + /** + * Value of the upper + * bound. A number that + * satisfies the @ref match + * operation of this class + * must be equal to this + * value or less, if the + * bounds of the interval for + * a valid range. + */ + const int upper_bound; + + /** + * Initial part of description + */ + static const char *description_init; }; - /** - * Test for the string being a - * double. If bounds are - * given to the constructor, then - * the integer given also needs - * to be within the interval - * specified by these - * bounds. Note that unlike - * common convention in the C++ - * standard library, both bounds - * of this interval are - * inclusive; the reason is that - * in practice in most cases, one - * needs closed intervals, but - * these can only be realized - * with inclusive bounds for - * non-integer values. We thus - * stay consistent by always - * using closed intervals. - * - * If the upper bound given to - * the constructor is smaller - * than the lower bound, then the - * infinite interval is implied, - * i.e. every integer is allowed. - * - * Giving bounds may be useful if - * for example a value can only - * be positive and less than a - * reasonable upper bound (for - * example damping parameters are - * frequently only reasonable if - * between zero and one), or in - * many other cases. - */ + /** + * Test for the string being a + * double. If bounds are + * given to the constructor, then + * the integer given also needs + * to be within the interval + * specified by these + * bounds. Note that unlike + * common convention in the C++ + * standard library, both bounds + * of this interval are + * inclusive; the reason is that + * in practice in most cases, one + * needs closed intervals, but + * these can only be realized + * with inclusive bounds for + * non-integer values. We thus + * stay consistent by always + * using closed intervals. + * + * If the upper bound given to + * the constructor is smaller + * than the lower bound, then the + * infinite interval is implied, + * i.e. every integer is allowed. 
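
A short sketch of the Integer pattern declared above; the bounds and the surrounding function are illustrative. Both interval bounds are inclusive, so "10" matches Integer(1,10) while "0" and "abc" do not.

#include <deal.II/base/parameter_handler.h>
#include <iostream>
#include <string>

void check_refinement_steps (const std::string &s)
{
  const dealii::Patterns::Integer pattern (1, 10);
  if (!pattern.match (s))
    std::cerr << "Expected " << pattern.description () << std::endl;
}
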
+ * + * Giving bounds may be useful if + * for example a value can only + * be positive and less than a + * reasonable upper bound (for + * example damping parameters are + * frequently only reasonable if + * between zero and one), or in + * many other cases. + */ class Double : public PatternBase { - public: - /** - * Minimal double value. If the - * std::numeric_limits - * class is available use this - * information to obtain the - * extremal values, otherwise - * set it so that this class - * understands that all values - * are allowed. - */ - static const double min_double_value; - - /** - * Maximal double value. If the - * numeric_limits class is - * available use this - * information to obtain the - * extremal values, otherwise - * set it so that this class - * understands that all values - * are allowed. - */ - static const double max_double_value; - - /** - * Constructor. Bounds can be - * specified within which a - * valid parameter has to - * be. If the upper bound is - * smaller than the lower - * bound, then the infinite - * interval is meant. The - * default values are chosen - * such that no bounds are - * enforced on parameters. - */ - Double (const double lower_bound = min_double_value, - const double upper_bound = max_double_value); - - /** - * Return true if the - * string is a number and its - * value is within the - * specified range. - */ - virtual bool match (const std::string &test_string) const; - - /** - * Return a description of - * the pattern that valid - * strings are expected to - * match. If bounds were - * specified to the - * constructor, then include - * them into this - * description. - */ - virtual std::string description () const; - - /** - * Return a copy of the - * present object, which is - * newly allocated on the - * heap. Ownership of that - * object is transferred to - * the caller of this - * function. - */ - virtual PatternBase * clone () const; - - /** - * Creates new object if the start of - * description matches - * description_init. Ownership of that - * object is transferred to the caller - * of this function. - */ - static Double* create (const std::string& description); - - private: - /** - * Value of the lower - * bound. A number that - * satisfies the @ref match - * operation of this class - * must be equal to this - * value or larger, if the - * bounds of the interval for - * a valid range. - */ - const double lower_bound; - - /** - * Value of the upper - * bound. A number that - * satisfies the @ref match - * operation of this class - * must be equal to this - * value or less, if the - * bounds of the interval for - * a valid range. - */ - const double upper_bound; - - /** - * Initial part of description - */ - static const char* description_init; + public: + /** + * Minimal double value. If the + * std::numeric_limits + * class is available use this + * information to obtain the + * extremal values, otherwise + * set it so that this class + * understands that all values + * are allowed. + */ + static const double min_double_value; + + /** + * Maximal double value. If the + * numeric_limits class is + * available use this + * information to obtain the + * extremal values, otherwise + * set it so that this class + * understands that all values + * are allowed. + */ + static const double max_double_value; + + /** + * Constructor. Bounds can be + * specified within which a + * valid parameter has to + * be. If the upper bound is + * smaller than the lower + * bound, then the infinite + * interval is meant. 
The + * default values are chosen + * such that no bounds are + * enforced on parameters. + */ + Double (const double lower_bound = min_double_value, + const double upper_bound = max_double_value); + + /** + * Return true if the + * string is a number and its + * value is within the + * specified range. + */ + virtual bool match (const std::string &test_string) const; + + /** + * Return a description of + * the pattern that valid + * strings are expected to + * match. If bounds were + * specified to the + * constructor, then include + * them into this + * description. + */ + virtual std::string description () const; + + /** + * Return a copy of the + * present object, which is + * newly allocated on the + * heap. Ownership of that + * object is transferred to + * the caller of this + * function. + */ + virtual PatternBase *clone () const; + + /** + * Creates new object if the start of + * description matches + * description_init. Ownership of that + * object is transferred to the caller + * of this function. + */ + static Double *create (const std::string &description); + + private: + /** + * Value of the lower + * bound. A number that + * satisfies the @ref match + * operation of this class + * must be equal to this + * value or larger, if the + * bounds of the interval for + * a valid range. + */ + const double lower_bound; + + /** + * Value of the upper + * bound. A number that + * satisfies the @ref match + * operation of this class + * must be equal to this + * value or less, if the + * bounds of the interval for + * a valid range. + */ + const double upper_bound; + + /** + * Initial part of description + */ + static const char *description_init; + }; + + /** + * Test for the string being one + * of a sequence of values given + * like a regular expression. For + * example, if the string given + * to the constructor is + * "red|blue|black", then the + * @ref match function returns + * true exactly if the string + * is either "red" or "blue" or + * "black". Spaces around the + * pipe signs do not matter and + * are eliminated. + */ + class Selection : public PatternBase + { + public: + /** + * Constructor. Take the + * given parameter as the + * specification of valid + * strings. + */ + Selection (const std::string &seq); + + /** + * Return true if the + * string is an element of + * the description list + * passed to the constructor. + */ + virtual bool match (const std::string &test_string) const; + + /** + * Return a description of + * the pattern that valid + * strings are expected to + * match. Here, this is the + * list of valid strings + * passed to the constructor. + */ + virtual std::string description () const; + + /** + * Return a copy of the + * present object, which is + * newly allocated on the + * heap. Ownership of that + * object is transferred to + * the caller of this + * function. + */ + virtual PatternBase *clone () const; + + /** + * Determine an estimate for + * the memory consumption (in + * bytes) of this object. + */ + std::size_t memory_consumption () const; + + /** + * Creates new object if the start of + * description matches + * description_init. Ownership of that + * object is transferred to the caller + * of this function. + */ + static Selection *create (const std::string &description); + + private: + /** + * List of valid strings as + * passed to the + * constructor. We don't make + * this string constant, as + * we process it somewhat in + * the constructor. 
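
The same idea for the Double pattern above, here with the closed interval [0,1] one might use for a damping parameter; names and bounds are again only an illustration.

#include <deal.II/base/parameter_handler.h>
#include <string>

bool is_valid_damping_parameter (const std::string &s)
{
  const dealii::Patterns::Double pattern (0., 1.);
  return pattern.match (s);   // "0", "0.5" and "1.0" match, "1.5" does not
}
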
+ */ + std::string sequence; + + /** + * Initial part of description + */ + static const char *description_init; }; - /** - * Test for the string being one - * of a sequence of values given - * like a regular expression. For - * example, if the string given - * to the constructor is - * "red|blue|black", then the - * @ref match function returns - * true exactly if the string - * is either "red" or "blue" or - * "black". Spaces around the - * pipe signs do not matter and - * are eliminated. - */ - class Selection : public PatternBase - { - public: - /** - * Constructor. Take the - * given parameter as the - * specification of valid - * strings. - */ - Selection (const std::string &seq); - - /** - * Return true if the - * string is an element of - * the description list - * passed to the constructor. - */ - virtual bool match (const std::string &test_string) const; - - /** - * Return a description of - * the pattern that valid - * strings are expected to - * match. Here, this is the - * list of valid strings - * passed to the constructor. - */ - virtual std::string description () const; - - /** - * Return a copy of the - * present object, which is - * newly allocated on the - * heap. Ownership of that - * object is transferred to - * the caller of this - * function. - */ - virtual PatternBase * clone () const; - - /** - * Determine an estimate for - * the memory consumption (in - * bytes) of this object. - */ - std::size_t memory_consumption () const; - - /** - * Creates new object if the start of - * description matches - * description_init. Ownership of that - * object is transferred to the caller - * of this function. - */ - static Selection* create (const std::string& description); - - private: - /** - * List of valid strings as - * passed to the - * constructor. We don't make - * this string constant, as - * we process it somewhat in - * the constructor. - */ - std::string sequence; - - /** - * Initial part of description - */ - static const char* description_init; - }; + /** + * This pattern matches a list of + * comma-separated values each of which + * have to match a pattern given to the + * constructor. With two additional + * parameters, the number of elements this + * list has to have can be specified. If + * none is specified, the list may have + * zero or more entries. + */ + class List : public PatternBase + { + public: + /** + * Maximal integer value. If + * the numeric_limits class + * is available use this + * information to obtain the + * extremal values, otherwise + * set it so that this class + * understands that all values + * are allowed. + */ + static const unsigned int max_int_value; + + /** + * Constructor. Take the + * given parameter as the + * specification of valid + * elements of the list. + * + * The two other arguments can + * be used to denote minimal + * and maximal allowable + * lengths of the list. + */ - List (const PatternBase &base_pattern, ++ List (const PatternBase &base_pattern, + const unsigned int min_elements = 0, + const unsigned int max_elements = max_int_value); + + /** + * Destructor. + */ + virtual ~List (); + + /** + * Return true if the + * string is a comma-separated + * list of strings each of + * which match the pattern + * given to the constructor. + */ + virtual bool match (const std::string &test_string) const; + + /** + * Return a description of + * the pattern that valid + * strings are expected to + * match. 
+ */ + virtual std::string description () const; + + /** + * Return a copy of the + * present object, which is + * newly allocated on the + * heap. Ownership of that + * object is transferred to + * the caller of this + * function. + */ + virtual PatternBase *clone () const; + + /** + * Creates new object if the start of + * description matches + * description_init. Ownership of that + * object is transferred to the caller + * of this function. + */ + static List *create (const std::string &description); + + /** + * Determine an estimate for + * the memory consumption (in + * bytes) of this object. + */ + std::size_t memory_consumption () const; + + /** @addtogroup Exceptions + * @{ */ + + /** + * Exception. + */ + DeclException2 (ExcInvalidRange, + int, int, + << "The values " << arg1 << " and " << arg2 + << " do not form a valid range."); + //@} + private: + /** + * Copy of the pattern that + * each element of the list has + * to satisfy. + */ + PatternBase *pattern; + + /** + * Minimum number of elements + * the list must have. + */ + const unsigned int min_elements; + + /** + * Maximum number of elements + * the list must have. + */ + const unsigned int max_elements; - /** - * This pattern matches a list of - * comma-separated values each of which - * have to match a pattern given to the - * constructor. With two additional - * parameters, the number of elements this - * list has to have can be specified. If - * none is specified, the list may have - * zero or more entries. - */ - class List : public PatternBase - { - public: - /** - * Maximal integer value. If - * the numeric_limits class - * is available use this - * information to obtain the - * extremal values, otherwise - * set it so that this class - * understands that all values - * are allowed. - */ - static const unsigned int max_int_value; - - /** - * Constructor. Take the - * given parameter as the - * specification of valid - * elements of the list. - * - * The two other arguments can - * be used to denote minimal - * and maximal allowable - * lengths of the list. - */ - List (const PatternBase &base_pattern, - const unsigned int min_elements = 0, - const unsigned int max_elements = max_int_value); - - /** - * Destructor. - */ - virtual ~List (); - - /** - * Return true if the - * string is a comma-separated - * list of strings each of - * which match the pattern - * given to the constructor. - */ - virtual bool match (const std::string &test_string) const; - - /** - * Return a description of - * the pattern that valid - * strings are expected to - * match. - */ - virtual std::string description () const; - - /** - * Return a copy of the - * present object, which is - * newly allocated on the - * heap. Ownership of that - * object is transferred to - * the caller of this - * function. - */ - virtual PatternBase * clone () const; - - /** - * Creates new object if the start of - * description matches - * description_init. Ownership of that - * object is transferred to the caller - * of this function. - */ - static List* create (const std::string& description); - - /** - * Determine an estimate for - * the memory consumption (in - * bytes) of this object. - */ - std::size_t memory_consumption () const; - - /** @addtogroup Exceptions - * @{ */ - - /** - * Exception. - */ - DeclException2 (ExcInvalidRange, - int, int, - << "The values " << arg1 << " and " << arg2 - << " do not form a valid range."); - //@} - private: - /** - * Copy of the pattern that - * each element of the list has - * to satisfy. 
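
A sketch combining the Selection and List patterns of this hunk: a comma-separated list of two to four color names drawn from "red|blue|black". Since List stores its own copy of the element pattern, the local Selection object need not outlive it. The names are illustrative.

#include <deal.II/base/parameter_handler.h>
#include <string>

bool is_valid_color_list (const std::string &s)
{
  const dealii::Patterns::Selection color ("red|blue|black");
  const dealii::Patterns::List      colors (color, 2, 4);
  return colors.match (s);   // "red,blue" matches, "red" alone does not
}
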
- */ - PatternBase *pattern; - - /** - * Minimum number of elements - * the list must have. - */ - const unsigned int min_elements; - - /** - * Maximum number of elements - * the list must have. - */ - const unsigned int max_elements; - - /** - * Initial part of description - */ - static const char* description_init; + /** + * Initial part of description + */ + static const char *description_init; }; diff --cc deal.II/include/deal.II/base/partitioner.h index 101c00544c,9203409bf6..fff826924b --- a/deal.II/include/deal.II/base/partitioner.h +++ b/deal.II/include/deal.II/base/partitioner.h @@@ -470,7 -470,7 +470,7 @@@ namespace Utilitie inline - const IndexSet& Partitioner::ghost_indices() const - const IndexSet &Partitioner::ghost_indices() const ++ const IndexSet &Partitioner::ghost_indices() const { return ghost_indices_data; } diff --cc deal.II/include/deal.II/base/qprojector.h index 7ffa03220a,e63769f99b..c89c41ffe7 --- a/deal.II/include/deal.II/base/qprojector.h +++ b/deal.II/include/deal.II/base/qprojector.h @@@ -72,380 -72,380 +72,380 @@@ DEAL_II_NAMESPACE_OPE template class QProjector { + public: + /** + * Define a typedef for a + * quadrature that acts on an + * object of one dimension + * less. For cells, this would + * then be a face quadrature. + */ + typedef Quadrature SubQuadrature; + + /** + * Compute the quadrature points + * on the cell if the given + * quadrature formula is used on + * face face_no. For further + * details, see the general doc + * for this class. + */ + static void project_to_face (const SubQuadrature &quadrature, + const unsigned int face_no, + std::vector > &q_points); + + /** + * Compute the cell quadrature + * formula corresponding to using + * quadrature on face + * face_no. For further + * details, see the general doc + * for this class. + */ + static Quadrature + project_to_face (const SubQuadrature &quadrature, + const unsigned int face_no); + + /** + * Compute the quadrature points on the + * cell if the given quadrature formula is + * used on face face_no, subface + * number subface_no corresponding + * to RefineCase::Type + * ref_case. The last argument is + * only used in 3D. + * + * @note Only the points are + * transformed. The quadrature + * weights are the same as those + * of the original rule. + */ + static void project_to_subface (const SubQuadrature &quadrature, + const unsigned int face_no, + const unsigned int subface_no, - std::vector > &q_points, ++ std::vector > &q_points, + const RefinementCase &ref_case=RefinementCase::isotropic_refinement); + + /** + * Compute the cell quadrature formula + * corresponding to using + * quadrature on subface + * subface_no of face + * face_no with + * RefinementCase + * ref_case. The last argument is + * only used in 3D. + * + * @note Only the points are + * transformed. The quadrature + * weights are the same as those + * of the original rule. + */ + static Quadrature + project_to_subface (const SubQuadrature &quadrature, + const unsigned int face_no, + const unsigned int subface_no, + const RefinementCase &ref_case=RefinementCase::isotropic_refinement); + + /** + * Take a face quadrature formula + * and generate a cell quadrature + * formula from it where the + * quadrature points of the given + * argument are projected on all + * faces. + * + * The weights of the new rule + * are replications of the + * original weights. Thus, the + * sum of the weights is not one, + * but the number of faces, which + * is the surface of the + * reference cell. 
+ * + * This in particular allows us + * to extract a subset of points + * corresponding to a single face + * and use it as a quadrature on + * this face, as is done in + * FEFaceValues. + * + * @note In 3D, this function + * produces eight sets of + * quadrature points for each + * face, in order to cope + * possibly different + * orientations of the mesh. + */ + static Quadrature + project_to_all_faces (const SubQuadrature &quadrature); + + /** + * Take a face quadrature formula + * and generate a cell quadrature + * formula from it where the + * quadrature points of the given + * argument are projected on all + * subfaces. + * + * Like in project_to_all_faces(), + * the weights of the new rule + * sum up to the number of faces + * (not subfaces), which + * is the surface of the + * reference cell. + * + * This in particular allows us + * to extract a subset of points + * corresponding to a single subface + * and use it as a quadrature on + * this face, as is done in + * FESubfaceValues. + */ + static Quadrature + project_to_all_subfaces (const SubQuadrature &quadrature); + + /** + * Project a given quadrature + * formula to a child of a + * cell. You may want to use this + * function in case you want to + * extend an integral only over + * the area which a potential + * child would occupy. The child + * numbering is the same as the + * children would be numbered + * upon refinement of the cell. + * + * As integration using this + * quadrature formula now only + * extends over a fraction of the + * cell, the weights of the + * resulting object are divided by + * GeometryInfo::children_per_cell. + */ + static + Quadrature - project_to_child (const Quadrature &quadrature, ++ project_to_child (const Quadrature &quadrature, + const unsigned int child_no); + + /** + * Project a quadrature rule to + * all children of a + * cell. Similarly to + * project_to_all_subfaces(), + * this function replicates the + * formula generated by + * project_to_child() for all + * children, such that the + * weights sum up to one, the + * volume of the total cell + * again. + * + * The child + * numbering is the same as the + * children would be numbered + * upon refinement of the cell. + */ + static + Quadrature - project_to_all_children (const Quadrature &quadrature); ++ project_to_all_children (const Quadrature &quadrature); + + /** + * Project the onedimensional + * rule quadrature to + * the straight line connecting + * the points p1 and + * p2. + */ + static + Quadrature + project_to_line(const Quadrature<1> &quadrature, + const Point &p1, + const Point &p2); + + /** + * Since the + * project_to_all_faces() and + * project_to_all_subfaces() + * functions chain together the + * quadrature points and weights + * of all projections of a face + * quadrature formula to the + * faces or subfaces of a cell, + * we need a way to identify + * where the starting index of + * the points and weights for a + * particular face or subface + * is. This class provides this: + * there are static member + * functions that generate + * objects of this type, given + * face or subface indices, and + * you can then use the generated + * object in place of an integer + * that denotes the offset of a + * given dataset. + * + * @author Wolfgang Bangerth, 2003 + */ + class DataSetDescriptor + { public: - /** - * Define a typedef for a - * quadrature that acts on an - * object of one dimension - * less. For cells, this would - * then be a face quadrature. 
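
A brief sketch of the project_to_face() variant above that returns a cell quadrature rule, using a 1d Gauss formula on one face of the 2d reference cell. QGauss comes from quadrature_lib.h; the function name is an assumption made only for this illustration.

#include <deal.II/base/qprojector.h>
#include <deal.II/base/quadrature_lib.h>

void face_quadrature_example ()
{
  const dealii::QGauss<1>     face_rule (2);
  const dealii::Quadrature<2> cell_rule
    = dealii::QProjector<2>::project_to_face (face_rule, /*face_no=*/ 1);
  // the points of cell_rule lie on face 1 of the reference square
}
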
- */ - typedef Quadrature SubQuadrature; - - /** - * Compute the quadrature points - * on the cell if the given - * quadrature formula is used on - * face face_no. For further - * details, see the general doc - * for this class. - */ - static void project_to_face (const SubQuadrature &quadrature, - const unsigned int face_no, - std::vector > &q_points); - - /** - * Compute the cell quadrature - * formula corresponding to using - * quadrature on face - * face_no. For further - * details, see the general doc - * for this class. - */ - static Quadrature - project_to_face (const SubQuadrature &quadrature, - const unsigned int face_no); - - /** - * Compute the quadrature points on the - * cell if the given quadrature formula is - * used on face face_no, subface - * number subface_no corresponding - * to RefineCase::Type - * ref_case. The last argument is - * only used in 3D. - * - * @note Only the points are - * transformed. The quadrature - * weights are the same as those - * of the original rule. - */ - static void project_to_subface (const SubQuadrature &quadrature, - const unsigned int face_no, - const unsigned int subface_no, - std::vector > &q_points, - const RefinementCase &ref_case=RefinementCase::isotropic_refinement); - - /** - * Compute the cell quadrature formula - * corresponding to using - * quadrature on subface - * subface_no of face - * face_no with - * RefinementCase - * ref_case. The last argument is - * only used in 3D. - * - * @note Only the points are - * transformed. The quadrature - * weights are the same as those - * of the original rule. - */ - static Quadrature - project_to_subface (const SubQuadrature &quadrature, - const unsigned int face_no, - const unsigned int subface_no, - const RefinementCase &ref_case=RefinementCase::isotropic_refinement); - - /** - * Take a face quadrature formula - * and generate a cell quadrature - * formula from it where the - * quadrature points of the given - * argument are projected on all - * faces. - * - * The weights of the new rule - * are replications of the - * original weights. Thus, the - * sum of the weights is not one, - * but the number of faces, which - * is the surface of the - * reference cell. - * - * This in particular allows us - * to extract a subset of points - * corresponding to a single face - * and use it as a quadrature on - * this face, as is done in - * FEFaceValues. - * - * @note In 3D, this function - * produces eight sets of - * quadrature points for each - * face, in order to cope - * possibly different - * orientations of the mesh. - */ - static Quadrature - project_to_all_faces (const SubQuadrature &quadrature); - - /** - * Take a face quadrature formula - * and generate a cell quadrature - * formula from it where the - * quadrature points of the given - * argument are projected on all - * subfaces. - * - * Like in project_to_all_faces(), - * the weights of the new rule - * sum up to the number of faces - * (not subfaces), which - * is the surface of the - * reference cell. - * - * This in particular allows us - * to extract a subset of points - * corresponding to a single subface - * and use it as a quadrature on - * this face, as is done in - * FESubfaceValues. - */ - static Quadrature - project_to_all_subfaces (const SubQuadrature &quadrature); - - /** - * Project a given quadrature - * formula to a child of a - * cell. You may want to use this - * function in case you want to - * extend an integral only over - * the area which a potential - * child would occupy. 
The child - * numbering is the same as the - * children would be numbered - * upon refinement of the cell. - * - * As integration using this - * quadrature formula now only - * extends over a fraction of the - * cell, the weights of the - * resulting object are divided by - * GeometryInfo::children_per_cell. - */ + /** + * Default constructor. This + * doesn't do much except + * generating an invalid + * index, since you didn't + * give a valid descriptor of + * the cell, face, or subface + * you wanted. + */ + DataSetDescriptor (); + + /** + * Static function to + * generate the offset of a + * cell. Since we only have + * one cell per quadrature + * object, this offset is of + * course zero, but we carry + * this function around for + * consistency with the other + * static functions. + */ + static DataSetDescriptor cell (); + + /** + * Static function to generate an + * offset object for a given face of a + * cell with the given face + * orientation, flip and rotation. This + * function of course is only allowed + * if dim>=2, and the face + * orientation, flip and rotation are + * ignored if the space dimension + * equals 2. + * + * The last argument denotes + * the number of quadrature + * points the + * lower-dimensional face + * quadrature formula (the + * one that has been + * projected onto the faces) + * has. + */ static - Quadrature - project_to_child (const Quadrature &quadrature, - const unsigned int child_no); - - /** - * Project a quadrature rule to - * all children of a - * cell. Similarly to - * project_to_all_subfaces(), - * this function replicates the - * formula generated by - * project_to_child() for all - * children, such that the - * weights sum up to one, the - * volume of the total cell - * again. - * - * The child - * numbering is the same as the - * children would be numbered - * upon refinement of the cell. - */ + DataSetDescriptor + face (const unsigned int face_no, + const bool face_orientation, + const bool face_flip, + const bool face_rotation, + const unsigned int n_quadrature_points); + + /** + * Static function to generate an + * offset object for a given subface of + * a cell with the given face + * orientation, flip and rotation. This + * function of course is only allowed + * if dim>=2, and the face + * orientation, flip and rotation are + * ignored if the space dimension + * equals 2. + * + * The last but one argument denotes + * the number of quadrature + * points the + * lower-dimensional face + * quadrature formula (the + * one that has been + * projected onto the faces) + * has. + * + * Through the last argument + * anisotropic refinement can be + * respected. + */ static - Quadrature - project_to_all_children (const Quadrature &quadrature); - - /** - * Project the onedimensional - * rule quadrature to - * the straight line connecting - * the points p1 and - * p2. - */ - static - Quadrature - project_to_line(const Quadrature<1>& quadrature, - const Point& p1, - const Point& p2); - - /** - * Since the - * project_to_all_faces() and - * project_to_all_subfaces() - * functions chain together the - * quadrature points and weights - * of all projections of a face - * quadrature formula to the - * faces or subfaces of a cell, - * we need a way to identify - * where the starting index of - * the points and weights for a - * particular face or subface - * is. 
This class provides this: - * there are static member - * functions that generate - * objects of this type, given - * face or subface indices, and - * you can then use the generated - * object in place of an integer - * that denotes the offset of a - * given dataset. - * - * @author Wolfgang Bangerth, 2003 - */ - class DataSetDescriptor - { - public: - /** - * Default constructor. This - * doesn't do much except - * generating an invalid - * index, since you didn't - * give a valid descriptor of - * the cell, face, or subface - * you wanted. - */ - DataSetDescriptor (); - - /** - * Static function to - * generate the offset of a - * cell. Since we only have - * one cell per quadrature - * object, this offset is of - * course zero, but we carry - * this function around for - * consistency with the other - * static functions. - */ - static DataSetDescriptor cell (); - - /** - * Static function to generate an - * offset object for a given face of a - * cell with the given face - * orientation, flip and rotation. This - * function of course is only allowed - * if dim>=2, and the face - * orientation, flip and rotation are - * ignored if the space dimension - * equals 2. - * - * The last argument denotes - * the number of quadrature - * points the - * lower-dimensional face - * quadrature formula (the - * one that has been - * projected onto the faces) - * has. - */ - static - DataSetDescriptor - face (const unsigned int face_no, - const bool face_orientation, - const bool face_flip, - const bool face_rotation, - const unsigned int n_quadrature_points); - - /** - * Static function to generate an - * offset object for a given subface of - * a cell with the given face - * orientation, flip and rotation. This - * function of course is only allowed - * if dim>=2, and the face - * orientation, flip and rotation are - * ignored if the space dimension - * equals 2. - * - * The last but one argument denotes - * the number of quadrature - * points the - * lower-dimensional face - * quadrature formula (the - * one that has been - * projected onto the faces) - * has. - * - * Through the last argument - * anisotropic refinement can be - * respected. - */ - static - DataSetDescriptor - subface (const unsigned int face_no, - const unsigned int subface_no, - const bool face_orientation, - const bool face_flip, - const bool face_rotation, - const unsigned int n_quadrature_points, - const internal::SubfaceCase ref_case=internal::SubfaceCase::case_isotropic); - - /** - * Conversion operator to an - * integer denoting the - * offset of the first - * element of this dataset in - * the set of quadrature - * formulas all projected - * onto faces and - * subfaces. This conversion - * operator allows us to use - * offset descriptor objects - * in place of integer - * offsets. - */ - operator unsigned int () const; - - private: - /** - * Store the integer offset - * for a given cell, face, or - * subface. - */ - const unsigned int dataset_offset; - - /** - * This is the real - * constructor, but it is - * private and thus only - * available to the static - * member functions above. 
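
The sketch below shows how a DataSetDescriptor, as declared in this hunk, converts to an integer offset into the combined rule built by project_to_all_faces(). The chosen face number, flags and names are assumptions for illustration; in 2d the orientation flags are ignored, as stated above.

#include <deal.II/base/point.h>
#include <deal.II/base/qprojector.h>
#include <deal.II/base/quadrature_lib.h>

void all_faces_offset_example ()
{
  const dealii::QGauss<1>     face_rule (3);
  const dealii::Quadrature<2> all_faces
    = dealii::QProjector<2>::project_to_all_faces (face_rule);

  const unsigned int offset
    = dealii::QProjector<2>::DataSetDescriptor::face (/*face_no=*/ 2,
                                                      /*face_orientation=*/ true,
                                                      /*face_flip=*/ false,
                                                      /*face_rotation=*/ false,
                                                      face_rule.size ());

  const dealii::Point<2> p = all_faces.point (offset);   // first point on face 2
  (void) p;
}
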
- */ - DataSetDescriptor (const unsigned int dataset_offset); - }; + DataSetDescriptor + subface (const unsigned int face_no, + const unsigned int subface_no, + const bool face_orientation, + const bool face_flip, + const bool face_rotation, + const unsigned int n_quadrature_points, + const internal::SubfaceCase ref_case=internal::SubfaceCase::case_isotropic); + + /** + * Conversion operator to an + * integer denoting the + * offset of the first + * element of this dataset in + * the set of quadrature + * formulas all projected + * onto faces and + * subfaces. This conversion + * operator allows us to use + * offset descriptor objects + * in place of integer + * offsets. + */ + operator unsigned int () const; private: - /** - * Given a quadrature object in - * 2d, reflect all quadrature - * points at the main diagonal - * and return them with their - * original weights. - * - * This function is necessary for - * projecting a 2d quadrature - * rule onto the faces of a 3d - * cube, since there we need both - * orientations. - */ - static Quadrature<2> reflect (const Quadrature<2> &q); - - /** - * Given a quadrature object in - * 2d, rotate all quadrature - * points by @p n_times * 90 degrees - * counterclockwise - * and return them with their - * original weights. - * - * This function is necessary for - * projecting a 2d quadrature - * rule onto the faces of a 3d - * cube, since there we need all - * rotations to account for - * face_flip and face_rotation - * of non-standard faces. - */ - static Quadrature<2> rotate (const Quadrature<2> &q, - const unsigned int n_times); + /** + * Store the integer offset + * for a given cell, face, or + * subface. + */ + const unsigned int dataset_offset; + + /** + * This is the real + * constructor, but it is + * private and thus only + * available to the static + * member functions above. + */ + DataSetDescriptor (const unsigned int dataset_offset); + }; + + private: + /** + * Given a quadrature object in + * 2d, reflect all quadrature + * points at the main diagonal + * and return them with their + * original weights. + * + * This function is necessary for + * projecting a 2d quadrature + * rule onto the faces of a 3d + * cube, since there we need both + * orientations. + */ + static Quadrature<2> reflect (const Quadrature<2> &q); + + /** + * Given a quadrature object in + * 2d, rotate all quadrature + * points by @p n_times * 90 degrees + * counterclockwise + * and return them with their + * original weights. + * + * This function is necessary for + * projecting a 2d quadrature + * rule onto the faces of a 3d + * cube, since there we need all + * rotations to account for + * face_flip and face_rotation + * of non-standard faces. + */ + static Quadrature<2> rotate (const Quadrature<2> &q, + const unsigned int n_times); }; /*@}*/ diff --cc deal.II/include/deal.II/base/smartpointer.h index f580e80acc,afb56e0f90..1efb98f93d --- a/deal.II/include/deal.II/base/smartpointer.h +++ b/deal.II/include/deal.II/base/smartpointer.h @@@ -58,194 -58,194 +58,194 @@@ DEAL_II_NAMESPACE_OPE template class SmartPointer { - public: - /** - * Standard constructor for null - * pointer. The id of this - * pointer is set to the name of - * the class P. - */ - SmartPointer (); - - /* - * Copy constructor for - * SmartPointer. We do now - * copy the object subscribed to - * from tt, but subscribe - * ourselves to it again. - */ - template - SmartPointer (const SmartPointer &tt); - - /* - * Copy constructor for - * SmartPointer. 
We do now - * copy the object subscribed to - * from tt, but subscribe - * ourselves to it again. - */ - SmartPointer (const SmartPointer &tt); - - /** - * Constructor taking a normal - * pointer. If possible, i.e. if - * the pointer is not a null - * pointer, the constructor - * subscribes to the given object - * to lock it, i.e. to prevent - * its destruction before the end - * of its use. - * - * The id is used in the - * call to - * Subscriptor::subscribe(id) and - * by ~SmartPointer() in the call - * to Subscriptor::unsubscribe(). - */ - SmartPointer (T *t, const char* id); - - /** - * Constructor taking a normal - * pointer. If possible, i.e. if - * the pointer is not a null - * pointer, the constructor - * subscribes to the given object - * to lock it, i.e. to prevent - * its destruction before the end - * of its use. The id of this - * pointer is set to the name of - * the class P. - */ - SmartPointer (T *t); - - - /** - * Destructor, removing the - * subscription. - */ - ~SmartPointer(); - - /** - * Assignment operator for normal - * pointers. The pointer - * subscribes to the new object - * automatically and unsubscribes - * to an old one if it exists. It - * will not try to subscribe to a - * null-pointer, but still - * delete the old subscription. - */ - SmartPointer & operator= (T *tt); - - /** - * Assignment operator for - * SmartPointer. The pointer - * subscribes to the new object - * automatically and unsubscribes - * to an old one if it exists. - */ - template - SmartPointer & operator= (const SmartPointer &tt); - - /** - * Assignment operator for - * SmartPointer. The pointer - * subscribes to the new object - * automatically and unsubscribes - * to an old one if it exists. - */ - SmartPointer & operator= (const SmartPointer &tt); - - /** - * Delete the object pointed to - * and set the pointer to zero. - */ - void clear (); - - /** - * Conversion to normal pointer. - */ - operator T* () const; - - /** - * Dereferencing operator. This - * operator throws an - * ExcNotInitialized if the - * pointer is a null pointer. - */ - T& operator * () const; - - /** - * Dereferencing operator. This - * operator throws an - * ExcNotInitialized if the - * pointer is a null pointer. - */ - T * operator -> () const; - - /** - * Exchange the pointers of this - * object and the argument. Since - * both the objects to which is - * pointed are subscribed to - * before and after, we do not - * have to change their - * subscription counters. - * - * Note that this function (with - * two arguments) and the - * respective functions where one - * of the arguments is a pointer - * and the other one is a C-style - * pointer are implemented in - * global namespace. - */ - template - void swap (SmartPointer &tt); - - /** - * Swap pointers between this - * object and the pointer - * given. As this releases the - * object pointed to presently, - * we reduce its subscription - * count by one, and increase it - * at the object which we will - * point to in the future. - * - * Note that we indeed need a - * reference of a pointer, as we - * want to change the pointer - * variable which we are given. - */ - void swap (T *&tt); - - /** - * Return an estimate of the - * amount of memory (in bytes) - * used by this class. Note in - * particular, that this only - * includes the amount of memory - * used by this object, not - * by the object pointed to. - */ - std::size_t memory_consumption () const; - - private: - /** - * Pointer to the object we want - * to subscribt to. 
Since it is - * often necessary to follow this - * pointer when debugging, we - * have deliberately chosen a - * short name. - */ - T * t; - /** - * The identification for the - * subscriptor. - */ - const char* const id; + public: + /** + * Standard constructor for null + * pointer. The id of this + * pointer is set to the name of + * the class P. + */ + SmartPointer (); + + /* + * Copy constructor for + * SmartPointer. We do now + * copy the object subscribed to + * from tt, but subscribe + * ourselves to it again. + */ + template + SmartPointer (const SmartPointer &tt); + + /* + * Copy constructor for + * SmartPointer. We do now + * copy the object subscribed to + * from tt, but subscribe + * ourselves to it again. + */ + SmartPointer (const SmartPointer &tt); + + /** + * Constructor taking a normal + * pointer. If possible, i.e. if + * the pointer is not a null + * pointer, the constructor + * subscribes to the given object + * to lock it, i.e. to prevent + * its destruction before the end + * of its use. + * + * The id is used in the + * call to + * Subscriptor::subscribe(id) and + * by ~SmartPointer() in the call + * to Subscriptor::unsubscribe(). + */ + SmartPointer (T *t, const char *id); + + /** + * Constructor taking a normal + * pointer. If possible, i.e. if + * the pointer is not a null + * pointer, the constructor + * subscribes to the given object + * to lock it, i.e. to prevent + * its destruction before the end + * of its use. The id of this + * pointer is set to the name of + * the class P. + */ + SmartPointer (T *t); + + + /** + * Destructor, removing the + * subscription. + */ + ~SmartPointer(); + + /** + * Assignment operator for normal + * pointers. The pointer + * subscribes to the new object + * automatically and unsubscribes + * to an old one if it exists. It + * will not try to subscribe to a + * null-pointer, but still + * delete the old subscription. + */ + SmartPointer &operator= (T *tt); + + /** + * Assignment operator for + * SmartPointer. The pointer + * subscribes to the new object + * automatically and unsubscribes + * to an old one if it exists. + */ + template + SmartPointer &operator= (const SmartPointer &tt); + + /** + * Assignment operator for + * SmartPointer. The pointer + * subscribes to the new object + * automatically and unsubscribes + * to an old one if it exists. + */ + SmartPointer &operator= (const SmartPointer &tt); + + /** + * Delete the object pointed to + * and set the pointer to zero. + */ + void clear (); + + /** + * Conversion to normal pointer. + */ + operator T *() const; + + /** + * Dereferencing operator. This + * operator throws an + * ExcNotInitialized if the + * pointer is a null pointer. + */ + T &operator * () const; + + /** + * Dereferencing operator. This + * operator throws an + * ExcNotInitialized if the + * pointer is a null pointer. + */ + T *operator -> () const; + + /** + * Exchange the pointers of this + * object and the argument. Since + * both the objects to which is + * pointed are subscribed to + * before and after, we do not + * have to change their + * subscription counters. + * + * Note that this function (with + * two arguments) and the + * respective functions where one + * of the arguments is a pointer + * and the other one is a C-style + * pointer are implemented in + * global namespace. + */ + template + void swap (SmartPointer &tt); + + /** + * Swap pointers between this + * object and the pointer + * given. 
As this releases the + * object pointed to presently, + * we reduce its subscription + * count by one, and increase it + * at the object which we will + * point to in the future. + * + * Note that we indeed need a + * reference of a pointer, as we + * want to change the pointer + * variable which we are given. + */ - void swap (T *&tt); ++ void swap (T *&tt); + + /** + * Return an estimate of the + * amount of memory (in bytes) + * used by this class. Note in + * particular, that this only + * includes the amount of memory + * used by this object, not + * by the object pointed to. + */ + std::size_t memory_consumption () const; + + private: + /** + * Pointer to the object we want + * to subscribt to. Since it is + * often necessary to follow this + * pointer when debugging, we + * have deliberately chosen a + * short name. + */ + T *t; + /** + * The identification for the + * subscriptor. + */ + const char *const id; }; diff --cc deal.II/include/deal.II/base/utilities.h index 7d1b02b985,30df1f3884..355d46a74d --- a/deal.II/include/deal.II/base/utilities.h +++ b/deal.II/include/deal.II/base/utilities.h @@@ -265,15 -265,15 +265,15 @@@ namespace Utilitie Iterator lower_bound (Iterator first, Iterator last, - const T &val); + const T &val); - /** - * The same function as above, but taking - * an argument that is used to compare - * individual elements of the sequence of - * objects pointed to by the iterators. - */ + /** + * The same function as above, but taking + * an argument that is used to compare + * individual elements of the sequence of + * objects pointed to by the iterators. + */ template Iterator lower_bound (Iterator first, @@@ -602,43 -602,43 +602,43 @@@ void destroy_communicator (Epetra_Comm &communicator); - /** - * Return the number of MPI processes - * there exist in the given communicator - * object. If this is a sequential job, - * it returns 1. - */ + /** + * Return the number of MPI processes + * there exist in the given communicator + * object. If this is a sequential job, + * it returns 1. + */ unsigned int get_n_mpi_processes (const Epetra_Comm &mpi_communicator); - /** - * Return the number of the present MPI - * process in the space of processes - * described by the given - * communicator. This will be a unique - * value for each process between zero - * and (less than) the number of all - * processes (given by - * get_n_mpi_processes()). - */ + /** + * Return the number of the present MPI + * process in the space of processes + * described by the given + * communicator. This will be a unique + * value for each process between zero + * and (less than) the number of all + * processes (given by + * get_n_mpi_processes()). + */ unsigned int get_this_mpi_process (const Epetra_Comm &mpi_communicator); - /** - * Given a Trilinos Epetra map, create a - * new map that has the same subdivision - * of elements to processors but uses the - * given communicator object instead of - * the one stored in the first - * argument. In essence, this means that - * we create a map that communicates - * among the same processors in the same - * way, but using a separate channel. - * - * This function is typically used with a - * communicator that has been obtained by - * the duplicate_communicator() function. - */ + /** + * Given a Trilinos Epetra map, create a + * new map that has the same subdivision + * of elements to processors but uses the + * given communicator object instead of + * the one stored in the first + * argument. 
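
A small sketch of the SmartPointer class from the smartpointer.h hunk above. The pointed-to type must be derived from Subscriptor (Quadrature is), and the string passed as id is forwarded to Subscriptor::subscribe(), as described above. QGauss and the variable names are illustrative.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/smartpointer.h>

void smart_pointer_example ()
{
  dealii::QGauss<2> quadrature (2);
  dealii::SmartPointer<const dealii::Quadrature<2> > q_ptr (&quadrature, "q_ptr");

  const unsigned int n_points = q_ptr->size ();   // dereference via operator->
  (void) n_points;
}
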
In essence, this means that + * we create a map that communicates + * among the same processors in the same + * way, but using a separate channel. + * + * This function is typically used with a + * communicator that has been obtained by + * the duplicate_communicator() function. + */ Epetra_Map - duplicate_map (const Epetra_BlockMap &map, + duplicate_map (const Epetra_BlockMap &map, const Epetra_Comm &comm); } diff --cc deal.II/include/deal.II/dofs/block_info.h index a8a874055a,9738685202..27341acc0b --- a/deal.II/include/deal.II/dofs/block_info.h +++ b/deal.II/include/deal.II/dofs/block_info.h @@@ -88,147 -88,147 +88,147 @@@ namespace h */ class BlockInfo : public Subscriptor { - public: - /** - * @brief Fill the object with values - * describing block structure - * of the DoFHandler. - * - * This function will also clear - * the local() indices. - */ - template - void initialize(const DoFHandler&, bool levels_only = false, bool multigrid = false); - - /** - * @brief Fill the object with values - * describing level block - * structure of the - * MGDoFHandler. If - * levels_only is false, - * the other initialize() is - * called as well. - * - * This function will also clear - * the local() indices. - */ - template - void initialize(const MGDoFHandler&, bool levels_only = false); - - /** - * @brief Initialize block structure - * on cells and compute - * renumbering between cell - * dofs and block cell dofs. - */ - template - void initialize_local(const DoFHandler&); - - /** - * Access the BlockIndices - * structure of the global - * system. - */ - const BlockIndices& global() const; - - /** - * Access BlockIndices for the - * local system on a cell. - */ - const BlockIndices& local() const; - - /** - * Access the BlockIndices - * structure of a level in the - * multilevel hierarchy. - */ - const BlockIndices& level(unsigned int level) const; - - /** - * Return the index after local - * renumbering. - * - * The input of this function is - * an index between zero and the - * number of dofs per cell, - * numbered in local block - * ordering, that is first all - * indices of the first system - * block, then all of the second - * block and so forth. The - * function then outputs the index - * in the standard local - * numbering of DoFAccessor. - */ - unsigned int renumber (const unsigned int i) const; - - /** - * The number of base elements. - */ - unsigned int n_base_elements() const; - - /** - * Return the base element of - * this index. - */ - unsigned int base_element (const unsigned int i) const; - - /** - * Write a summary of the block - * structure to the stream. - */ - template - void - print(OS& stream) const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. - */ - std::size_t memory_consumption () const; - - /** - * Read or write the data of this object to or - * from a stream for the purpose of serialization - */ - template - void serialize (Archive & ar, - const unsigned int version); - - private: - /** - * @brief The block structure - * of the global system. - */ - BlockIndices bi_global; - /** - * @brief The multilevel block structure. - */ - std::vector levels; - - /** - * @brief The block structure - * of the cell systems. - */ - BlockIndices bi_local; - - /** - * The base element associated - * with each block. - */ - std::vector base_elements; - - /** - * A vector containing the - * renumbering from the - * standard order of degrees of - * freedom on a cell to a - * component wise - * ordering. Filled by - * initialize(). 
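To see how the BlockInfo interface listed here fits together, the following stand-alone sketch may help. It is illustrative rather than part of the patch: it builds a small vector-valued FESystem, fills a BlockInfo using the initialize() signature introduced in this change together with initialize_local(), and queries the resulting BlockIndices. For the global block sizes to match the actual DoF numbering one would normally apply a block-wise renumbering first.

    #include <deal.II/dofs/block_info.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/fe_system.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>
    #include <deal.II/lac/block_indices.h>

    #include <iostream>

    int main ()
    {
      dealii::Triangulation<2> triangulation;
      dealii::GridGenerator::hyper_cube (triangulation);
      triangulation.refine_global (2);

      // A Stokes-like element: two quadratic components and one linear one.
      dealii::FESystem<2>   fe (dealii::FE_Q<2> (2), 2,
                                dealii::FE_Q<2> (1), 1);
      dealii::DoFHandler<2> dof_handler (triangulation);
      dof_handler.distribute_dofs (fe);

      dealii::BlockInfo block_info;
      block_info.initialize (dof_handler, false, false);  // new signature
      block_info.initialize_local (dof_handler);

      std::cout << "global blocks: " << block_info.global ().size ()
                << ", local dofs per cell: "
                << block_info.local ().total_size () << std::endl;

      return 0;
    }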
- */ - std::vector local_renumbering; + public: + /** + * @brief Fill the object with values + * describing block structure + * of the DoFHandler. + * + * This function will also clear + * the local() indices. + */ + template - void initialize(const DoFHandler &); ++ void initialize(const DoFHandler &, bool levels_only = false, bool multigrid = false); + + /** + * @brief Fill the object with values + * describing level block + * structure of the + * MGDoFHandler. If + * levels_only is false, + * the other initialize() is + * called as well. + * + * This function will also clear + * the local() indices. + */ + template + void initialize(const MGDoFHandler &, bool levels_only = false); + + /** + * @brief Initialize block structure + * on cells and compute + * renumbering between cell + * dofs and block cell dofs. + */ + template + void initialize_local(const DoFHandler &); + + /** + * Access the BlockIndices + * structure of the global + * system. + */ + const BlockIndices &global() const; + + /** + * Access BlockIndices for the + * local system on a cell. + */ + const BlockIndices &local() const; + + /** + * Access the BlockIndices + * structure of a level in the + * multilevel hierarchy. + */ + const BlockIndices &level(unsigned int level) const; + + /** + * Return the index after local + * renumbering. + * + * The input of this function is + * an index between zero and the + * number of dofs per cell, + * numbered in local block + * ordering, that is first all + * indices of the first system + * block, then all of the second + * block and so forth. The + * function then outputs the index + * in the standard local + * numbering of DoFAccessor. + */ + unsigned int renumber (const unsigned int i) const; + + /** + * The number of base elements. + */ + unsigned int n_base_elements() const; + + /** + * Return the base element of + * this index. + */ + unsigned int base_element (const unsigned int i) const; + + /** + * Write a summary of the block + * structure to the stream. + */ + template + void + print(OS &stream) const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. + */ + std::size_t memory_consumption () const; + + /** + * Read or write the data of this object to or + * from a stream for the purpose of serialization + */ + template + void serialize (Archive &ar, + const unsigned int version); + + private: + /** + * @brief The block structure + * of the global system. + */ + BlockIndices bi_global; + /** + * @brief The multilevel block structure. + */ + std::vector levels; + + /** + * @brief The block structure + * of the cell systems. + */ + BlockIndices bi_local; + + /** + * The base element associated + * with each block. + */ + std::vector base_elements; + + /** + * A vector containing the + * renumbering from the + * standard order of degrees of + * freedom on a cell to a + * component wise + * ordering. Filled by + * initialize(). + */ + std::vector local_renumbering; }; diff --cc deal.II/include/deal.II/dofs/dof_accessor.h index 8c1afa1947,a0fd1a8c2c..1fe3425024 --- a/deal.II/include/deal.II/dofs/dof_accessor.h +++ b/deal.II/include/deal.II/dofs/dof_accessor.h @@@ -174,626 -174,616 +174,626 @@@ namespace interna template class DoFAccessor : public dealii::internal::DoFAccessor::Inheritance::BaseClass { - public: - - /** - * A static variable that allows users of - * this class to discover the value of - * the second template argument. 
- */ - static const unsigned int dimension=DH::dimension; - - /** - * A static variable that allows users of - * this class to discover the value of - * the third template argument. - */ - static const unsigned int space_dimension=DH::space_dimension; - - /** - * Declare a typedef to the base - * class to make accessing some - * of the exception classes - * simpler. - */ - typedef - typename dealii::internal::DoFAccessor::Inheritance::BaseClass - BaseClass; - - /** - * Data type passed by the iterator class. - */ - typedef DH AccessorData; - - /** - * @name Constructors - */ - /** - * @{ - */ - - /** - * Default constructor. Provides - * an accessor that can't be - * used. - */ - DoFAccessor (); - - /** - * Constructor - */ - DoFAccessor (const Triangulation *tria, - const int level, - const int index, - const DH *local_data); - - /** - * Conversion constructor. This - * constructor exists to make certain - * constructs simpler to write in - * dimension independent code. For - * example, it allows assigning a face - * iterator to a line iterator, an - * operation that is useful in 2d but - * doesn't make any sense in 3d. The - * constructor here exists for the - * purpose of making the code conform to - * C++ but it will unconditionally abort; - * in other words, assigning a face - * iterator to a line iterator is better - * put into an if-statement that checks - * that the dimension is two, and assign - * to a quad iterator in 3d (an operator - * that, without this constructor would - * be illegal if we happen to compile for - * 2d). - */ - template - DoFAccessor (const InvalidAccessor &); - - /** - * Another conversion operator - * between objects that don't - * make sense, just like the - * previous one. - */ - template - DoFAccessor (const DoFAccessor &); - - /** - * @} - */ - - /** - * Return a handle on the - * DoFHandler object which we - * are using. - */ - const DH & - get_dof_handler () const; - - /** - * Implement the copy operator needed - * for the iterator classes. - */ - void copy_from (const DoFAccessor &a); - - /** - * Copy operator used by the - * iterator class. Keeps the - * previously set dof handler, - * but sets the object - * coordinates of the TriaAccessor. - */ - void copy_from (const TriaAccessorBase &da); - - /** - * Return an iterator pointing to - * the the parent. - */ - TriaIterator > - parent () const; - - /** - * @name Accessing sub-objects - */ - /** - * @{ - */ - - /** - * Return an iterator pointing to - * the the @p c-th child. - */ - TriaIterator > - child (const unsigned int c) const; - - /** - * Pointer to the @p ith line - * bounding this object. If the - * current object is a line itself, - * then the only valid index is - * @p i equals to zero, and the - * function returns an iterator - * to itself. - */ - typename dealii::internal::DoFHandler::Iterators::line_iterator - line (const unsigned int i) const; - - /** - * Pointer to the @p ith quad - * bounding this object. If the - * current object is a quad itself, - * then the only valid index is - * @p i equals to zero, and the - * function returns an iterator - * to itself. - */ - typename dealii::internal::DoFHandler::Iterators::quad_iterator - quad (const unsigned int i) const; - - /** - * @} - */ - - /** - * @name Accessing the DoF indices of this object - */ - /** - * @{ - */ - - /** - * Return the indices of the dofs of this - * object in the standard ordering: dofs - * on vertex 0, dofs on vertex 1, etc, - * dofs on line 0, dofs on line 1, etc, - * dofs on quad 0, etc. 
- * - * The vector has to have the - * right size before being passed - * to this function. - * - * This function is most often - * used on active objects (edges, - * faces, cells). It can be used - * on non-active objects as well - * (i.e. objects that have - * children), but only if the - * finite element under - * consideration has degrees of - * freedom exclusively on - * vertices. Otherwise, the - * function doesn't make much - * sense, since for example - * inactive edges do not have - * degrees of freedom associated - * with them at all. - * - * The last argument denotes the - * finite element index. For the - * standard ::DoFHandler class, - * this value must be equal to - * its default value since that - * class only supports the same - * finite element on all cells - * anyway. - * - * However, for hp objects - * (i.e. the hp::DoFHandler - * class), different finite - * element objects may be used on - * different cells. On faces - * between two cells, as well as - * vertices, there may therefore - * be two sets of degrees of - * freedom, one for each of the - * finite elements used on the - * adjacent cells. In order to - * specify which set of degrees - * of freedom to work on, the - * last argument is used to - * disambiguate. Finally, if this - * function is called for a cell - * object, there can only be a - * single set of degrees of - * freedom, and fe_index has to - * match the result of - * active_fe_index(). - * - * For cells, there is only a - * single possible finite element - * index (namely the one for that - * cell, returned by - * cell-@>active_fe_index. Consequently, - * the derived DoFCellAccessor - * class has an overloaded - * version of this function that - * calls the present function - * with - * cell-@>active_fe_index - * as last argument. - */ - void get_dof_indices (std::vector &dof_indices, - const unsigned int fe_index = DH::default_fe_index) const; - - void get_mg_dof_indices (const int level, std::vector& dof_indices, const unsigned int fe_index = DH::default_fe_index) const; - - /** - * Global DoF index of the i - * degree associated with the @p vertexth - * vertex of the present cell. - * - * The last argument denotes the - * finite element index. For the - * standard ::DoFHandler class, - * this value must be equal to - * its default value since that - * class only supports the same - * finite element on all cells - * anyway. - * - * However, for hp objects - * (i.e. the hp::DoFHandler - * class), different finite - * element objects may be used on - * different cells. On faces - * between two cells, as well as - * vertices, there may therefore - * be two sets of degrees of - * freedom, one for each of the - * finite elements used on the - * adjacent cells. In order to - * specify which set of degrees - * of freedom to work on, the - * last argument is used to - * disambiguate. Finally, if this - * function is called for a cell - * object, there can only be a - * single set of degrees of - * freedom, and fe_index has to - * match the result of - * active_fe_index(). - */ - unsigned int vertex_dof_index (const unsigned int vertex, - const unsigned int i, - const unsigned int fe_index = DH::default_fe_index) const; - - unsigned int mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index = DH::default_fe_index) const; - - /** - * Index of the ith degree - * of freedom of this object. - * - * The last argument denotes the - * finite element index. 
For the - * standard ::DoFHandler class, - * this value must be equal to - * its default value since that - * class only supports the same - * finite element on all cells - * anyway. - * - * However, for hp objects - * (i.e. the hp::DoFHandler - * class), different finite - * element objects may be used on - * different cells. On faces - * between two cells, as well as - * vertices, there may therefore - * be two sets of degrees of - * freedom, one for each of the - * finite elements used on the - * adjacent cells. In order to - * specify which set of degrees - * of freedom to work on, the - * last argument is used to - * disambiguate. Finally, if this - * function is called for a cell - * object, there can only be a - * single set of degrees of - * freedom, and fe_index has to - * match the result of - * active_fe_index(). - * - * @note While the get_dof_indices() - * function returns an array that - * contains the indices of all degrees of - * freedom that somehow live on this - * object (i.e. on the vertices, edges or - * interior of this object), the current - * dof_index() function only considers - * the DoFs that really belong to this - * particular object's interior. In other - * words, as an example, if the current - * object refers to a quad (a cell in 2d, - * a face in 3d) and the finite element - * associated with it is a bilinear one, - * then the get_dof_indices() will return - * an array of size 4 while dof_index() - * will produce an exception because no - * degrees are defined in the interior of - * the face. - */ - unsigned int dof_index (const unsigned int i, - const unsigned int fe_index = DH::default_fe_index) const; - - unsigned int mg_dof_index (const int level, const unsigned int i) const; - - /** - * @} - */ - - /** - * @name Accessing the finite element associated with this object - */ - /** - * @{ - */ - - /** - * Return the number of finite - * elements that are active on a - * given object. - * - * For non-hp DoFHandler objects, - * the answer is of course always - * one. However, for - * hp::DoFHandler objects, this - * isn't the case: If this is a - * cell, the answer is of course - * one. If it is a face, the - * answer may be one or two, - * depending on whether the two - * adjacent cells use the same - * finite element or not. If it - * is an edge in 3d, the possible - * return value may be one or any - * other value larger than that. - */ - unsigned int - n_active_fe_indices () const; - - /** - * Return the @p n-th active fe - * index on this object. For - * cells and all non-hp objects, - * there is only a single active - * fe index, so the argument must - * be equal to zero. For - * lower-dimensional hp objects, - * there are - * n_active_fe_indices() active - * finite elements, and this - * function can be queried for - * their indices. - */ - unsigned int - nth_active_fe_index (const unsigned int n) const; - - /** - * Return true if the finite - * element with given index is - * active on the present - * object. For non-hp DoF - * accessors, this is of course - * the case only if @p fe_index - * equals zero. For cells, it is - * the case if @p fe_index equals - * active_fe_index() of this - * cell. For faces and other - * lower-dimensional objects, - * there may be more than one @p - * fe_index that are active on - * any given object (see - * n_active_fe_indices()). - */ - bool - fe_index_is_active (const unsigned int fe_index) const; - - /** - * Return a reference to the finite - * element used on this object with the - * given @p fe_index. 
@p fe_index must be - * used on this object, - * i.e. fe_index_is_active(fe_index) - * must return true. - */ - const FiniteElement & - get_fe (const unsigned int fe_index) const; - - /** - * @} - */ - - /** - * Exceptions for child classes - * - * @ingroup Exceptions - */ - DeclException0 (ExcInvalidObject); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException0 (ExcVectorNotEmpty); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException0 (ExcVectorDoesNotMatch); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException0 (ExcMatrixDoesNotMatch); - /** - * A function has been called for - * a cell which should be active, - * but is refined. @ref GlossActive - * - * @ingroup Exceptions - */ - DeclException0 (ExcNotActive); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException0 (ExcCantCompareIterators); - - protected: - - /** - * Store the address of the DoFHandler object - * to be accessed. - */ - DH *dof_handler; - - /** - * Compare for equality. - */ - bool operator == (const DoFAccessor &) const; - - /** - * Compare for inequality. - */ - bool operator != (const DoFAccessor &) const; - - /** - * Reset the DoF handler pointer. - */ - void set_dof_handler (DH *dh); - - /** - * Set the index of the - * ith degree of freedom - * of this object to @p index. - * - * The last argument denotes the - * finite element index. For the - * standard ::DoFHandler class, - * this value must be equal to - * its default value since that - * class only supports the same - * finite element on all cells - * anyway. - * - * However, for hp objects - * (i.e. the hp::DoFHandler - * class), different finite - * element objects may be used on - * different cells. On faces - * between two cells, as well as - * vertices, there may therefore - * be two sets of degrees of - * freedom, one for each of the - * finite elements used on the - * adjacent cells. In order to - * specify which set of degrees - * of freedom to work on, the - * last argument is used to - * disambiguate. Finally, if this - * function is called for a cell - * object, there can only be a - * single set of degrees of - * freedom, and fe_index has to - * match the result of - * active_fe_index(). - */ - void set_dof_index (const unsigned int i, - const unsigned int index, - const unsigned int fe_index = DH::default_fe_index) const; - - void set_mg_dof_index (const int level, const unsigned int i, const unsigned int index) const; - - /** - * Set the global index of the i - * degree on the @p vertex-th vertex of - * the present cell to @p index. - * - * The last argument denotes the - * finite element index. For the - * standard ::DoFHandler class, - * this value must be equal to - * its default value since that - * class only supports the same - * finite element on all cells - * anyway. - * - * However, for hp objects - * (i.e. the hp::DoFHandler - * class), different finite - * element objects may be used on - * different cells. On faces - * between two cells, as well as - * vertices, there may therefore - * be two sets of degrees of - * freedom, one for each of the - * finite elements used on the - * adjacent cells. In order to - * specify which set of degrees - * of freedom to work on, the - * last argument is used to - * disambiguate. Finally, if this - * function is called for a cell - * object, there can only be a - * single set of degrees of - * freedom, and fe_index has to - * match the result of - * active_fe_index(). 
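The fe_index machinery documented above (n_active_fe_indices(), nth_active_fe_index(), fe_index_is_active() and get_fe()) matters mainly for hp::DoFHandler objects. The snippet below is an illustrative sketch, not part of the patch: it assigns two different FE_Q degrees to the cells of a small mesh and then asks the faces of the first cell how many finite elements are active on them.

    #include <deal.II/base/geometry_info.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>
    #include <deal.II/hp/dof_handler.h>
    #include <deal.II/hp/fe_collection.h>

    #include <iostream>

    int main ()
    {
      dealii::Triangulation<2> triangulation;
      dealii::GridGenerator::subdivided_hyper_cube (triangulation, 2);

      // Two candidate elements of different polynomial degree.
      dealii::hp::FECollection<2> fe_collection;
      fe_collection.push_back (dealii::FE_Q<2> (1));
      fe_collection.push_back (dealii::FE_Q<2> (2));

      dealii::hp::DoFHandler<2> dof_handler (triangulation);

      // Use Q1 on the first cell and Q2 everywhere else.
      unsigned int c = 0;
      for (dealii::hp::DoFHandler<2>::active_cell_iterator
             cell = dof_handler.begin_active ();
           cell != dof_handler.end (); ++cell, ++c)
        cell->set_active_fe_index (c == 0 ? 0 : 1);
      dof_handler.distribute_dofs (fe_collection);

      // Interior faces of the first cell border a Q2 cell, so two finite
      // elements are active there; boundary faces see only the Q1 cell.
      dealii::hp::DoFHandler<2>::active_cell_iterator
        cell = dof_handler.begin_active ();
      for (unsigned int f = 0; f < dealii::GeometryInfo<2>::faces_per_cell; ++f)
        std::cout << "face " << f << ": "
                  << cell->face (f)->n_active_fe_indices ()
                  << " active fe index/indices" << std::endl;

      return 0;
    }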
- */ - void set_vertex_dof_index (const unsigned int vertex, - const unsigned int i, - const unsigned int index, - const unsigned int fe_index = DH::default_fe_index) const; - - void set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int index, const unsigned int fe_index = DH::default_fe_index) const; - - /** - * Iterator classes need to be friends - * because they need to access operator== - * and operator!=. - */ - template friend class TriaRawIterator; - - - private: - /** - * Copy operator. This is normally used - * in a context like iterator a,b; - * *a=*b;. Presumably, the intent - * here is to copy the object pointed to - * by @p b to the object pointed to by - * @p a. However, the result of - * dereferencing an iterator is not an - * object but an accessor; consequently, - * this operation is not useful for - * iterators on triangulations. We - * declare this function here private, - * thus it may not be used from outside. - * Furthermore it is not implemented and - * will give a linker error if used - * anyway. - */ - DoFAccessor & - operator = (const DoFAccessor &da); - - /** - * Make the DoFHandler class a friend so - * that it can call the set_xxx() - * functions. - */ - template friend class DoFHandler; - template friend class hp::DoFHandler; - - friend struct dealii::internal::DoFHandler::Policy::Implementation; - friend struct dealii::internal::DoFHandler::Implementation; - friend struct dealii::internal::hp::DoFHandler::Implementation; - friend struct dealii::internal::DoFCellAccessor::Implementation; + public: + + /** + * A static variable that allows users of + * this class to discover the value of + * the second template argument. + */ + static const unsigned int dimension=DH::dimension; + + /** + * A static variable that allows users of + * this class to discover the value of + * the third template argument. + */ + static const unsigned int space_dimension=DH::space_dimension; + + /** + * Declare a typedef to the base + * class to make accessing some + * of the exception classes + * simpler. + */ + typedef + typename dealii::internal::DoFAccessor::Inheritance::BaseClass + BaseClass; + + /** + * Data type passed by the iterator class. + */ + typedef DH AccessorData; + + /** + * @name Constructors + */ + /** + * @{ + */ + + /** + * Default constructor. Provides + * an accessor that can't be + * used. + */ + DoFAccessor (); + + /** + * Constructor + */ + DoFAccessor (const Triangulation *tria, + const int level, + const int index, + const DH *local_data); + + /** + * Conversion constructor. This + * constructor exists to make certain + * constructs simpler to write in + * dimension independent code. For + * example, it allows assigning a face + * iterator to a line iterator, an + * operation that is useful in 2d but + * doesn't make any sense in 3d. The + * constructor here exists for the + * purpose of making the code conform to + * C++ but it will unconditionally abort; + * in other words, assigning a face + * iterator to a line iterator is better + * put into an if-statement that checks + * that the dimension is two, and assign + * to a quad iterator in 3d (an operator + * that, without this constructor would + * be illegal if we happen to compile for + * 2d). + */ + template + DoFAccessor (const InvalidAccessor &); + + /** + * Another conversion operator + * between objects that don't + * make sense, just like the + * previous one. 
+ */ + template + DoFAccessor (const DoFAccessor &); + + /** + * @} + */ + + /** + * Return a handle on the + * DoFHandler object which we + * are using. + */ + const DH & + get_dof_handler () const; + + /** + * Implement the copy operator needed + * for the iterator classes. + */ + void copy_from (const DoFAccessor &a); + + /** + * Copy operator used by the + * iterator class. Keeps the + * previously set dof handler, + * but sets the object + * coordinates of the TriaAccessor. + */ + void copy_from (const TriaAccessorBase &da); + + /** + * Return an iterator pointing to + * the the parent. + */ + TriaIterator > + parent () const; + + /** + * @name Accessing sub-objects + */ + /** + * @{ + */ + + /** + * Return an iterator pointing to + * the the @p c-th child. + */ + TriaIterator > + child (const unsigned int c) const; + + /** + * Pointer to the @p ith line + * bounding this object. If the + * current object is a line itself, + * then the only valid index is + * @p i equals to zero, and the + * function returns an iterator + * to itself. + */ + typename dealii::internal::DoFHandler::Iterators::line_iterator + line (const unsigned int i) const; + + /** + * Pointer to the @p ith quad + * bounding this object. If the + * current object is a quad itself, + * then the only valid index is + * @p i equals to zero, and the + * function returns an iterator + * to itself. + */ + typename dealii::internal::DoFHandler::Iterators::quad_iterator + quad (const unsigned int i) const; + + /** + * @} + */ + + /** + * @name Accessing the DoF indices of this object + */ + /** + * @{ + */ + + /** + * Return the indices of the dofs of this + * object in the standard ordering: dofs + * on vertex 0, dofs on vertex 1, etc, + * dofs on line 0, dofs on line 1, etc, + * dofs on quad 0, etc. + * + * The vector has to have the + * right size before being passed + * to this function. + * + * This function is most often + * used on active objects (edges, + * faces, cells). It can be used + * on non-active objects as well + * (i.e. objects that have + * children), but only if the + * finite element under + * consideration has degrees of + * freedom exclusively on + * vertices. Otherwise, the + * function doesn't make much + * sense, since for example + * inactive edges do not have + * degrees of freedom associated + * with them at all. + * + * The last argument denotes the + * finite element index. For the + * standard ::DoFHandler class, + * this value must be equal to + * its default value since that + * class only supports the same + * finite element on all cells + * anyway. + * + * However, for hp objects + * (i.e. the hp::DoFHandler + * class), different finite + * element objects may be used on + * different cells. On faces + * between two cells, as well as + * vertices, there may therefore + * be two sets of degrees of + * freedom, one for each of the + * finite elements used on the + * adjacent cells. In order to + * specify which set of degrees + * of freedom to work on, the + * last argument is used to + * disambiguate. Finally, if this + * function is called for a cell + * object, there can only be a + * single set of degrees of + * freedom, and fe_index has to + * match the result of + * active_fe_index(). + * + * For cells, there is only a + * single possible finite element + * index (namely the one for that + * cell, returned by + * cell-@>active_fe_index. 
Consequently, + * the derived DoFCellAccessor + * class has an overloaded + * version of this function that + * calls the present function + * with + * cell-@>active_fe_index + * as last argument. + */ + void get_dof_indices (std::vector &dof_indices, + const unsigned int fe_index = DH::default_fe_index) const; + ++ void get_mg_dof_indices (const int level, std::vector &dof_indices, const unsigned int fe_index = DH::default_fe_index) const; ++ + /** + * Global DoF index of the i + * degree associated with the @p vertexth + * vertex of the present cell. + * + * The last argument denotes the + * finite element index. For the + * standard ::DoFHandler class, + * this value must be equal to + * its default value since that + * class only supports the same + * finite element on all cells + * anyway. + * + * However, for hp objects + * (i.e. the hp::DoFHandler + * class), different finite + * element objects may be used on + * different cells. On faces + * between two cells, as well as + * vertices, there may therefore + * be two sets of degrees of + * freedom, one for each of the + * finite elements used on the + * adjacent cells. In order to + * specify which set of degrees + * of freedom to work on, the + * last argument is used to + * disambiguate. Finally, if this + * function is called for a cell + * object, there can only be a + * single set of degrees of + * freedom, and fe_index has to + * match the result of + * active_fe_index(). + */ + unsigned int vertex_dof_index (const unsigned int vertex, + const unsigned int i, + const unsigned int fe_index = DH::default_fe_index) const; + ++ unsigned int mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index = DH::default_fe_index) const; ++ + /** + * Index of the ith degree + * of freedom of this object. + * + * The last argument denotes the + * finite element index. For the + * standard ::DoFHandler class, + * this value must be equal to + * its default value since that + * class only supports the same + * finite element on all cells + * anyway. + * + * However, for hp objects + * (i.e. the hp::DoFHandler + * class), different finite + * element objects may be used on + * different cells. On faces + * between two cells, as well as + * vertices, there may therefore + * be two sets of degrees of + * freedom, one for each of the + * finite elements used on the + * adjacent cells. In order to + * specify which set of degrees + * of freedom to work on, the + * last argument is used to + * disambiguate. Finally, if this + * function is called for a cell + * object, there can only be a + * single set of degrees of + * freedom, and fe_index has to + * match the result of + * active_fe_index(). + * + * @note While the get_dof_indices() + * function returns an array that + * contains the indices of all degrees of + * freedom that somehow live on this + * object (i.e. on the vertices, edges or + * interior of this object), the current + * dof_index() function only considers + * the DoFs that really belong to this + * particular object's interior. In other + * words, as an example, if the current + * object refers to a quad (a cell in 2d, + * a face in 3d) and the finite element + * associated with it is a bilinear one, + * then the get_dof_indices() will return + * an array of size 4 while dof_index() + * will produce an exception because no + * degrees are defined in the interior of + * the face. 
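As a concrete illustration of get_dof_indices() on a non-hp DoFHandler (the snippet is not part of the patch, and the usual <dim> template arguments that the diff omits are restored): the caller sizes the index vector to fe.dofs_per_cell, and the function fills it in the vertex/line/quad ordering described above.

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>

    #include <vector>

    int main ()
    {
      dealii::Triangulation<2> triangulation;
      dealii::GridGenerator::hyper_cube (triangulation);
      triangulation.refine_global (3);

      dealii::FE_Q<2>       fe (1);
      dealii::DoFHandler<2> dof_handler (triangulation);
      dof_handler.distribute_dofs (fe);

      std::vector<unsigned int> local_dof_indices (fe.dofs_per_cell);
      for (dealii::DoFHandler<2>::active_cell_iterator
             cell = dof_handler.begin_active ();
           cell != dof_handler.end (); ++cell)
        {
          // Fills the vector with this cell's global DoF indices; for the
          // non-hp DoFHandler the default fe_index argument is the right one.
          cell->get_dof_indices (local_dof_indices);
        }

      return 0;
    }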
+ */ + unsigned int dof_index (const unsigned int i, + const unsigned int fe_index = DH::default_fe_index) const; + ++ unsigned int mg_dof_index (const int level, const unsigned int i) const; ++ + /** + * @} + */ + + /** + * @name Accessing the finite element associated with this object + */ + /** + * @{ + */ + + /** + * Return the number of finite + * elements that are active on a + * given object. + * + * For non-hp DoFHandler objects, + * the answer is of course always + * one. However, for + * hp::DoFHandler objects, this + * isn't the case: If this is a + * cell, the answer is of course + * one. If it is a face, the + * answer may be one or two, + * depending on whether the two + * adjacent cells use the same + * finite element or not. If it + * is an edge in 3d, the possible + * return value may be one or any + * other value larger than that. + */ + unsigned int + n_active_fe_indices () const; + + /** + * Return the @p n-th active fe + * index on this object. For + * cells and all non-hp objects, + * there is only a single active + * fe index, so the argument must + * be equal to zero. For + * lower-dimensional hp objects, + * there are + * n_active_fe_indices() active + * finite elements, and this + * function can be queried for + * their indices. + */ + unsigned int + nth_active_fe_index (const unsigned int n) const; + + /** + * Return true if the finite + * element with given index is + * active on the present + * object. For non-hp DoF + * accessors, this is of course + * the case only if @p fe_index + * equals zero. For cells, it is + * the case if @p fe_index equals + * active_fe_index() of this + * cell. For faces and other + * lower-dimensional objects, + * there may be more than one @p + * fe_index that are active on + * any given object (see + * n_active_fe_indices()). + */ + bool + fe_index_is_active (const unsigned int fe_index) const; + + /** + * Return a reference to the finite + * element used on this object with the + * given @p fe_index. @p fe_index must be + * used on this object, + * i.e. fe_index_is_active(fe_index) + * must return true. + */ + const FiniteElement & + get_fe (const unsigned int fe_index) const; + + /** + * @} + */ + + /** + * Exceptions for child classes + * + * @ingroup Exceptions + */ + DeclException0 (ExcInvalidObject); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcVectorNotEmpty); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcVectorDoesNotMatch); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcMatrixDoesNotMatch); + /** + * A function has been called for + * a cell which should be active, + * but is refined. @ref GlossActive + * + * @ingroup Exceptions + */ + DeclException0 (ExcNotActive); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcCantCompareIterators); + + protected: + + /** + * Store the address of the DoFHandler object + * to be accessed. + */ + DH *dof_handler; + + /** + * Compare for equality. + */ + bool operator == (const DoFAccessor &) const; + + /** + * Compare for inequality. + */ + bool operator != (const DoFAccessor &) const; + + /** + * Reset the DoF handler pointer. + */ + void set_dof_handler (DH *dh); + + /** + * Set the index of the + * ith degree of freedom + * of this object to @p index. + * + * The last argument denotes the + * finite element index. 
For the + * standard ::DoFHandler class, + * this value must be equal to + * its default value since that + * class only supports the same + * finite element on all cells + * anyway. + * + * However, for hp objects + * (i.e. the hp::DoFHandler + * class), different finite + * element objects may be used on + * different cells. On faces + * between two cells, as well as + * vertices, there may therefore + * be two sets of degrees of + * freedom, one for each of the + * finite elements used on the + * adjacent cells. In order to + * specify which set of degrees + * of freedom to work on, the + * last argument is used to + * disambiguate. Finally, if this + * function is called for a cell + * object, there can only be a + * single set of degrees of + * freedom, and fe_index has to + * match the result of + * active_fe_index(). + */ + void set_dof_index (const unsigned int i, + const unsigned int index, + const unsigned int fe_index = DH::default_fe_index) const; + ++ void set_mg_dof_index (const int level, const unsigned int i, const unsigned int index) const; ++ + /** + * Set the global index of the i + * degree on the @p vertex-th vertex of + * the present cell to @p index. + * + * The last argument denotes the + * finite element index. For the + * standard ::DoFHandler class, + * this value must be equal to + * its default value since that + * class only supports the same + * finite element on all cells + * anyway. + * + * However, for hp objects + * (i.e. the hp::DoFHandler + * class), different finite + * element objects may be used on + * different cells. On faces + * between two cells, as well as + * vertices, there may therefore + * be two sets of degrees of + * freedom, one for each of the + * finite elements used on the + * adjacent cells. In order to + * specify which set of degrees + * of freedom to work on, the + * last argument is used to + * disambiguate. Finally, if this + * function is called for a cell + * object, there can only be a + * single set of degrees of + * freedom, and fe_index has to + * match the result of + * active_fe_index(). + */ + void set_vertex_dof_index (const unsigned int vertex, + const unsigned int i, + const unsigned int index, + const unsigned int fe_index = DH::default_fe_index) const; + ++ void set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int index, const unsigned int fe_index = DH::default_fe_index) const; ++ + /** + * Iterator classes need to be friends + * because they need to access operator== + * and operator!=. + */ + template friend class TriaRawIterator; + + + private: + /** + * Copy operator. This is normally used + * in a context like iterator a,b; + * *a=*b;. Presumably, the intent + * here is to copy the object pointed to + * by @p b to the object pointed to by + * @p a. However, the result of + * dereferencing an iterator is not an + * object but an accessor; consequently, + * this operation is not useful for + * iterators on triangulations. We + * declare this function here private, + * thus it may not be used from outside. + * Furthermore it is not implemented and + * will give a linker error if used + * anyway. + */ + DoFAccessor & + operator = (const DoFAccessor &da); + + /** + * Make the DoFHandler class a friend so + * that it can call the set_xxx() + * functions. 
+ */ + template friend class DoFHandler; + template friend class hp::DoFHandler; + + friend struct dealii::internal::DoFHandler::Policy::Implementation; + friend struct dealii::internal::DoFHandler::Implementation; + friend struct dealii::internal::hp::DoFHandler::Implementation; + friend struct dealii::internal::DoFCellAccessor::Implementation; }; @@@ -1468,680 -1458,678 +1468,680 @@@ protected template class DoFCellAccessor : public DoFAccessor { - public: - /** - * Extract dimension from DH. - */ - static const unsigned int dim = DH::dimension; - - /** - * Extract space dimension from DH. - */ - static const unsigned int spacedim = DH::space_dimension; - - /** - * Declare the data type that - * this accessor class expects to - * get passed from the iterator - * classes. - */ - typedef typename DoFAccessor::AccessorData AccessorData; - - /** - * Declare a typedef to the base - * class to make accessing some - * of the exception classes - * simpler. - */ - typedef DoFAccessor BaseClass; - - /** - * Define the type of the - * container this is part of. - */ - typedef DH Container; - - /** - * @name Constructors - */ - /** - * @{ - */ - - /** - * Constructor - */ - DoFCellAccessor (const Triangulation *tria, - const int level, - const int index, - const AccessorData *local_data); - - /** - * Conversion constructor. This - * constructor exists to make certain - * constructs simpler to write in - * dimension independent code. For - * example, it allows assigning a face - * iterator to a line iterator, an - * operation that is useful in 2d but - * doesn't make any sense in 3d. The - * constructor here exists for the - * purpose of making the code conform to - * C++ but it will unconditionally abort; - * in other words, assigning a face - * iterator to a line iterator is better - * put into an if-statement that checks - * that the dimension is two, and assign - * to a quad iterator in 3d (an operator - * that, without this constructor would - * be illegal if we happen to compile for - * 2d). - */ - template - DoFCellAccessor (const InvalidAccessor &); - - /** - * Another conversion operator - * between objects that don't - * make sense, just like the - * previous one. - */ - template - DoFCellAccessor (const DoFAccessor &); - - /** - * @} - */ - - /** - * Return the parent as a DoF - * cell iterator. This - * function is needed since the - * parent function of the base - * class returns a cell accessor - * without access to the DoF - * data. - */ - typename dealii::internal::DoFHandler::Iterators::cell_iterator - parent () const; - - /** - * @name Accessing sub-objects and neighbors - */ - /** - * @{ - */ - - /** - * Return the @p ith neighbor as - * a DoF cell iterator. This - * function is needed since the - * neighbor function of the base - * class returns a cell accessor - * without access to the DoF - * data. - */ - typename dealii::internal::DoFHandler::Iterators::cell_iterator - neighbor (const unsigned int) const; - - /** - * Return the @p ith child as a - * DoF cell iterator. This - * function is needed since the - * child function of the base - * class returns a cell accessor - * without access to the DoF - * data. - */ - typename dealii::internal::DoFHandler::Iterators::cell_iterator - child (const unsigned int) const; - - /** - * Return an iterator to the @p ith face - * of this cell. - * - * This function is not implemented in - * 1D, and maps to DoFAccessor::line - * in 2D. 
- */ - typename dealii::internal::DoFHandler::Iterators::face_iterator - face (const unsigned int i) const; - - /** - * Return the result of the - * @p neighbor_child_on_subface - * function of the base class, - * but convert it so that one can - * also access the DoF data (the - * function in the base class - * only returns an iterator with - * access to the triangulation - * data). - */ - typename dealii::internal::DoFHandler::Iterators::cell_iterator - neighbor_child_on_subface (const unsigned int face_no, - const unsigned int subface_no) const; - - /** - * @} - */ - - /** - * @name Extracting values from global vectors - */ - /** - * @{ - */ - - /** - * Return the values of the given vector - * restricted to the dofs of this - * cell in the standard ordering: dofs - * on vertex 0, dofs on vertex 1, etc, - * dofs on line 0, dofs on line 1, etc, - * dofs on quad 0, etc. - * - * The vector has to have the - * right size before being passed - * to this function. This - * function is only callable for - * active cells. - * - * The input vector may be either - * a Vector, - * Vector, or a - * BlockVector, or a - * PETSc or Trilinos vector if - * deal.II is compiled to support - * these libraries. It is in the - * responsibility of the caller - * to assure that the types of - * the numbers stored in input - * and output vectors are - * compatible and with similar - * accuracy. - */ - template - void get_dof_values (const InputVector &values, - Vector &local_values) const; - - /** - * Return the values of the given vector - * restricted to the dofs of this - * cell in the standard ordering: dofs - * on vertex 0, dofs on vertex 1, etc, - * dofs on line 0, dofs on line 1, etc, - * dofs on quad 0, etc. - * - * The vector has to have the - * right size before being passed - * to this function. This - * function is only callable for - * active cells. - * - * The input vector may be either - * a Vector, - * Vector, or a - * BlockVector, or a - * PETSc or Trilinos vector if - * deal.II is compiled to support - * these libraries. It is in the - * responsibility of the caller - * to assure that the types of - * the numbers stored in input - * and output vectors are - * compatible and with similar - * accuracy. - */ - template - void get_dof_values (const InputVector &values, - ForwardIterator local_values_begin, - ForwardIterator local_values_end) const; - - /** - * Return the values of the given vector - * restricted to the dofs of this - * cell in the standard ordering: dofs - * on vertex 0, dofs on vertex 1, etc, - * dofs on line 0, dofs on line 1, etc, - * dofs on quad 0, etc. - * - * The vector has to have the - * right size before being passed - * to this function. This - * function is only callable for - * active cells. - * - * The input vector may be either a - * Vector, - * Vector, or a - * BlockVector, or a PETSc or - * Trilinos vector if deal.II is - * compiled to support these - * libraries. It is in the - * responsibility of the caller to - * assure that the types of the numbers - * stored in input and output vectors - * are compatible and with similar - * accuracy. The ConstraintMatrix - * passed as an argument to this - * function makes sure that constraints - * are correctly distributed when the - * dof values are calculated. 
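A minimal sketch of the plain get_dof_values() variant documented above, again not part of the patch: a global solution vector is restricted cell by cell into a local Vector<double> of size fe.dofs_per_cell.

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>
    #include <deal.II/lac/vector.h>

    int main ()
    {
      dealii::Triangulation<2> triangulation;
      dealii::GridGenerator::hyper_cube (triangulation);
      triangulation.refine_global (2);

      dealii::FE_Q<2>       fe (1);
      dealii::DoFHandler<2> dof_handler (triangulation);
      dof_handler.distribute_dofs (fe);

      // Stand-in for a computed solution vector.
      dealii::Vector<double> solution (dof_handler.n_dofs ());
      for (unsigned int i = 0; i < solution.size (); ++i)
        solution (i) = i;

      dealii::Vector<double> local_values (fe.dofs_per_cell);
      for (dealii::DoFHandler<2>::active_cell_iterator
             cell = dof_handler.begin_active ();
           cell != dof_handler.end (); ++cell)
        {
          // local_values now holds the solution values in the standard
          // vertex 0, vertex 1, ... ordering of this cell's DoFs.
          cell->get_dof_values (solution, local_values);
        }

      return 0;
    }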
- */ - template - void get_dof_values (const ConstraintMatrix &constraints, - const InputVector &values, - ForwardIterator local_values_begin, - ForwardIterator local_values_end) const; - - /** - * This function is the counterpart to - * get_dof_values(): it takes a vector - * of values for the degrees of freedom - * of the cell pointed to by this iterator - * and writes these values into the global - * data vector @p values. This function - * is only callable for active cells. - * - * Note that for continuous finite - * elements, calling this function affects - * the dof values on neighboring cells as - * well. It may also violate continuity - * requirements for hanging nodes, if - * neighboring cells are less refined than - * the present one. These requirements - * are not taken care of and must be - * enforced by the user afterwards. - * - * The vector has to have the - * right size before being passed - * to this function. - * - * The output vector may be either a - * Vector, - * Vector, or a - * BlockVector, or a - * PETSc vector if deal.II is compiled to - * support these libraries. It is in the - * responsibility of the caller to assure - * that the types of the numbers stored - * in input and output vectors are - * compatible and with similar accuracy. - */ - template - void set_dof_values (const Vector &local_values, - OutputVector &values) const; - - /** - * Return the interpolation of - * the given finite element - * function to the present - * cell. In the simplest case, - * the cell is a terminal one, - * i.e. has no children; then, - * the returned value is the - * vector of nodal values on that - * cell. You could then as well - * get the desired values through - * the @p get_dof_values - * function. In the other case, - * when the cell has children, we - * use the restriction matrices - * provided by the finite element - * class to compute the - * interpolation from the - * children to the present cell. - * - * It is assumed that both - * vectors already have the right - * size beforehand. - * - * Unlike the get_dof_values() - * function, this function works - * on cells rather than to lines, - * quads, and hexes, since - * interpolation is presently - * only provided for cells by the - * finite element classes. - */ - template - void get_interpolated_dof_values (const InputVector &values, - Vector &interpolated_values) const; - - /** - * This, again, is the - * counterpart to - * get_interpolated_dof_values(): - * you specify the dof values on - * a cell and these are - * interpolated to the children - * of the present cell and set on - * the terminal cells. - * - * In principle, it works as - * follows: if the cell pointed - * to by this object is terminal, - * then the dof values are set in - * the global data vector by - * calling the set_dof_values() - * function; otherwise, the - * values are prolonged to each - * of the children and this - * function is called for each of - * them. - * - * Using the - * get_interpolated_dof_values() - * and this function, you can - * compute the interpolation of a - * finite element function to a - * coarser grid by first getting - * the interpolated solution on a - * cell of the coarse grid and - * afterwards redistributing it - * using this function. - * - * Note that for continuous - * finite elements, calling this - * function affects the dof - * values on neighboring cells as - * well. 
It may also violate - * continuity requirements for - * hanging nodes, if neighboring - * cells are less refined than - * the present one, or if their - * children are less refined than - * the children of this - * cell. These requirements are - * not taken care of and must be - * enforced by the user - * afterward. - * - * It is assumed that both - * vectors already have the right - * size beforehand. This function - * relies on the existence of a - * natural interpolation property - * of finite element spaces of a - * cell to its children, denoted - * by the prolongation matrices - * of finite element classes. For - * some elements, the spaces on - * coarse and fine grids are not - * nested, in which case the - * interpolation to a child is - * not the identity; refer to the - * documentation of the - * respective finite element - * class for a description of - * what the prolongation matrices - * represent in this case. - * - * Unlike the set_dof_values() - * function, this function is - * associated to cells rather - * than to lines, quads, and - * hexes, since interpolation is - * presently only provided for - * cells by the finite element - * objects. - * - * The output vector may be either a - * Vector, - * Vector, or a - * BlockVector, or a - * PETSc vector if deal.II is compiled to - * support these libraries. It is in the - * responsibility of the caller to assure - * that the types of the numbers stored - * in input and output vectors are - * compatible and with similar accuracy. - */ - template - void set_dof_values_by_interpolation (const Vector &local_values, - OutputVector &values) const; - - /** - * Distribute a local (cell - * based) vector to a global one - * by mapping the local numbering - * of the degrees of freedom to - * the global one and entering - * the local values into the - * global vector. - * - * The elements are - * added up to the - * elements in the global vector, - * rather than just set, since - * this is usually what one - * wants. - */ - template - void - distribute_local_to_global (const Vector &local_source, - OutputVector &global_destination) const; - - /** - * Distribute a local (cell based) - * vector in iterator format to a - * global one by mapping the local - * numbering of the degrees of freedom - * to the global one and entering the - * local values into the global vector. - * - * The elements are added up - * to the elements in the global - * vector, rather than just set, since - * this is usually what one wants. - */ - template - void - distribute_local_to_global (ForwardIterator local_source_begin, - ForwardIterator local_source_end, - OutputVector &global_destination) const; - - /** - * Distribute a local (cell based) - * vector in iterator format to a - * global one by mapping the local - * numbering of the degrees of freedom - * to the global one and entering the - * local values into the global vector. - * - * The elements are added up - * to the elements in the global - * vector, rather than just set, since - * this is usually what one - * wants. Moreover, the - * ConstraintMatrix passed to this - * function makes sure that also - * constraints are eliminated in this - * process. 
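The vector form of distribute_local_to_global() declared above is the workhorse of assembly loops. The sketch below is illustrative only; it adds a dummy cell-local contribution into a global right-hand-side vector, relying on the documented behavior that entries are added to, rather than overwritten in, the global vector.

    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>
    #include <deal.II/lac/vector.h>

    int main ()
    {
      dealii::Triangulation<2> triangulation;
      dealii::GridGenerator::hyper_cube (triangulation);
      triangulation.refine_global (2);

      dealii::FE_Q<2>       fe (1);
      dealii::DoFHandler<2> dof_handler (triangulation);
      dof_handler.distribute_dofs (fe);

      dealii::Vector<double> global_rhs (dof_handler.n_dofs ());
      dealii::Vector<double> cell_rhs (fe.dofs_per_cell);

      for (dealii::DoFHandler<2>::active_cell_iterator
             cell = dof_handler.begin_active ();
           cell != dof_handler.end (); ++cell)
        {
          // Dummy local contribution; a real assembly loop would compute
          // this from quadrature.
          for (unsigned int i = 0; i < fe.dofs_per_cell; ++i)
            cell_rhs (i) = 1.;

          // Entries are added to, not overwritten in, the global vector.
          cell->distribute_local_to_global (cell_rhs, global_rhs);
        }

      return 0;
    }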
- */ - template - void - distribute_local_to_global (const ConstraintMatrix &constraints, - ForwardIterator local_source_begin, - ForwardIterator local_source_end, - OutputVector &global_destination) const; - - /** - * This function does much the - * same as the - * distribute_local_to_global(Vector,Vector) - * function, but operates on - * matrices instead of - * vectors. If the matrix type is - * a sparse matrix then it is - * supposed to have non-zero - * entry slots where required. - */ - template - void - distribute_local_to_global (const FullMatrix &local_source, - OutputMatrix &global_destination) const; - - /** - * This function does what the two - * distribute_local_to_global - * functions with vector and matrix - * argument do, but all at once. - */ - template - void - distribute_local_to_global (const FullMatrix &local_matrix, - const Vector &local_vector, - OutputMatrix &global_matrix, - OutputVector &global_vector) const; - - /** - * @} - */ - - /** - * @name Accessing the DoF indices of this object - */ - /** - * @{ - */ - - /** - * Return the indices of the dofs of this - * quad in the standard ordering: dofs - * on vertex 0, dofs on vertex 1, etc, - * dofs on line 0, dofs on line 1, etc, - * dofs on quad 0, etc. - * - * It is assumed that the vector already - * has the right size beforehand. - * - * This function reimplements the - * same function in the base - * class. The functions in the - * base classes are available for - * all geometric objects, - * i.e. even in 3d they can be - * used to access the dof indices - * of edges, for example. On the - * other hand, the most common - * case is clearly the use on - * cells, which is why we cache - * the array for each cell, but - * not edge. To retrieve the - * cached values, rather than - * collect the necessary - * information every time, this - * function overwrites the one in - * the base class. - * - * This function is most often - * used on active objects (edges, - * faces, cells). It can be used - * on non-active objects as well - * (i.e. objects that have - * children), but only if the - * finite element under - * consideration has degrees of - * freedom exclusively on - * vertices. Otherwise, the - * function doesn't make much - * sense, since for example - * inactive edges do not have - * degrees of freedom associated - * with them at all. - */ - void get_dof_indices (std::vector &dof_indices) const; - - void get_mg_dof_indices (std::vector& dof_indices) const; - - /** - * @} - */ - - /** - * @name Accessing the finite element associated with this object - */ - /** - * @{ - */ - - /** - * Return the finite element that - * is used on the cell pointed to - * by this iterator. For non-hp - * DoF handlers, this is of - * course always the same - * element, independent of the - * cell we are presently on, but - * for hp DoF handlers, this may - * change from cell to cell. - */ - const FiniteElement & - get_fe () const; - - /** - * Returns the index inside the - * hp::FECollection of the FiniteElement - * used for this cell. - */ - unsigned int active_fe_index () const; - - /** - * Sets the index of the FiniteElement used for - * this cell. - */ - void set_active_fe_index (const unsigned int i); - /** - * @} - */ - - /** - * Set the DoF indices of this - * cell to the given values. This - * function bypasses the DoF - * cache, if one exists for the - * given DoF handler class. 
- */ - void set_dof_indices (const std::vector &dof_indices); - - /** - * Update the cache in which we - * store the dof indices of this - * cell. - */ - void update_cell_dof_indices_cache () const; - - private: - /** - * Copy operator. This is normally used - * in a context like iterator a,b; - * *a=*b;. Presumably, the intent - * here is to copy the object pointed to - * by @p b to the object pointed to by - * @p a. However, the result of - * dereferencing an iterator is not an - * object but an accessor; consequently, - * this operation is not useful for - * iterators on triangulations. We - * declare this function here private, - * thus it may not be used from outside. - * Furthermore it is not implemented and - * will give a linker error if used - * anyway. - */ - DoFCellAccessor & - operator = (const DoFCellAccessor &da); - - /** - * Make the DoFHandler class a - * friend so that it can call the - * update_cell_dof_indices_cache() - * function - */ - template friend class DoFHandler; - friend struct dealii::internal::DoFCellAccessor::Implementation; + public: + /** + * Extract dimension from DH. + */ + static const unsigned int dim = DH::dimension; + + /** + * Extract space dimension from DH. + */ + static const unsigned int spacedim = DH::space_dimension; + + /** + * Declare the data type that + * this accessor class expects to + * get passed from the iterator + * classes. + */ + typedef typename DoFAccessor::AccessorData AccessorData; + + /** + * Declare a typedef to the base + * class to make accessing some + * of the exception classes + * simpler. + */ + typedef DoFAccessor BaseClass; + + /** + * Define the type of the + * container this is part of. + */ + typedef DH Container; + + /** + * @name Constructors + */ + /** + * @{ + */ + + /** + * Constructor + */ + DoFCellAccessor (const Triangulation *tria, + const int level, + const int index, + const AccessorData *local_data); + + /** + * Conversion constructor. This + * constructor exists to make certain + * constructs simpler to write in + * dimension independent code. For + * example, it allows assigning a face + * iterator to a line iterator, an + * operation that is useful in 2d but + * doesn't make any sense in 3d. The + * constructor here exists for the + * purpose of making the code conform to + * C++ but it will unconditionally abort; + * in other words, assigning a face + * iterator to a line iterator is better + * put into an if-statement that checks + * that the dimension is two, and assign + * to a quad iterator in 3d (an operator + * that, without this constructor would + * be illegal if we happen to compile for + * 2d). + */ + template + DoFCellAccessor (const InvalidAccessor &); + + /** + * Another conversion operator + * between objects that don't + * make sense, just like the + * previous one. + */ + template + DoFCellAccessor (const DoFAccessor &); + + /** + * @} + */ + + /** - * Return the parent as a DoF - * cell iterator. This - * function is needed since the - * parent function of the base - * class returns a cell accessor - * without access to the DoF - * data. - */ ++ * Return the parent as a DoF ++ * cell iterator. This ++ * function is needed since the ++ * parent function of the base ++ * class returns a cell accessor ++ * without access to the DoF ++ * data. ++ */ + typename dealii::internal::DoFHandler::Iterators::cell_iterator + parent () const; + + /** + * @name Accessing sub-objects and neighbors + */ + /** + * @{ + */ + + /** + * Return the @p ith neighbor as + * a DoF cell iterator. 
This + * function is needed since the + * neighbor function of the base + * class returns a cell accessor + * without access to the DoF + * data. + */ + typename dealii::internal::DoFHandler::Iterators::cell_iterator + neighbor (const unsigned int) const; + + /** + * Return the @p ith child as a + * DoF cell iterator. This + * function is needed since the + * child function of the base + * class returns a cell accessor + * without access to the DoF + * data. + */ + typename dealii::internal::DoFHandler::Iterators::cell_iterator + child (const unsigned int) const; + + /** + * Return an iterator to the @p ith face + * of this cell. + * + * This function is not implemented in + * 1D, and maps to DoFAccessor::line + * in 2D. + */ + typename dealii::internal::DoFHandler::Iterators::face_iterator + face (const unsigned int i) const; + + /** + * Return the result of the + * @p neighbor_child_on_subface + * function of the base class, + * but convert it so that one can + * also access the DoF data (the + * function in the base class + * only returns an iterator with + * access to the triangulation + * data). + */ + typename dealii::internal::DoFHandler::Iterators::cell_iterator + neighbor_child_on_subface (const unsigned int face_no, + const unsigned int subface_no) const; + + /** + * @} + */ + + /** + * @name Extracting values from global vectors + */ + /** + * @{ + */ + + /** + * Return the values of the given vector + * restricted to the dofs of this + * cell in the standard ordering: dofs + * on vertex 0, dofs on vertex 1, etc, + * dofs on line 0, dofs on line 1, etc, + * dofs on quad 0, etc. + * + * The vector has to have the + * right size before being passed + * to this function. This + * function is only callable for + * active cells. + * + * The input vector may be either + * a Vector, + * Vector, or a + * BlockVector, or a + * PETSc or Trilinos vector if + * deal.II is compiled to support + * these libraries. It is in the + * responsibility of the caller + * to assure that the types of + * the numbers stored in input + * and output vectors are + * compatible and with similar + * accuracy. + */ + template + void get_dof_values (const InputVector &values, + Vector &local_values) const; + + /** + * Return the values of the given vector + * restricted to the dofs of this + * cell in the standard ordering: dofs + * on vertex 0, dofs on vertex 1, etc, + * dofs on line 0, dofs on line 1, etc, + * dofs on quad 0, etc. + * + * The vector has to have the + * right size before being passed + * to this function. This + * function is only callable for + * active cells. + * + * The input vector may be either + * a Vector, + * Vector, or a + * BlockVector, or a + * PETSc or Trilinos vector if + * deal.II is compiled to support + * these libraries. It is in the + * responsibility of the caller + * to assure that the types of + * the numbers stored in input + * and output vectors are + * compatible and with similar + * accuracy. + */ + template + void get_dof_values (const InputVector &values, + ForwardIterator local_values_begin, + ForwardIterator local_values_end) const; + + /** + * Return the values of the given vector + * restricted to the dofs of this + * cell in the standard ordering: dofs + * on vertex 0, dofs on vertex 1, etc, + * dofs on line 0, dofs on line 1, etc, + * dofs on quad 0, etc. + * + * The vector has to have the + * right size before being passed + * to this function. This + * function is only callable for + * active cells. 
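As a usage sketch only (hypothetical helper name; assumes a DoFHandler<dim> with distributed dofs, a global Vector<double> sized to n_dofs(), and the usual deal.II headers), get_dof_values() is typically called cell by cell roughly like this:

template <int dim>
void extract_local_values (const DoFHandler<dim> &dof_handler,
                           const Vector<double>  &solution)
{
  Vector<double> local_values (dof_handler.get_fe().dofs_per_cell);
  for (typename DoFHandler<dim>::active_cell_iterator
         cell = dof_handler.begin_active();
       cell != dof_handler.end(); ++cell)
    {
      // local_values receives the entries of 'solution' that belong to this
      // cell, in the standard ordering: vertex dofs, then line and quad dofs.
      cell->get_dof_values (solution, local_values);
    }
}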
+ * + * The input vector may be either a + * Vector, + * Vector, or a + * BlockVector, or a PETSc or + * Trilinos vector if deal.II is + * compiled to support these + * libraries. It is in the + * responsibility of the caller to + * assure that the types of the numbers + * stored in input and output vectors + * are compatible and with similar + * accuracy. The ConstraintMatrix + * passed as an argument to this + * function makes sure that constraints + * are correctly distributed when the + * dof values are calculated. + */ + template + void get_dof_values (const ConstraintMatrix &constraints, + const InputVector &values, + ForwardIterator local_values_begin, + ForwardIterator local_values_end) const; + + /** + * This function is the counterpart to + * get_dof_values(): it takes a vector + * of values for the degrees of freedom + * of the cell pointed to by this iterator + * and writes these values into the global + * data vector @p values. This function + * is only callable for active cells. + * + * Note that for continuous finite + * elements, calling this function affects + * the dof values on neighboring cells as + * well. It may also violate continuity + * requirements for hanging nodes, if + * neighboring cells are less refined than + * the present one. These requirements + * are not taken care of and must be + * enforced by the user afterwards. + * + * The vector has to have the + * right size before being passed + * to this function. + * + * The output vector may be either a + * Vector, + * Vector, or a + * BlockVector, or a + * PETSc vector if deal.II is compiled to + * support these libraries. It is in the + * responsibility of the caller to assure + * that the types of the numbers stored + * in input and output vectors are + * compatible and with similar accuracy. + */ + template + void set_dof_values (const Vector &local_values, + OutputVector &values) const; + + /** + * Return the interpolation of + * the given finite element + * function to the present + * cell. In the simplest case, + * the cell is a terminal one, + * i.e. has no children; then, + * the returned value is the + * vector of nodal values on that + * cell. You could then as well + * get the desired values through + * the @p get_dof_values + * function. In the other case, + * when the cell has children, we + * use the restriction matrices + * provided by the finite element + * class to compute the + * interpolation from the + * children to the present cell. + * + * It is assumed that both + * vectors already have the right + * size beforehand. + * + * Unlike the get_dof_values() + * function, this function works + * on cells rather than to lines, + * quads, and hexes, since + * interpolation is presently + * only provided for cells by the + * finite element classes. + */ + template + void get_interpolated_dof_values (const InputVector &values, + Vector &interpolated_values) const; + + /** + * This, again, is the + * counterpart to + * get_interpolated_dof_values(): + * you specify the dof values on + * a cell and these are + * interpolated to the children + * of the present cell and set on + * the terminal cells. + * + * In principle, it works as + * follows: if the cell pointed + * to by this object is terminal, + * then the dof values are set in + * the global data vector by + * calling the set_dof_values() + * function; otherwise, the + * values are prolonged to each + * of the children and this + * function is called for each of + * them. 
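A minimal sketch of the restriction/prolongation pair described above (hypothetical function name; src and dst are assumed to be global Vector<double>s of size n_dofs()):

template <int dim>
void roundtrip_through_coarse_cells (const DoFHandler<dim> &dof_handler,
                                     const Vector<double>  &src,
                                     Vector<double>        &dst)
{
  Vector<double> cell_values (dof_handler.get_fe().dofs_per_cell);
  for (typename DoFHandler<dim>::cell_iterator
         cell = dof_handler.begin(0); cell != dof_handler.end(0); ++cell)
    {
      // Restrict the finite element function from the children to this
      // coarse cell ...
      cell->get_interpolated_dof_values (src, cell_values);
      // ... then prolong the coarse representation back to the leaves.
      cell->set_dof_values_by_interpolation (cell_values, dst);
    }
}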
+ * + * Using the + * get_interpolated_dof_values() + * and this function, you can + * compute the interpolation of a + * finite element function to a + * coarser grid by first getting + * the interpolated solution on a + * cell of the coarse grid and + * afterwards redistributing it + * using this function. + * + * Note that for continuous + * finite elements, calling this + * function affects the dof + * values on neighboring cells as + * well. It may also violate + * continuity requirements for + * hanging nodes, if neighboring + * cells are less refined than + * the present one, or if their + * children are less refined than + * the children of this + * cell. These requirements are + * not taken care of and must be + * enforced by the user + * afterward. + * + * It is assumed that both + * vectors already have the right + * size beforehand. This function + * relies on the existence of a + * natural interpolation property + * of finite element spaces of a + * cell to its children, denoted + * by the prolongation matrices + * of finite element classes. For + * some elements, the spaces on + * coarse and fine grids are not + * nested, in which case the + * interpolation to a child is + * not the identity; refer to the + * documentation of the + * respective finite element + * class for a description of + * what the prolongation matrices + * represent in this case. + * + * Unlike the set_dof_values() + * function, this function is + * associated to cells rather + * than to lines, quads, and + * hexes, since interpolation is + * presently only provided for + * cells by the finite element + * objects. + * + * The output vector may be either a + * Vector, + * Vector, or a + * BlockVector, or a + * PETSc vector if deal.II is compiled to + * support these libraries. It is in the + * responsibility of the caller to assure + * that the types of the numbers stored + * in input and output vectors are + * compatible and with similar accuracy. + */ + template + void set_dof_values_by_interpolation (const Vector &local_values, + OutputVector &values) const; + + /** + * Distribute a local (cell + * based) vector to a global one + * by mapping the local numbering + * of the degrees of freedom to + * the global one and entering + * the local values into the + * global vector. + * + * The elements are + * added up to the + * elements in the global vector, + * rather than just set, since + * this is usually what one + * wants. + */ + template + void + distribute_local_to_global (const Vector &local_source, + OutputVector &global_destination) const; + + /** + * Distribute a local (cell based) + * vector in iterator format to a + * global one by mapping the local + * numbering of the degrees of freedom + * to the global one and entering the + * local values into the global vector. + * + * The elements are added up + * to the elements in the global + * vector, rather than just set, since + * this is usually what one wants. + */ + template + void + distribute_local_to_global (ForwardIterator local_source_begin, + ForwardIterator local_source_end, + OutputVector &global_destination) const; + + /** + * Distribute a local (cell based) + * vector in iterator format to a + * global one by mapping the local + * numbering of the degrees of freedom + * to the global one and entering the + * local values into the global vector. + * + * The elements are added up + * to the elements in the global + * vector, rather than just set, since + * this is usually what one + * wants. 
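For illustration, a hedged assembly fragment (made-up helper; the quadrature loop that would fill cell_rhs is omitted) showing how the additive semantics of distribute_local_to_global() are normally used:

template <int dim>
void add_cell_contributions (const DoFHandler<dim> &dof_handler,
                             Vector<double>        &system_rhs)
{
  Vector<double> cell_rhs (dof_handler.get_fe().dofs_per_cell);
  for (typename DoFHandler<dim>::active_cell_iterator
         cell = dof_handler.begin_active();
       cell != dof_handler.end(); ++cell)
    {
      cell_rhs = 0;
      // ... a quadrature loop would fill cell_rhs here ...
      cell->distribute_local_to_global (cell_rhs, system_rhs); // adds, never overwrites
    }
}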
Moreover, the + * ConstraintMatrix passed to this + * function makes sure that also + * constraints are eliminated in this + * process. + */ + template + void + distribute_local_to_global (const ConstraintMatrix &constraints, + ForwardIterator local_source_begin, + ForwardIterator local_source_end, + OutputVector &global_destination) const; + + /** + * This function does much the + * same as the + * distribute_local_to_global(Vector,Vector) + * function, but operates on + * matrices instead of + * vectors. If the matrix type is + * a sparse matrix then it is + * supposed to have non-zero + * entry slots where required. + */ + template + void + distribute_local_to_global (const FullMatrix &local_source, + OutputMatrix &global_destination) const; + + /** + * This function does what the two + * distribute_local_to_global + * functions with vector and matrix + * argument do, but all at once. + */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + OutputMatrix &global_matrix, + OutputVector &global_vector) const; + + /** + * @} + */ + + /** + * @name Accessing the DoF indices of this object + */ + /** + * @{ + */ + + /** + * Return the indices of the dofs of this + * quad in the standard ordering: dofs + * on vertex 0, dofs on vertex 1, etc, + * dofs on line 0, dofs on line 1, etc, + * dofs on quad 0, etc. + * + * It is assumed that the vector already + * has the right size beforehand. + * + * This function reimplements the + * same function in the base + * class. The functions in the + * base classes are available for + * all geometric objects, + * i.e. even in 3d they can be + * used to access the dof indices + * of edges, for example. On the + * other hand, the most common + * case is clearly the use on + * cells, which is why we cache + * the array for each cell, but + * not edge. To retrieve the + * cached values, rather than + * collect the necessary + * information every time, this + * function overwrites the one in + * the base class. + * + * This function is most often + * used on active objects (edges, + * faces, cells). It can be used + * on non-active objects as well + * (i.e. objects that have + * children), but only if the + * finite element under + * consideration has degrees of + * freedom exclusively on + * vertices. Otherwise, the + * function doesn't make much + * sense, since for example + * inactive edges do not have + * degrees of freedom associated + * with them at all. + */ + void get_dof_indices (std::vector &dof_indices) const; + ++ void get_mg_dof_indices (std::vector &dof_indices) const; ++ + /** + * @} + */ + + /** + * @name Accessing the finite element associated with this object + */ + /** + * @{ + */ + + /** + * Return the finite element that + * is used on the cell pointed to + * by this iterator. For non-hp + * DoF handlers, this is of + * course always the same + * element, independent of the + * cell we are presently on, but + * for hp DoF handlers, this may + * change from cell to cell. + */ + const FiniteElement & + get_fe () const; + + /** + * Returns the index inside the + * hp::FECollection of the FiniteElement + * used for this cell. + */ + unsigned int active_fe_index () const; + + /** + * Sets the index of the FiniteElement used for + * this cell. + */ + void set_active_fe_index (const unsigned int i); + /** + * @} + */ + + /** + * Set the DoF indices of this + * cell to the given values. This + * function bypasses the DoF + * cache, if one exists for the + * given DoF handler class. 
+ */ + void set_dof_indices (const std::vector &dof_indices); + + /** + * Update the cache in which we + * store the dof indices of this + * cell. + */ + void update_cell_dof_indices_cache () const; + + private: + /** + * Copy operator. This is normally used + * in a context like iterator a,b; + * *a=*b;. Presumably, the intent + * here is to copy the object pointed to + * by @p b to the object pointed to by + * @p a. However, the result of + * dereferencing an iterator is not an + * object but an accessor; consequently, + * this operation is not useful for + * iterators on triangulations. We + * declare this function here private, + * thus it may not be used from outside. + * Furthermore it is not implemented and + * will give a linker error if used + * anyway. + */ + DoFCellAccessor & + operator = (const DoFCellAccessor &da); + + /** + * Make the DoFHandler class a + * friend so that it can call the + * update_cell_dof_indices_cache() + * function + */ + template friend class DoFHandler; + friend struct dealii::internal::DoFCellAccessor::Implementation; }; diff --cc deal.II/include/deal.II/dofs/dof_accessor.templates.h index ea84b5dbec,e098bf08df..6138ef2982 --- a/deal.II/include/deal.II/dofs/dof_accessor.templates.h +++ b/deal.II/include/deal.II/dofs/dof_accessor.templates.h @@@ -1452,12 -1451,6 +1451,13 @@@ DoFAccessor::dof_index (const u } +template +inline - unsigned int DoFAccessor::mg_dof_index (const int level, const unsigned int i) const { ++unsigned int DoFAccessor::mg_dof_index (const int level, const unsigned int i) const ++{ + return this->dof_handler->template get_dof_index (level, this->present_index, 0, i); +} + template inline @@@ -1544,17 -1537,6 +1544,18 @@@ DoFAccessor::vertex_dof_ } +template +inline +unsigned - DoFAccessor::mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index) const { ++DoFAccessor::mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int fe_index) const ++{ + Assert (this->dof_handler != 0, ExcInvalidObject ()); + Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ()); + Assert (vertex < GeometryInfo::vertices_per_cell, ExcIndexRange (vertex, 0, GeometryInfo::vertices_per_cell)); + Assert (i < this->dof_handler->get_fe ()[fe_index].dofs_per_vertex, ExcIndexRange (i, 0, this->dof_handler->get_fe ()[fe_index].dofs_per_vertex)); + return this->dof_handler->mg_vertex_dofs[this->vertex_index (vertex)].get_index (level, i); +} + template inline @@@ -1573,24 -1555,8 +1574,26 @@@ DoFAccessor::set_vertex_ } +template +inline +void - DoFAccessor::set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int index, const unsigned int fe_index) const { ++DoFAccessor::set_mg_vertex_dof_index (const int level, const unsigned int vertex, const unsigned int i, const unsigned int index, const unsigned int fe_index) const ++{ + Assert (this->dof_handler != 0, ExcInvalidObject ()); + Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ()); + Assert (vertex < GeometryInfo::vertices_per_cell, ExcIndexRange (vertex, 0, GeometryInfo::vertices_per_cell)); + Assert (i < this->dof_handler->get_fe ()[fe_index].dofs_per_vertex, ExcIndexRange (i, 0, this->dof_handler->get_fe ()[fe_index].dofs_per_vertex)); + this->dof_handler->mg_vertex_dofs[this->vertex_index (vertex)].set_index (level, i, index); +} +template +inline +void - DoFAccessor::set_mg_dof_index (const int level, const unsigned int i, const 
unsigned int index) const { ++DoFAccessor::set_mg_dof_index (const int level, const unsigned int i, const unsigned int index) const ++{ + this->dof_handler->template set_dof_index (level, this->present_index, 0, i, index); +} namespace internal @@@ -1691,114 -1657,57 +1694,117 @@@ namespace interna template void get_dof_indices (const dealii::DoFAccessor<3,DH> &accessor, - std::vector &dof_indices, - const unsigned int fe_index) + std::vector &dof_indices, + const unsigned int fe_index) { const unsigned int dofs_per_vertex = accessor.get_fe(fe_index).dofs_per_vertex, - dofs_per_line = accessor.get_fe(fe_index).dofs_per_line, - dofs_per_quad = accessor.get_fe(fe_index).dofs_per_quad, - dofs_per_hex = accessor.get_fe(fe_index).dofs_per_hex; + dofs_per_line = accessor.get_fe(fe_index).dofs_per_line, + dofs_per_quad = accessor.get_fe(fe_index).dofs_per_quad, + dofs_per_hex = accessor.get_fe(fe_index).dofs_per_hex; std::vector::iterator next = dof_indices.begin(); for (unsigned int vertex=0; vertex<8; ++vertex) - for (unsigned int d=0; ddof_index(accessor.get_fe(fe_index). - adjust_line_dof_index_for_line_orientation(d, - accessor.line_orientation(line)),fe_index); - // now copy dof numbers from the face. for - // faces with the wrong orientation, we - // have already made sure that we're ok by - // picking the correct lines and vertices - // (this happens automatically in the - // line() and vertex() functions). however, - // if the face is in wrong orientation, we - // look at it in flipped orientation and we - // will have to adjust the shape function - // indices that we see to correspond to the - // correct (cell-local) ordering. The same - // applies, if the face_rotation or - // face_orientation is non-standard + for (unsigned int d=0; ddof_index(accessor.get_fe(fe_index). + adjust_line_dof_index_for_line_orientation(d, + accessor.line_orientation(line)),fe_index); + // now copy dof numbers from the face. for + // faces with the wrong orientation, we + // have already made sure that we're ok by + // picking the correct lines and vertices + // (this happens automatically in the + // line() and vertex() functions). however, + // if the face is in wrong orientation, we + // look at it in flipped orientation and we + // will have to adjust the shape function + // indices that we see to correspond to the + // correct (cell-local) ordering. The same + // applies, if the face_rotation or + // face_orientation is non-standard for (unsigned int quad=0; quad<6; ++quad) - for (unsigned int d=0; ddof_index(accessor.get_fe(fe_index). - adjust_quad_dof_index_for_face_orientation(d, - accessor.face_orientation(quad), - accessor.face_flip(quad), - accessor.face_rotation(quad)), - fe_index); + for (unsigned int d=0; ddof_index(accessor.get_fe(fe_index). 
+ adjust_quad_dof_index_for_face_orientation(d, + accessor.face_orientation(quad), + accessor.face_flip(quad), + accessor.face_rotation(quad)), + fe_index); for (unsigned int d=0; d - void get_mg_dof_indices (const dealii::DoFAccessor<1, DH>& accessor, const int level, std::vector& dof_indices, const unsigned int fe_index) { - const FiniteElement& fe = accessor.get_dof_handler ().get_fe ()[fe_index]; ++ void get_mg_dof_indices (const dealii::DoFAccessor<1, DH> &accessor, const int level, std::vector &dof_indices, const unsigned int fe_index) ++ { ++ const FiniteElement &fe = accessor.get_dof_handler ().get_fe ()[fe_index]; + std::vector::iterator next = dof_indices.begin (); + + for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex) + for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) + *next++ = accessor.mg_vertex_dof_index (level, vertex, dof); + + for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) + *next++ = accessor.mg_dof_index (level, dof); + + Assert (next == dof_indices.end (), ExcInternalError ()); + } + + template - void get_mg_dof_indices (const dealii::DoFAccessor<2, DH>& accessor, const int level, std::vector& dof_indices, const unsigned int fe_index) { - const FiniteElement& fe = accessor.get_dof_handler ().get_fe ()[fe_index]; ++ void get_mg_dof_indices (const dealii::DoFAccessor<2, DH> &accessor, const int level, std::vector &dof_indices, const unsigned int fe_index) ++ { ++ const FiniteElement &fe = accessor.get_dof_handler ().get_fe ()[fe_index]; + std::vector::iterator next = dof_indices.begin (); + + for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex) + for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) + *next++ = accessor.mg_vertex_dof_index (level, vertex, dof); + + for (unsigned int line = 0; line < GeometryInfo<2>::lines_per_cell; ++line) + for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) + *next++ = accessor.line (line)->mg_dof_index (level, dof); + + for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof) + *next++ = accessor.mg_dof_index (level, dof); + + Assert (next == dof_indices.end (), ExcInternalError ()); + } + + template - void get_mg_dof_indices (const dealii::DoFAccessor<3, DH>& accessor, const int level, std::vector& dof_indices, const unsigned int fe_index) { - const FiniteElement& fe = accessor.get_dof_handler ().get_fe ()[fe_index]; ++ void get_mg_dof_indices (const dealii::DoFAccessor<3, DH> &accessor, const int level, std::vector &dof_indices, const unsigned int fe_index) ++ { ++ const FiniteElement &fe = accessor.get_dof_handler ().get_fe ()[fe_index]; + std::vector::iterator next = dof_indices.begin (); + + for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex) + for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) + *next++ = accessor.mg_vertex_dof_index (level, vertex, dof); + + for (unsigned int line = 0; line < GeometryInfo<3>::lines_per_cell; ++line) + for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) + *next++ = accessor.line (line)->mg_dof_index (level, dof); + + for (unsigned int quad = 0; quad < GeometryInfo<3>::quads_per_cell; ++quad) + for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof) + *next++ = accessor.quad (quad)->mg_dof_index (level, dof); + + for (unsigned int dof = 0; dof < fe.dofs_per_hex; ++dof) + *next++ = accessor.mg_dof_index (level, dof); + + Assert (next == dof_indices.end (), ExcInternalError ()); + } } } @@@ -1867,34 -1776,9 +1873,39 @@@ DoFAccessor::get_dof_indi 
dealii::internal::DoFAccessor::get_dof_indices (*this, dof_indices, fe_index); } +template +inline - void DoFAccessor::get_mg_dof_indices (const int level, std::vector& dof_indices, const unsigned int fe_index) const { ++void DoFAccessor::get_mg_dof_indices (const int level, std::vector &dof_indices, const unsigned int fe_index) const ++{ + Assert (this->dof_handler != 0, ExcInvalidObject ()); + Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ()); - switch (structdim) { - case 1: { - Assert (dof_indices.size () == 2 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line, ExcVectorDoesNotMatch ()); - break; ++ switch (structdim) ++ { ++ case 1: ++ { ++ Assert (dof_indices.size () == 2 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line, ExcVectorDoesNotMatch ()); ++ break; + } - case 2: { - Assert (dof_indices.size () == 4 * (this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line) + this->dof_handler->get_fe ()[fe_index].dofs_per_quad, ExcVectorDoesNotMatch ()); - break; ++ case 2: ++ { ++ Assert (dof_indices.size () == 4 * (this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + this->dof_handler->get_fe ()[fe_index].dofs_per_line) + this->dof_handler->get_fe ()[fe_index].dofs_per_quad, ExcVectorDoesNotMatch ()); ++ break; + } - case 3: { - Assert (dof_indices.size () == 8 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + 12 * this->dof_handler->get_fe ()[fe_index].dofs_per_line + 6 * this->dof_handler->get_fe ()[fe_index].dofs_per_quad + this->dof_handler->get_fe ()[fe_index].dofs_per_hex, ExcVectorDoesNotMatch ()); - break; ++ case 3: ++ { ++ Assert (dof_indices.size () == 8 * this->dof_handler->get_fe ()[fe_index].dofs_per_vertex + 12 * this->dof_handler->get_fe ()[fe_index].dofs_per_line + 6 * this->dof_handler->get_fe ()[fe_index].dofs_per_quad + this->dof_handler->get_fe ()[fe_index].dofs_per_hex, ExcVectorDoesNotMatch ()); ++ break; + } + + default: - Assert (false, ExcNotImplemented ()); - } ++ Assert (false, ExcNotImplemented ()); ++ } + + internal::DoFAccessor::get_mg_dof_indices (*this, level, dof_indices, fe_index); +} template inline @@@ -3336,12 -3220,6 +3347,13 @@@ get_dof_indices (std::vector +inline - void DoFCellAccessor::get_mg_dof_indices (std::vector& dof_indices) const { ++void DoFCellAccessor::get_mg_dof_indices (std::vector &dof_indices) const ++{ + DoFAccessor::get_mg_dof_indices (this->level (), dof_indices); +} + template template diff --cc deal.II/include/deal.II/dofs/dof_handler.h index 71cf88d10b,707eaedf8e..39335c7abc --- a/deal.II/include/deal.II/dofs/dof_handler.h +++ b/deal.II/include/deal.II/dofs/dof_handler.h @@@ -170,788 -170,748 +170,789 @@@ namespace interna template class DoFHandler : public Subscriptor { - typedef dealii::internal::DoFHandler::Iterators > IteratorSelector; - public: - typedef typename IteratorSelector::CellAccessor cell_accessor; - typedef typename IteratorSelector::FaceAccessor face_accessor; - - typedef typename IteratorSelector::line_iterator line_iterator; - typedef typename IteratorSelector::active_line_iterator active_line_iterator; - - typedef typename IteratorSelector::quad_iterator quad_iterator; - typedef typename IteratorSelector::active_quad_iterator active_quad_iterator; - - typedef typename IteratorSelector::hex_iterator hex_iterator; - typedef typename IteratorSelector::active_hex_iterator active_hex_iterator; - - typedef 
typename IteratorSelector::cell_iterator cell_iterator; - typedef typename IteratorSelector::active_cell_iterator active_cell_iterator; - - typedef typename IteratorSelector::face_iterator face_iterator; - typedef typename IteratorSelector::active_face_iterator active_face_iterator; - - /** - * Alias the @p FunctionMap type - * declared elsewhere. - */ - typedef typename dealii::FunctionMap::type FunctionMap; - - /** - * Make the dimension available - * in function templates. - */ - static const unsigned int dimension = dim; - - /** - * Make the space dimension available - * in function templates. - */ - static const unsigned int space_dimension = spacedim; - - /** - * When the arrays holding the - * DoF indices are set up, but - * before they are filled with - * actual values, they are set to - * an invalid value, in order to - * monitor possible - * problems. This invalid value - * is the constant defined here. - * - * Please note that you should - * not rely on it having a - * certain value, but rather take - * its symbolic name. - */ - static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int; - - /** - * The default index of the - * finite element to be used on a - * given cell. Since the present - * class only supports the same - * finite element to be used on - * all cells, the index of the - * finite element needs to be the - * same on all cells anyway, and - * by convention we pick zero for - * this value. The situation is - * different for hp objects - * (i.e. the hp::DoFHandler - * class) where different finite - * element indices may be used on - * different cells, and the - * default index there - * corresponds to an invalid - * value. - */ - static const unsigned int default_fe_index = 0; - - /** - * Standard constructor, not - * initializing any data. After - * constructing an object with - * this constructor, use - * initialize() to make a valid - * DoFHandler. - */ - DoFHandler (); - - /** - * Constructor. Take @p tria as the - * triangulation to work on. - */ - DoFHandler ( const Triangulation &tria); - - /** - * Destructor. - */ - virtual ~DoFHandler (); - - /** - * Assign a Triangulation and a - * FiniteElement to the - * DoFHandler and compute the - * distribution of degrees of - * freedom over the mesh. - */ - void initialize(const Triangulation& tria, - const FiniteElement& fe); - - /** - * Go through the triangulation and - * distribute the degrees of freedoms - * needed for the given finite element - * according to the given distribution - * method. The purpose of this function - * is first discussed in the introduction - * to the step-2 tutorial program. - * - * A pointer of the transferred - * finite element is - * stored. Therefore, the - * lifetime of the finite element - * object shall be longer than - * that of this object. If you - * don't want this behaviour, you - * may want to call the @p clear - * member function which also - * releases the lock of this - * object to the finite element. - */ - virtual void distribute_dofs (const FiniteElement &fe); - - virtual void distribute_mg_dofs (const FiniteElement& fe); - - /** - * After distribute_dofs() with - * an FESystem element, the block - * structure of global and level - * vectors is stored in a - * BlockInfo object accessible - * with block_info(). This - * function initializes the local - * block structure on each cell - * in the same object. 
- */ - void initialize_local_block_info(); - - /** - * Clear all data of this object and - * especially delete the lock this object - * has to the finite element used the last - * time when @p distribute_dofs was called. - */ - virtual void clear (); - - /** - * Renumber degrees of freedom based on - * a list of new dof numbers for all the - * dofs. - * - * This function is called by - * the functions in - * DoFRenumbering function - * after computing the ordering - * of the degrees of freedom. - * This function is called, for - * example, by the functions in - * the DoFRenumbering - * namespace, but it can of - * course also be called from - * user code. - * - * @arg new_number This array - * must have a size equal to - * the number of degrees of - * freedom owned by the current - * processor, i.e. the size - * must be equal to what - * n_locally_owned_dofs() - * returns. If only one - * processor participates in - * storing the current mesh, - * then this equals the total - * number of degrees of - * freedom, i.e. the result of - * n_dofs(). The contents of - * this array are the new - * global indices for each - * freedom listed in the - * IndexSet returned by - * locally_owned_dofs(). In the - * case of a sequential mesh - * this means that the array is - * a list of new indices for - * each of the degrees of - * freedom on the current - * mesh. In the case that we - * have a - * parallel::distributed::Triangulation - * underlying this DoFHandler - * object, the array is a list - * of new indices for all the - * locally owned degrees of - * freedom, enumerated in the - * same order as the currently - * locally owned DoFs. In other - * words, assume that degree of - * freedom i is - * currently locally owned, - * then - * new_numbers[locally_owned_dofs().index_within_set(i)] - * returns the new global DoF - * index of - * i. Since the - * IndexSet of - * locally_owned_dofs() is - * complete in the sequential - * case, the latter convention - * for the content of the array - * reduces to the former in the - * case that only one processor - * participates in the mesh. - */ - void renumber_dofs (const std::vector &new_numbers); - - /** - * @deprecated Use - * CompressedSparsityPattern instead of - * initializing SparsityPattern with this - * value, see the discussion in step-2 - * and the @ref Sparsity module. - * - * Return the maximum number of - * degrees of freedom a degree of freedom - * in the given triangulation with the - * given finite element may couple with. - * This is the maximum number of entries - * per line in the system matrix; this - * information can therefore be used upon - * construction of the SparsityPattern - * object. - * - * The returned number is not really the - * maximum number but an estimate based - * on the finite element and the maximum - * number of cells meeting at a vertex. - * The number holds for the constrained - * matrix as well. - * - * The determination of the number of - * couplings can be done by simple - * picture drawing. An example can be - * found in the implementation of this - * function. - * - * Note that this function is most often - * used to determine the maximal row - * length for sparsity - * patterns. Unfortunately, while the - * estimates returned by this function - * are rather accurate in 1d and 2d, they - * are often significantly too high in - * 3d, leading the SparsityPattern class - * to allocate much too much memory in - * some cases. 
Unless someone comes - * around to improving the present - * function for 3d, there is not very - * much one can do about these cases. The - * typical way to work around this - * problem is to use an intermediate - * compressed sparsity pattern that only - * allocates memory on demand. Refer to - * the step-2 and step-11 example - * programs on how to do this. The problem - * is also discussed in the documentation - * of the module on @ref Sparsity. - */ - unsigned int max_couplings_between_dofs () const; - - /** - * @deprecated Use - * CompressedSparsityPattern - * instead of initializing - * SparsityPattern with this - * value. - * - * Return the number of degrees of freedom - * located on the boundary another dof on - * the boundary can couple with. - * - * The number is the same as for - * max_couplings_between_dofs() in one - * dimension less. - */ - unsigned int max_couplings_between_boundary_dofs () const; - - /*--------------------------------------*/ - - /** - * @name Cell iterator functions - */ - /*@{*/ - /** - * Iterator to the first used - * cell on level @p level. - */ - cell_iterator begin (const unsigned int level = 0) const; - - /** - * Iterator to the first active - * cell on level @p level. - */ - active_cell_iterator begin_active(const unsigned int level = 0) const; - - /** - * Iterator past the end; this - * iterator serves for - * comparisons of iterators with - * past-the-end or - * before-the-beginning states. - */ - cell_iterator end () const; - - /** - * Return an iterator which is - * the first iterator not on - * level. If @p level is the - * last level, then this returns - * end(). - */ - cell_iterator end (const unsigned int level) const; - - /** - * Return an active iterator - * which is the first iterator - * not on level. If @p level is - * the last level, then this - * returns end(). - */ - active_cell_iterator end_active (const unsigned int level) const; - - //@} - - /*---------------------------------------*/ - - - /** - * Return the global number of - * degrees of freedom. If the - * current object handles all - * degrees of freedom itself - * (even if you may intend to - * solve your linear system in - * parallel, such as in step-17 - * or step-18), then this number - * equals the number of locally - * owned degrees of freedom since - * this object doesn't know - * anything about what you want - * to do with it and believes - * that it owns every degree of - * freedom it knows about. - * - * On the other hand, if this - * object operates on a - * parallel::distributed::Triangulation - * object, then this function - * returns the global number of - * degrees of freedom, - * accumulated over all - * processors. - * - * In either case, included in - * the returned number are those - * DoFs which are constrained by - * hanging nodes, see @ref constraints. - */ - unsigned int n_dofs () const; - - unsigned int n_dofs (const unsigned int level) const; - - /** - * Return the number of degrees of freedom - * located on the boundary. - */ - unsigned int n_boundary_dofs () const; - - /** - * Return the number of degrees - * of freedom located on those - * parts of the boundary which - * have a boundary indicator - * listed in the given set. The - * reason that a @p map rather - * than a @p set is used is the - * same as described in the - * section on the - * @p make_boundary_sparsity_pattern - * function. 
- */ - unsigned int - n_boundary_dofs (const FunctionMap &boundary_indicators) const; - - /** - * Same function, but with - * different data type of the - * argument, which is here simply - * a list of the boundary - * indicators under - * consideration. - */ - unsigned int - n_boundary_dofs (const std::set &boundary_indicators) const; - - /** - * Access to an object informing - * of the block structure of the - * dof handler. - * - * If an FESystem is used in - * distribute_dofs(), degrees of - * freedom naturally split into - * several @ref GlossBlock - * "blocks". For each base element - * as many blocks appear as its - * multiplicity. - * - * At the end of - * distribute_dofs(), the number - * of degrees of freedom in each - * block is counted, and stored - * in a BlockInfo object, which - * can be accessed here. In an - * MGDoFHandler, the same is done - * on each level. Additionally, - * the block structure on each - * cell can be generated in this - * object by calling - * initialize_local_block_info(). - */ - const BlockInfo& block_info() const; - - - /** - * Return the number of - * degrees of freedom that - * belong to this - * process. - * - * If this is a sequential job, - * then the result equals that - * produced by n_dofs(). On the - * other hand, if we are - * operating on a - * parallel::distributed::Triangulation, - * then it includes only the - * degrees of freedom that the - * current processor owns. Note - * that in this case this does - * not include all degrees of - * freedom that have been - * distributed on the current - * processor's image of the mesh: - * in particular, some of the - * degrees of freedom on the - * interface between the cells - * owned by this processor and - * cells owned by other - * processors may be theirs, and - * degrees of freedom on ghost - * cells are also not necessarily - * included. - */ - unsigned int n_locally_owned_dofs() const; - - /** - * Return an IndexSet describing - * the set of locally owned DoFs - * as a subset of - * 0..n_dofs(). The number of - * elements of this set equals - * n_locally_owned_dofs(). - */ - const IndexSet & locally_owned_dofs() const; - - - /** - * Returns a vector that - * stores the locally owned - * DoFs of each processor. If - * you are only interested in - * the number of elements - * each processor owns then - * n_locally_owned_dofs_per_processor() is - * a better choice. - * - * If this is a sequential job, - * then the vector has a single - * element that equals the - * IndexSet representing the - * entire range [0,n_dofs()]. - */ - const std::vector & - locally_owned_dofs_per_processor () const; - - /** - * Return a vector that - * stores the number of - * degrees of freedom each - * processor that - * participates in this - * triangulation owns - * locally. The sum of all - * these numbers equals the - * number of degrees of - * freedom that exist - * globally, i.e. what - * n_dofs() returns. - * - * Each element of the vector - * returned by this function - * equals the number of - * elements of the - * corresponding sets - * returned by - * global_dof_indices(). - * - * If this is a sequential job, - * then the vector has a single - * element equal to n_dofs(). - */ - const std::vector & - n_locally_owned_dofs_per_processor () const; - - /** - * Return a constant reference to - * the selected finite element - * object. - */ - const FiniteElement & get_fe () const; - - /** - * Return a constant reference to - * the triangulation underlying - * this object. 
- */ - const Triangulation & get_tria () const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. - * - * This function is made virtual, - * since a dof handler object - * might be accessed through a - * pointers to this base class, - * although the actual object - * might be a derived class. - */ - virtual std::size_t memory_consumption () const; - - /** - * Write the data of this object to a - * stream for the purpose of - * serialization. - */ - template - void save (Archive & ar, const unsigned int version) const; - - /** - * Read the data of this object from a - * stream for the purpose of - * serialization. - */ - template - void load (Archive & ar, const unsigned int version); - - BOOST_SERIALIZATION_SPLIT_MEMBER() - - /** - * We are trying to renumber the - * degrees of freedom, but - * somehow did not count - * correctly. - * - * @ingroup Exceptions - */ - DeclException0 (ExcRenumberingIncomplete); - /** - * Exception - * @ingroup Exceptions - */ - DeclException0 (ExcGridsDoNotMatch); - /** - * Exception - * @ingroup Exceptions - */ - DeclException0 (ExcInvalidBoundaryIndicator); - /** - * Exception - * @ingroup Exceptions - */ - DeclException1 (ExcNewNumbersNotConsecutive, - int, - << "The given list of new dof indices is not consecutive: " - << "the index " << arg1 << " does not exist."); - /** - * Exception - * @ingroup Exceptions - */ - DeclException1 (ExcInvalidLevel, - int, - << "The given level " << arg1 - << " is not in the valid range!"); - /** - * Exception - * @ingroup Exceptions - */ - DeclException0 (ExcFacesHaveNoLevel); - /** - * The triangulation level you - * accessed is empty. - * @ingroup Exceptions - */ - DeclException1 (ExcEmptyLevel, - int, - << "You tried to do something on level " << arg1 - << ", but this level is empty."); - - - protected: - /** - * The object containing - * information on the block structure. - */ - BlockInfo block_info_object; - - /** - * Array to store the indices for - * degrees of freedom located at - * vertices. - */ - std::vector vertex_dofs; - - - - /** - * Address of the triangulation to - * work on. - */ - SmartPointer,DoFHandler > - tria; - - /** - * Store a pointer to the finite element - * given latest for the distribution of - * dofs. In order to avoid destruction of - * the object before the lifetime of - * the DoF handler, we subscribe to - * the finite element object. To unlock - * the FE before the end of the lifetime - * of this DoF handler, use the clear() - * function (this clears all data of - * this object as well, though). - */ - SmartPointer,DoFHandler > - selected_fe; - - /** - * An object that describes how degrees - * of freedom should be distributed and - * renumbered. - */ - std_cxx1x::shared_ptr > policy; - - /** - * A structure that contains all - * sorts of numbers that - * characterize the degrees of - * freedom this object works on. - * - * For most members of this - * structure, there is an - * accessor function in this - * class that returns its value. 
- */ - dealii::internal::DoFHandler::NumberCache number_cache; - + typedef dealii::internal::DoFHandler::Iterators > IteratorSelector; + public: + typedef typename IteratorSelector::CellAccessor cell_accessor; + typedef typename IteratorSelector::FaceAccessor face_accessor; + + typedef typename IteratorSelector::line_iterator line_iterator; + typedef typename IteratorSelector::active_line_iterator active_line_iterator; + + typedef typename IteratorSelector::quad_iterator quad_iterator; + typedef typename IteratorSelector::active_quad_iterator active_quad_iterator; + + typedef typename IteratorSelector::hex_iterator hex_iterator; + typedef typename IteratorSelector::active_hex_iterator active_hex_iterator; + + typedef typename IteratorSelector::cell_iterator cell_iterator; + typedef typename IteratorSelector::active_cell_iterator active_cell_iterator; + + typedef typename IteratorSelector::face_iterator face_iterator; + typedef typename IteratorSelector::active_face_iterator active_face_iterator; + + /** + * Alias the @p FunctionMap type + * declared elsewhere. + */ + typedef typename dealii::FunctionMap::type FunctionMap; + + /** + * Make the dimension available + * in function templates. + */ + static const unsigned int dimension = dim; + + /** + * Make the space dimension available + * in function templates. + */ + static const unsigned int space_dimension = spacedim; + + /** + * When the arrays holding the + * DoF indices are set up, but + * before they are filled with + * actual values, they are set to + * an invalid value, in order to + * monitor possible + * problems. This invalid value + * is the constant defined here. + * + * Please note that you should + * not rely on it having a + * certain value, but rather take + * its symbolic name. + */ + static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int; + + /** + * The default index of the + * finite element to be used on a + * given cell. Since the present + * class only supports the same + * finite element to be used on + * all cells, the index of the + * finite element needs to be the + * same on all cells anyway, and + * by convention we pick zero for + * this value. The situation is + * different for hp objects + * (i.e. the hp::DoFHandler + * class) where different finite + * element indices may be used on + * different cells, and the + * default index there + * corresponds to an invalid + * value. + */ + static const unsigned int default_fe_index = 0; + + /** + * Standard constructor, not + * initializing any data. After + * constructing an object with + * this constructor, use + * initialize() to make a valid + * DoFHandler. + */ + DoFHandler (); + + /** + * Constructor. Take @p tria as the + * triangulation to work on. + */ + DoFHandler ( const Triangulation &tria); + + /** + * Destructor. + */ + virtual ~DoFHandler (); + + /** + * Assign a Triangulation and a + * FiniteElement to the + * DoFHandler and compute the + * distribution of degrees of + * freedom over the mesh. + */ + void initialize(const Triangulation &tria, + const FiniteElement &fe); + + /** + * Go through the triangulation and + * distribute the degrees of freedoms + * needed for the given finite element + * according to the given distribution + * method. The purpose of this function + * is first discussed in the introduction + * to the step-2 tutorial program. + * + * A pointer of the transferred + * finite element is + * stored. Therefore, the + * lifetime of the finite element + * object shall be longer than + * that of this object. 
If you + * don't want this behaviour, you + * may want to call the @p clear + * member function which also + * releases the lock of this + * object to the finite element. + */ + virtual void distribute_dofs (const FiniteElement &fe); + ++ virtual void distribute_mg_dofs (const FiniteElement &fe); ++ + /** + * After distribute_dofs() with + * an FESystem element, the block + * structure of global and level + * vectors is stored in a + * BlockInfo object accessible + * with block_info(). This + * function initializes the local + * block structure on each cell + * in the same object. + */ + void initialize_local_block_info(); + + /** + * Clear all data of this object and + * especially delete the lock this object + * has to the finite element used the last + * time when @p distribute_dofs was called. + */ + virtual void clear (); + + /** + * Renumber degrees of freedom based on + * a list of new dof numbers for all the + * dofs. + * + * This function is called by + * the functions in + * DoFRenumbering function + * after computing the ordering + * of the degrees of freedom. + * This function is called, for + * example, by the functions in + * the DoFRenumbering + * namespace, but it can of + * course also be called from + * user code. + * + * @arg new_number This array + * must have a size equal to + * the number of degrees of + * freedom owned by the current + * processor, i.e. the size + * must be equal to what + * n_locally_owned_dofs() + * returns. If only one + * processor participates in + * storing the current mesh, + * then this equals the total + * number of degrees of + * freedom, i.e. the result of + * n_dofs(). The contents of + * this array are the new + * global indices for each + * freedom listed in the + * IndexSet returned by + * locally_owned_dofs(). In the + * case of a sequential mesh + * this means that the array is + * a list of new indices for + * each of the degrees of + * freedom on the current + * mesh. In the case that we + * have a + * parallel::distributed::Triangulation + * underlying this DoFHandler + * object, the array is a list + * of new indices for all the + * locally owned degrees of + * freedom, enumerated in the + * same order as the currently + * locally owned DoFs. In other + * words, assume that degree of + * freedom i is + * currently locally owned, + * then + * new_numbers[locally_owned_dofs().index_within_set(i)] + * returns the new global DoF + * index of + * i. Since the + * IndexSet of + * locally_owned_dofs() is + * complete in the sequential + * case, the latter convention + * for the content of the array + * reduces to the former in the + * case that only one processor + * participates in the mesh. + */ + void renumber_dofs (const std::vector &new_numbers); + + /** + * @deprecated Use + * CompressedSparsityPattern instead of + * initializing SparsityPattern with this + * value, see the discussion in step-2 + * and the @ref Sparsity module. + * + * Return the maximum number of + * degrees of freedom a degree of freedom + * in the given triangulation with the + * given finite element may couple with. + * This is the maximum number of entries + * per line in the system matrix; this + * information can therefore be used upon + * construction of the SparsityPattern + * object. + * + * The returned number is not really the + * maximum number but an estimate based + * on the finite element and the maximum + * number of cells meeting at a vertex. + * The number holds for the constrained + * matrix as well. 
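A minimal setup sketch tying the new level functionality together (assumes FE_Q, an already refined Triangulation, and that dof indices are plain unsigned int as elsewhere in this patch; the helper name is made up). distribute_mg_dofs() enumerates degrees of freedom on every level, which is what the get_mg_dof_indices() accessors above read back:

template <int dim>
void setup_level_dofs (Triangulation<dim> &triangulation)
{
  FE_Q<dim>       fe (1);                      // must outlive the handler
  DoFHandler<dim> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);            // active dofs, as before
  dof_handler.distribute_mg_dofs (fe);         // additionally: one numbering per level

  std::vector<unsigned int> level_dof_indices (fe.dofs_per_cell);
  for (typename DoFHandler<dim>::cell_iterator
         cell = dof_handler.begin(); cell != dof_handler.end(); ++cell)
    cell->get_mg_dof_indices (level_dof_indices);   // indices on cell->level()
}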
+ * + * The determination of the number of + * couplings can be done by simple + * picture drawing. An example can be + * found in the implementation of this + * function. + * + * Note that this function is most often + * used to determine the maximal row + * length for sparsity + * patterns. Unfortunately, while the + * estimates returned by this function + * are rather accurate in 1d and 2d, they + * are often significantly too high in + * 3d, leading the SparsityPattern class + * to allocate much too much memory in + * some cases. Unless someone comes + * around to improving the present + * function for 3d, there is not very + * much one can do about these cases. The + * typical way to work around this + * problem is to use an intermediate + * compressed sparsity pattern that only + * allocates memory on demand. Refer to + * the step-2 and step-11 example + * programs on how to do this. The problem + * is also discussed in the documentation + * of the module on @ref Sparsity. + */ + unsigned int max_couplings_between_dofs () const; + + /** + * @deprecated Use + * CompressedSparsityPattern + * instead of initializing + * SparsityPattern with this + * value. + * + * Return the number of degrees of freedom + * located on the boundary another dof on + * the boundary can couple with. + * + * The number is the same as for + * max_couplings_between_dofs() in one + * dimension less. + */ + unsigned int max_couplings_between_boundary_dofs () const; + + /*--------------------------------------*/ + + /** + * @name Cell iterator functions + */ + /*@{*/ + /** + * Iterator to the first used + * cell on level @p level. + */ + cell_iterator begin (const unsigned int level = 0) const; + + /** + * Iterator to the first active + * cell on level @p level. + */ + active_cell_iterator begin_active(const unsigned int level = 0) const; + + /** + * Iterator past the end; this + * iterator serves for + * comparisons of iterators with + * past-the-end or + * before-the-beginning states. + */ + cell_iterator end () const; + + /** + * Return an iterator which is + * the first iterator not on + * level. If @p level is the + * last level, then this returns + * end(). + */ + cell_iterator end (const unsigned int level) const; + + /** + * Return an active iterator + * which is the first iterator + * not on level. If @p level is + * the last level, then this + * returns end(). + */ + active_cell_iterator end_active (const unsigned int level) const; + + //@} + + /*---------------------------------------*/ + + + /** + * Return the global number of + * degrees of freedom. If the + * current object handles all + * degrees of freedom itself + * (even if you may intend to + * solve your linear system in + * parallel, such as in step-17 + * or step-18), then this number + * equals the number of locally + * owned degrees of freedom since + * this object doesn't know + * anything about what you want + * to do with it and believes + * that it owns every degree of + * freedom it knows about. + * + * On the other hand, if this + * object operates on a + * parallel::distributed::Triangulation + * object, then this function + * returns the global number of + * degrees of freedom, + * accumulated over all + * processors. + * + * In either case, included in + * the returned number are those + * DoFs which are constrained by + * hanging nodes, see @ref constraints. 
+ */ + unsigned int n_dofs () const; + ++ unsigned int n_dofs (const unsigned int level) const; ++ + /** + * Return the number of degrees of freedom + * located on the boundary. + */ + unsigned int n_boundary_dofs () const; + + /** + * Return the number of degrees + * of freedom located on those + * parts of the boundary which + * have a boundary indicator + * listed in the given set. The + * reason that a @p map rather + * than a @p set is used is the + * same as described in the + * section on the + * @p make_boundary_sparsity_pattern + * function. + */ + unsigned int + n_boundary_dofs (const FunctionMap &boundary_indicators) const; + + /** + * Same function, but with + * different data type of the + * argument, which is here simply + * a list of the boundary + * indicators under + * consideration. + */ + unsigned int + n_boundary_dofs (const std::set &boundary_indicators) const; + + /** + * Access to an object informing + * of the block structure of the + * dof handler. + * + * If an FESystem is used in + * distribute_dofs(), degrees of + * freedom naturally split into + * several @ref GlossBlock + * "blocks". For each base element + * as many blocks appear as its + * multiplicity. + * + * At the end of + * distribute_dofs(), the number + * of degrees of freedom in each + * block is counted, and stored + * in a BlockInfo object, which + * can be accessed here. In an + * MGDoFHandler, the same is done + * on each level. Additionally, + * the block structure on each + * cell can be generated in this + * object by calling + * initialize_local_block_info(). + */ + const BlockInfo &block_info() const; + + + /** + * Return the number of + * degrees of freedom that + * belong to this + * process. + * + * If this is a sequential job, + * then the result equals that + * produced by n_dofs(). On the + * other hand, if we are + * operating on a + * parallel::distributed::Triangulation, + * then it includes only the + * degrees of freedom that the + * current processor owns. Note + * that in this case this does + * not include all degrees of + * freedom that have been + * distributed on the current + * processor's image of the mesh: + * in particular, some of the + * degrees of freedom on the + * interface between the cells + * owned by this processor and + * cells owned by other + * processors may be theirs, and + * degrees of freedom on ghost + * cells are also not necessarily + * included. + */ + unsigned int n_locally_owned_dofs() const; + + /** + * Return an IndexSet describing + * the set of locally owned DoFs + * as a subset of + * 0..n_dofs(). The number of + * elements of this set equals + * n_locally_owned_dofs(). + */ + const IndexSet &locally_owned_dofs() const; + + + /** + * Returns a vector that + * stores the locally owned + * DoFs of each processor. If + * you are only interested in + * the number of elements + * each processor owns then + * n_locally_owned_dofs_per_processor() is + * a better choice. + * + * If this is a sequential job, + * then the vector has a single + * element that equals the + * IndexSet representing the + * entire range [0,n_dofs()]. + */ + const std::vector & + locally_owned_dofs_per_processor () const; + + /** + * Return a vector that + * stores the number of + * degrees of freedom each + * processor that + * participates in this + * triangulation owns + * locally. The sum of all + * these numbers equals the + * number of degrees of + * freedom that exist + * globally, i.e. what + * n_dofs() returns. 
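Under the same assumptions as the setup sketch above, the new per-level overload of n_dofs() can be queried level by level, e.g.:

template <int dim>
void report_dof_counts (const DoFHandler<dim> &dof_handler)
{
  deallog << "active dofs: " << dof_handler.n_dofs () << std::endl;
  for (unsigned int level = 0; level < dof_handler.get_tria ().n_levels (); ++level)
    deallog << "level " << level << ": "
            << dof_handler.n_dofs (level) << std::endl;
}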
+ * + * Each element of the vector + * returned by this function + * equals the number of + * elements of the + * corresponding sets + * returned by + * global_dof_indices(). + * + * If this is a sequential job, + * then the vector has a single + * element equal to n_dofs(). + */ + const std::vector & + n_locally_owned_dofs_per_processor () const; + + /** + * Return a constant reference to + * the selected finite element + * object. + */ + const FiniteElement &get_fe () const; + + /** + * Return a constant reference to + * the triangulation underlying + * this object. + */ + const Triangulation &get_tria () const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. + * + * This function is made virtual, + * since a dof handler object + * might be accessed through a + * pointers to this base class, + * although the actual object + * might be a derived class. + */ + virtual std::size_t memory_consumption () const; + + /** + * Write the data of this object to a + * stream for the purpose of + * serialization. + */ + template + void save (Archive &ar, const unsigned int version) const; + + /** + * Read the data of this object from a + * stream for the purpose of + * serialization. + */ + template + void load (Archive &ar, const unsigned int version); + + BOOST_SERIALIZATION_SPLIT_MEMBER() + + /** + * We are trying to renumber the + * degrees of freedom, but + * somehow did not count + * correctly. + * + * @ingroup Exceptions + */ + DeclException0 (ExcRenumberingIncomplete); + /** + * Exception + * @ingroup Exceptions + */ + DeclException0 (ExcGridsDoNotMatch); + /** + * Exception + * @ingroup Exceptions + */ + DeclException0 (ExcInvalidBoundaryIndicator); + /** + * Exception + * @ingroup Exceptions + */ + DeclException1 (ExcNewNumbersNotConsecutive, + int, + << "The given list of new dof indices is not consecutive: " + << "the index " << arg1 << " does not exist."); + /** + * Exception + * @ingroup Exceptions + */ + DeclException1 (ExcInvalidLevel, + int, + << "The given level " << arg1 + << " is not in the valid range!"); + /** + * Exception + * @ingroup Exceptions + */ + DeclException0 (ExcFacesHaveNoLevel); + /** + * The triangulation level you + * accessed is empty. + * @ingroup Exceptions + */ + DeclException1 (ExcEmptyLevel, + int, + << "You tried to do something on level " << arg1 + << ", but this level is empty."); + + + protected: + /** + * The object containing + * information on the block structure. + */ + BlockInfo block_info_object; + + /** + * Array to store the indices for + * degrees of freedom located at + * vertices. + */ + std::vector vertex_dofs; + + + + /** + * Address of the triangulation to + * work on. + */ + SmartPointer,DoFHandler > + tria; + + /** + * Store a pointer to the finite element + * given latest for the distribution of + * dofs. In order to avoid destruction of + * the object before the lifetime of + * the DoF handler, we subscribe to + * the finite element object. To unlock + * the FE before the end of the lifetime + * of this DoF handler, use the clear() + * function (this clears all data of + * this object as well, though). + */ + SmartPointer,DoFHandler > + selected_fe; + + /** + * An object that describes how degrees + * of freedom should be distributed and + * renumbered. + */ + std_cxx1x::shared_ptr > policy; + + /** + * A structure that contains all + * sorts of numbers that + * characterize the degrees of + * freedom this object works on. 
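The accessors and the memory/serialization members declared above might be used as follows. This is a sketch; the BOOST archive lines are only indicated as comments because the exact serialization workflow depends on how the underlying triangulation is stored:

  const FiniteElement<dim> &fe   = dof_handler.get_fe ();
  const Triangulation<dim> &tria = dof_handler.get_tria ();
  std::cout << "FE '" << fe.get_name () << "' on "
            << tria.n_active_cells () << " active cells, "
            << dof_handler.n_dofs () << " DoFs, approx. "
            << dof_handler.memory_consumption () << " bytes."
            << std::endl;

  // save()/load() follow the usual BOOST split-member pattern, so the
  // object can be streamed through a boost::archive, e.g. (sketch):
  //   boost::archive::text_oarchive oa (out_stream);
  //   oa << dof_handler;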
+ * + * For most members of this + * structure, there is an + * accessor function in this + * class that returns its value. + */ + dealii::internal::DoFHandler::NumberCache number_cache; + + private: + + /** + * Copy constructor. I can see no reason + * why someone might want to use it, so + * I don't provide it. Since this class + * has pointer members, making it private + * prevents the compiler to provide it's + * own, incorrect one if anyone chose to + * copy such an object. + */ + DoFHandler (const DoFHandler &); + + /** + * Copy operator. I can see no reason + * why someone might want to use it, so + * I don't provide it. Since this class + * has pointer members, making it private + * prevents the compiler to provide it's + * own, incorrect one if anyone chose to + * copy such an object. + */ + DoFHandler &operator = (const DoFHandler &); + ++ class MGVertexDoFs ++ { + private: ++ unsigned int coarsest_level; ++ unsigned int finest_level; ++ unsigned int *indices; ++ unsigned int *indices_offset; + - /** - * Copy constructor. I can see no reason - * why someone might want to use it, so - * I don't provide it. Since this class - * has pointer members, making it private - * prevents the compiler to provide it's - * own, incorrect one if anyone chose to - * copy such an object. - */ - DoFHandler (const DoFHandler &); - - /** - * Copy operator. I can see no reason - * why someone might want to use it, so - * I don't provide it. Since this class - * has pointer members, making it private - * prevents the compiler to provide it's - * own, incorrect one if anyone chose to - * copy such an object. - */ - DoFHandler & operator = (const DoFHandler &); - - class MGVertexDoFs { - private: - unsigned int coarsest_level; - unsigned int finest_level; - unsigned int* indices; - unsigned int* indices_offset; - - public: - DeclException0 (ExcNoMemory); - MGVertexDoFs (); - ~MGVertexDoFs (); - unsigned int get_coarsest_level () const; - unsigned int get_finest_level () const; - unsigned int get_index (const unsigned int level, const unsigned int dof_number) const; - void init (const unsigned int coarsest_level, const unsigned int finest_level, const unsigned int dofs_per_vertex); - void set_index (const unsigned int level, const unsigned int dof_number, const unsigned int index); - }; - - void clear_mg_space (); - - /** - * Free all used memory. - */ - void clear_space (); - - void reserve_space (); - - template - unsigned int get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const; - - template - void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index) const; - - /** - * Space to store the DoF numbers - * for the different - * levels. Analogous to the - * levels[] tree of the - * Triangulation objects. - */ - std::vector*> levels; - - std::vector*> mg_levels; - - /** - * Space to store DoF numbers of - * faces. They are not stored in - * levels since faces - * are not organized - * hierarchically, but in a flat - * array. - */ - dealii::internal::DoFHandler::DoFFaces *faces; - - dealii::internal::DoFHandler::DoFFaces* mg_faces; - - std::vector mg_vertex_dofs; - - std::vector mg_used_dofs; - - /** - * Make accessor objects friends. 
- */ - template friend class DoFAccessor; - template friend class DoFCellAccessor; - friend struct dealii::internal::DoFAccessor::Implementation; - friend struct dealii::internal::DoFCellAccessor::Implementation; - - friend struct dealii::internal::DoFHandler::Implementation; - friend struct dealii::internal::DoFHandler::Policy::Implementation; ++ public: ++ DeclException0 (ExcNoMemory); ++ MGVertexDoFs (); ++ ~MGVertexDoFs (); ++ unsigned int get_coarsest_level () const; ++ unsigned int get_finest_level () const; ++ unsigned int get_index (const unsigned int level, const unsigned int dof_number) const; ++ void init (const unsigned int coarsest_level, const unsigned int finest_level, const unsigned int dofs_per_vertex); ++ void set_index (const unsigned int level, const unsigned int dof_number, const unsigned int index); ++ }; ++ ++ void clear_mg_space (); ++ + /** + * Free all used memory. + */ + void clear_space (); + ++ void reserve_space (); ++ ++ template ++ unsigned int get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const; ++ ++ template ++ void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index) const; ++ + /** + * Space to store the DoF numbers + * for the different + * levels. Analogous to the + * levels[] tree of the + * Triangulation objects. + */ + std::vector*> levels; + ++ std::vector*> mg_levels; ++ + /** + * Space to store DoF numbers of + * faces. They are not stored in + * levels since faces + * are not organized + * hierarchically, but in a flat + * array. + */ + dealii::internal::DoFHandler::DoFFaces *faces; + ++ dealii::internal::DoFHandler::DoFFaces *mg_faces; ++ ++ std::vector mg_vertex_dofs; ++ ++ std::vector mg_used_dofs; ++ + /** + * Make accessor objects friends. 
+ */ + template friend class DoFAccessor; + template friend class DoFCellAccessor; + friend struct dealii::internal::DoFAccessor::Implementation; + friend struct dealii::internal::DoFCellAccessor::Implementation; + + friend struct dealii::internal::DoFHandler::Implementation; + friend struct dealii::internal::DoFHandler::Policy::Implementation; }; @@@ -977,12 -937,6 +978,13 @@@ DoFHandler::n_dofs () con return number_cache.n_global_dofs; } +template +inline - unsigned int DoFHandler::n_dofs (const unsigned int level) const { ++unsigned int DoFHandler::n_dofs (const unsigned int level) const ++{ + Assert (level < mg_used_dofs.size (), ExcInvalidLevel (level)); + return mg_used_dofs[level]; +} template unsigned int @@@ -1109,21 -1063,6 +1111,23 @@@ void DoFHandler::load (Ar } +template +inline - unsigned int DoFHandler::MGVertexDoFs::get_index (const unsigned int level, const unsigned int dof_number) const { ++unsigned int DoFHandler::MGVertexDoFs::get_index (const unsigned int level, const unsigned int dof_number) const ++{ + Assert ((level >= coarsest_level) && (level <= finest_level), ExcInvalidLevel (level)); + return indices[indices_offset[level - coarsest_level] + dof_number]; +} + + +template +inline - void DoFHandler::MGVertexDoFs::set_index (const unsigned int level, const unsigned int dof_number, const unsigned int index) { ++void DoFHandler::MGVertexDoFs::set_index (const unsigned int level, const unsigned int dof_number, const unsigned int index) ++{ + Assert ((level >= coarsest_level) && (level <= finest_level), ExcInvalidLevel (level)); + indices[indices_offset[level - coarsest_level] + dof_number] = index; +} + #endif // DOXYGEN DEAL_II_NAMESPACE_CLOSE diff --cc deal.II/include/deal.II/dofs/dof_handler_policy.h index c5c0f80d84,07e0005303..4dfb1c85d5 --- a/deal.II/include/deal.II/dofs/dof_handler_policy.h +++ b/deal.II/include/deal.II/dofs/dof_handler_policy.h @@@ -53,28 -53,28 +53,28 @@@ namespace interna template class PolicyBase { - public: - /** - * Destructor. - */ - virtual ~PolicyBase (); - - /** - * Distribute degrees of freedom on - * the object given as last argument. - */ - virtual - NumberCache - distribute_dofs (dealii::DoFHandler &dof_handler) const = 0; - - /** - * Renumber degrees of freedom as - * specified by the first argument. - */ - virtual - NumberCache - renumber_dofs (const std::vector &new_numbers, - dealii::DoFHandler &dof_handler) const = 0; + public: + /** + * Destructor. + */ + virtual ~PolicyBase (); + + /** + * Distribute degrees of freedom on + * the object given as last argument. + */ + virtual + NumberCache + distribute_dofs (dealii::DoFHandler &dof_handler) const = 0; + + /** + * Renumber degrees of freedom as + * specified by the first argument. + */ + virtual + NumberCache - renumber_dofs (const std::vector &new_numbers, ++ renumber_dofs (const std::vector &new_numbers, + dealii::DoFHandler &dof_handler) const = 0; }; @@@ -87,23 -87,23 +87,23 @@@ template class Sequential : public PolicyBase { - public: - /** - * Distribute degrees of freedom on - * the object given as last argument. - */ - virtual - NumberCache - distribute_dofs (dealii::DoFHandler &dof_handler) const; - - /** - * Renumber degrees of freedom as - * specified by the first argument. - */ - virtual - NumberCache - renumber_dofs (const std::vector &new_numbers, - dealii::DoFHandler &dof_handler) const; + public: + /** + * Distribute degrees of freedom on + * the object given as last argument. 
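The per-level n_dofs(level) overload whose inline implementation appears earlier in this hunk allows, for instance, sizing one vector per multigrid level. A sketch under the assumption that the level data of `dof_handler` has been initialized and `triangulation` is the underlying mesh:

  for (unsigned int level = 0; level < triangulation.n_levels (); ++level)
    {
      Vector<double> level_vector (dof_handler.n_dofs (level));
      // ... fill and use level_vector on this level ...
    }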
+ */ + virtual + NumberCache + distribute_dofs (dealii::DoFHandler &dof_handler) const; + + /** + * Renumber degrees of freedom as + * specified by the first argument. + */ + virtual + NumberCache - renumber_dofs (const std::vector &new_numbers, ++ renumber_dofs (const std::vector &new_numbers, + dealii::DoFHandler &dof_handler) const; }; @@@ -117,23 -117,23 +117,23 @@@ template class ParallelDistributed : public PolicyBase { - public: - /** - * Distribute degrees of freedom on - * the object given as last argument. - */ - virtual - NumberCache - distribute_dofs (dealii::DoFHandler &dof_handler) const; - - /** - * Renumber degrees of freedom as - * specified by the first argument. - */ - virtual - NumberCache - renumber_dofs (const std::vector &new_numbers, - dealii::DoFHandler &dof_handler) const; + public: + /** + * Distribute degrees of freedom on + * the object given as last argument. + */ + virtual + NumberCache + distribute_dofs (dealii::DoFHandler &dof_handler) const; + + /** + * Renumber degrees of freedom as + * specified by the first argument. + */ + virtual + NumberCache - renumber_dofs (const std::vector &new_numbers, ++ renumber_dofs (const std::vector &new_numbers, + dealii::DoFHandler &dof_handler) const; }; } } diff --cc deal.II/include/deal.II/dofs/dof_renumbering.h index 1cbc12da7f,f554afd675..d5f789cf89 --- a/deal.II/include/deal.II/dofs/dof_renumbering.h +++ b/deal.II/include/deal.II/dofs/dof_renumbering.h @@@ -872,19 -872,19 +872,19 @@@ namespace DoFRenumberin void block_wise (hp::DoFHandler &dof_handler); - /** - * Sort the degrees of freedom by - * block. It does the same - * thing as the above function, - * only that it does this for one - * single level of a multi-level - * discretization. The - * non-multigrid part of the - * MGDoFHandler is not touched. - */ + /** + * Sort the degrees of freedom by + * block. It does the same + * thing as the above function, + * only that it does this for one + * single level of a multi-level + * discretization. The + * non-multigrid part of the + * MGDoFHandler is not touched. + */ template void - block_wise (MGDoFHandler &dof_handler, + block_wise (MGDoFHandler &dof_handler, const unsigned int level); @@@ -1133,127 -1133,127 +1133,127 @@@ void downstream (MGDoFHandler &dof_handler, const unsigned int level, - const Point &direction, + const Point &direction, const bool dof_wise_renumbering = false); - /** - * @deprecated Use downstream() - * instead. - */ + /** + * @deprecated Use downstream() + * instead. + */ template void downstream_dg (MGDoFHandler &dof_handler, const unsigned int level, - const Point &direction); + const Point &direction); - /** - * @deprecated The new function - * of this name computes the - * renumbering and its inverse at - * the same time. So, at least if - * you need both, you should use - * the other one. - * - * Computes the renumbering - * vector needed by the - * downstream_dg() function. Does - * not perform the renumbering on - * the DoFHandler dofs but - * returns the renumbering - * vector. - */ + /** + * @deprecated The new function + * of this name computes the + * renumbering and its inverse at + * the same time. So, at least if + * you need both, you should use + * the other one. + * + * Computes the renumbering + * vector needed by the + * downstream_dg() function. Does + * not perform the renumbering on + * the DoFHandler dofs but + * returns the renumbering + * vector. 
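A sketch of the downstream renumbering documented in this file, in its global and level-wise forms; it assumes `dof_handler`, `mg_dof_handler` and `level` exist and uses dim = 2 so the direction can be spelled out:

  const Point<2> direction (1., 0.);

  // Renumber all DoFs of a (non-multigrid) handler along `direction`:
  DoFRenumbering::downstream (dof_handler, direction);

  // Same on a single level of a multilevel handler, as declared above:
  DoFRenumbering::downstream (mg_dof_handler, level, direction);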
+ */ template void - compute_downstream_dg (std::vector& new_dof_indices, - const DH& dof_handler, - const Point& direction); - - /** - * Computes the renumbering - * vector needed by the - * downstream_dg() function. Does - * not perform the renumbering on - * the DoFHandler dofs but - * returns the renumbering - * vector. - */ + compute_downstream_dg (std::vector &new_dof_indices, + const DH &dof_handler, + const Point &direction); + + /** + * Computes the renumbering + * vector needed by the + * downstream_dg() function. Does + * not perform the renumbering on + * the DoFHandler dofs but + * returns the renumbering + * vector. + */ template void - compute_downstream (std::vector& new_dof_indices, - std::vector& reverse, - const DH& dof_handler, - const Point& direction, + compute_downstream (std::vector &new_dof_indices, + std::vector &reverse, + const DH &dof_handler, + const Point &direction, const bool dof_wise_renumbering); - /** - * @deprecated Use - * compute_downstream() instead - */ + /** + * @deprecated Use + * compute_downstream() instead + */ template void - compute_downstream_dg (std::vector& new_dof_indices, - std::vector& reverse, - const DH& dof_handler, - const Point& direction); - - /** - * Computes the renumbering - * vector needed by the - * downstream_dg() function. Does - * not perform the renumbering on - * the MGDoFHandler dofs but - * returns the renumbering - * vector. - */ + compute_downstream_dg (std::vector &new_dof_indices, + std::vector &reverse, + const DH &dof_handler, + const Point &direction); + + /** + * Computes the renumbering + * vector needed by the + * downstream_dg() function. Does + * not perform the renumbering on + * the MGDoFHandler dofs but + * returns the renumbering + * vector. + */ template void - compute_downstream (std::vector& new_dof_indices, - std::vector& reverse, - const MGDoFHandler& dof_handler, + compute_downstream (std::vector &new_dof_indices, + std::vector &reverse, + const MGDoFHandler &dof_handler, const unsigned int level, - const Point& direction, + const Point &direction, const bool dof_wise_renumbering); - /** - * @deprecated Use - * compute_downstream() instead - */ + /** + * @deprecated Use + * compute_downstream() instead + */ template void - compute_downstream_dg (std::vector& new_dof_indices, - std::vector& reverse, - const MGDoFHandler& dof_handler, + compute_downstream_dg (std::vector &new_dof_indices, + std::vector &reverse, + const MGDoFHandler &dof_handler, const unsigned int level, - const Point& direction); - - /** - * Cell-wise clockwise numbering. - * - * This function produces a - * (counter)clockwise ordering of - * the mesh cells with respect to - * the hub @p center and calls - * cell_wise_dg(). Therefore, it - * only works with Discontinuous - * Galerkin Finite Elements, - * i.e. all degrees of freedom - * have to be associated with the - * interior of the cell. - */ + const Point &direction); + + /** + * Cell-wise clockwise numbering. + * + * This function produces a + * (counter)clockwise ordering of + * the mesh cells with respect to + * the hub @p center and calls + * cell_wise_dg(). Therefore, it + * only works with Discontinuous + * Galerkin Finite Elements, + * i.e. all degrees of freedom + * have to be associated with the + * interior of the cell. + */ template void - clockwise_dg (DH& dof_handler, - const Point& center, + clockwise_dg (DH &dof_handler, + const Point ¢er, const bool counter = false); - /** - * Cell-wise clockwise numbering - * on one level. 
See the other - * function with the same name. - */ + /** + * Cell-wise clockwise numbering + * on one level. See the other + * function with the same name. + */ template void - clockwise_dg (MGDoFHandler &dof_handler, + clockwise_dg (MGDoFHandler &dof_handler, const unsigned int level, const Point ¢er, const bool counter = false); diff --cc deal.II/include/deal.II/dofs/dof_tools.h index 05ef4b34ad,7fb3d61df6..2fc40bddab --- a/deal.II/include/deal.II/dofs/dof_tools.h +++ b/deal.II/include/deal.II/dofs/dof_tools.h @@@ -1514,133 -1514,133 +1514,133 @@@ namespace DoFTool void extract_subdomain_dofs (const DH &dof_handler, const types::subdomain_id subdomain_id, - std::vector &selected_dofs); + std::vector &selected_dofs); - /** - * Extract the set of global DoF - * indices that are owned by the - * current processor. For regular - * DoFHandler objects, this set - * is the complete set with all - * DoF indices. In either case, - * it equals what - * DoFHandler::locally_owned_dofs() - * returns. - */ + /** + * Extract the set of global DoF + * indices that are owned by the + * current processor. For regular + * DoFHandler objects, this set + * is the complete set with all + * DoF indices. In either case, + * it equals what + * DoFHandler::locally_owned_dofs() + * returns. + */ template void - extract_locally_owned_dofs (const DH & dof_handler, - IndexSet & dof_set); - - - /** - * Extract the set of global DoF - * indices that are active on the - * current DoFHandler. For - * regular DoFHandlers, these are - * all DoF indices, but for - * DoFHandler objects built on - * parallel::distributed::Triangulation - * this set is a superset of - * DoFHandler::locally_owned_dofs() - * and contains all DoF indices - * that live on all locally owned - * cells (including on the - * interface to ghost - * cells). However, it does not - * contain the DoF indices that - * are exclusively defined on - * ghost or artificial cells (see - * @ref GlossArtificialCell "the - * glossary"). - * - * The degrees of freedom identified by - * this function equal those obtained - * from the - * dof_indices_with_subdomain_association() - * function when called with the locally - * owned subdomain id. - */ + extract_locally_owned_dofs (const DH &dof_handler, + IndexSet &dof_set); + + + /** + * Extract the set of global DoF + * indices that are active on the + * current DoFHandler. For + * regular DoFHandlers, these are + * all DoF indices, but for + * DoFHandler objects built on + * parallel::distributed::Triangulation + * this set is a superset of + * DoFHandler::locally_owned_dofs() + * and contains all DoF indices + * that live on all locally owned + * cells (including on the + * interface to ghost + * cells). However, it does not + * contain the DoF indices that + * are exclusively defined on + * ghost or artificial cells (see + * @ref GlossArtificialCell "the + * glossary"). + * + * The degrees of freedom identified by + * this function equal those obtained + * from the + * dof_indices_with_subdomain_association() + * function when called with the locally + * owned subdomain id. + */ template void - extract_locally_active_dofs (const DH & dof_handler, - IndexSet & dof_set); - - /** - * Extract the set of global DoF - * indices that are active on the - * current DoFHandler. 
For - * regular DoFHandlers, these are - * all DoF indices, but for - * DoFHandler objects built on - * parallel::distributed::Triangulation - * this set is the union of - * DoFHandler::locally_owned_dofs() - * and the DoF indices on all - * ghost cells. In essence, it is - * the DoF indices on all cells - * that are not artificial (see - * @ref GlossArtificialCell "the glossary"). - */ + extract_locally_active_dofs (const DH &dof_handler, + IndexSet &dof_set); + + /** + * Extract the set of global DoF + * indices that are active on the + * current DoFHandler. For + * regular DoFHandlers, these are + * all DoF indices, but for + * DoFHandler objects built on + * parallel::distributed::Triangulation + * this set is the union of + * DoFHandler::locally_owned_dofs() + * and the DoF indices on all + * ghost cells. In essence, it is + * the DoF indices on all cells + * that are not artificial (see + * @ref GlossArtificialCell "the glossary"). + */ template void - extract_locally_relevant_dofs (const DH & dof_handler, - IndexSet & dof_set); - - /** - * For each DoF, return in the output - * array to which subdomain (as given by - * the cell->subdomain_id() function) - * it belongs. The output array is - * supposed to have the right size - * already when calling this function. - * - * Note that degrees of freedom - * associated with faces, edges, and - * vertices may be associated with - * multiple subdomains if they are - * sitting on partition boundaries. In - * these cases, we put them into one of - * the associated partitions in an - * undefined way. This may sometimes lead - * to different numbers of degrees of - * freedom in partitions, even if the - * number of cells is perfectly - * equidistributed. While this is - * regrettable, it is not a problem in - * practice since the number of degrees - * of freedom on partition boundaries is - * asymptotically vanishing as we refine - * the mesh as long as the number of - * partitions is kept constant. - * - * This function returns the association - * of each DoF with one subdomain. If you - * are looking for the association of - * each @em cell with a subdomain, either - * query the - * cell->subdomain_id() - * function, or use the - * GridTools::get_subdomain_association - * function. - * - * Note that this function is of - * questionable use for DoFHandler objects built on - * parallel::distributed::Triangulation - * since in that case ownership of - * individual degrees of freedom by MPI - * processes is controlled by the DoF - * handler object, not based on some - * geometric algorithm in conjunction - * with subdomain id. In particular, the - * degrees of freedom identified by the - * functions in this namespace as - * associated with a subdomain are not - * the same the - * DoFHandler class - * identifies as those it owns. - */ + extract_locally_relevant_dofs (const DH &dof_handler, + IndexSet &dof_set); + + /** + * For each DoF, return in the output + * array to which subdomain (as given by + * the cell->subdomain_id() function) + * it belongs. The output array is + * supposed to have the right size + * already when calling this function. + * + * Note that degrees of freedom + * associated with faces, edges, and + * vertices may be associated with + * multiple subdomains if they are + * sitting on partition boundaries. In + * these cases, we put them into one of + * the associated partitions in an + * undefined way. 
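The extraction functions above are the usual entry point for setting up ghosted vectors and subdomain bookkeeping in parallel computations. A sketch, assuming a distributed or sequential `dof_handler` as described in the documentation:

  IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs ();
  IndexSet locally_relevant_dofs;
  DoFTools::extract_locally_relevant_dofs (dof_handler,
                                           locally_relevant_dofs);

  // Subdomain association of each DoF, as described above:
  std::vector<types::subdomain_id> subdomain_of_dof (dof_handler.n_dofs ());
  DoFTools::get_subdomain_association (dof_handler, subdomain_of_dof);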
This may sometimes lead + * to different numbers of degrees of + * freedom in partitions, even if the + * number of cells is perfectly + * equidistributed. While this is + * regrettable, it is not a problem in + * practice since the number of degrees + * of freedom on partition boundaries is + * asymptotically vanishing as we refine + * the mesh as long as the number of + * partitions is kept constant. + * + * This function returns the association + * of each DoF with one subdomain. If you + * are looking for the association of + * each @em cell with a subdomain, either + * query the + * cell->subdomain_id() + * function, or use the + * GridTools::get_subdomain_association + * function. + * + * Note that this function is of + * questionable use for DoFHandler objects built on + * parallel::distributed::Triangulation + * since in that case ownership of + * individual degrees of freedom by MPI + * processes is controlled by the DoF + * handler object, not based on some + * geometric algorithm in conjunction + * with subdomain id. In particular, the + * degrees of freedom identified by the + * functions in this namespace as + * associated with a subdomain are not + * the same the + * DoFHandler class + * identifies as those it owns. + */ template void get_subdomain_association (const DH &dof_handler, @@@ -1768,279 -1768,280 +1768,279 @@@ IndexSet dof_indices_with_subdomain_association (const DH &dof_handler, const types::subdomain_id subdomain); - // @} - /** - * @name Dof indices for patches - * - * Create structures containing a - * large set of degrees of freedom - * for small patches of cells. The - * resulting objects can be used in - * RelaxationBlockSOR and related - * classes to implement Schwarz - * preconditioners and smoothers, - * where the subdomains consist of - * small numbers of cells only. - */ - //@{ - /** - * Create an incidence matrix that - * for every cell on a given level - * of a multilevel DoFHandler flags - * which degrees of freedom are - * associated with the - * corresponding cell. This data - * structure is matrix with as many - * rows as there are cells on a - * given level, as many rows as - * there are degrees of freedom on - * this level, and entries that are - * either true or false. This data - * structure is conveniently - * represented by a SparsityPattern - * object. - * - * @note The ordering of rows - * (cells) follows the ordering of - * the standard cell iterators. - */ - + // @} + /** + * @name Dof indices for patches + * + * Create structures containing a + * large set of degrees of freedom + * for small patches of cells. The + * resulting objects can be used in + * RelaxationBlockSOR and related + * classes to implement Schwarz + * preconditioners and smoothers, + * where the subdomains consist of + * small numbers of cells only. + */ + //@{ + /** + * Create an incidence matrix that + * for every cell on a given level + * of a multilevel DoFHandler flags + * which degrees of freedom are + * associated with the + * corresponding cell. This data + * structure is matrix with as many + * rows as there are cells on a + * given level, as many rows as + * there are degrees of freedom on + * this level, and entries that are + * either true or false. This data + * structure is conveniently + * represented by a SparsityPattern + * object. + * + * @note The ordering of rows + * (cells) follows the ordering of + * the standard cell iterators. 
+ */ template - void make_cell_patches(Sparsity& block_list, - const DH& dof_handler, + void make_cell_patches(Sparsity &block_list, + const DH &dof_handler, const unsigned int level, - const std::vector& selected_dofs = std::vector(), + const std::vector &selected_dofs = std::vector(), unsigned int offset = 0); - /** - * Create an incidence matrix that - * for every vertex on a given level - * of a multilevel DoFHandler flags - * which degrees of freedom are - * associated with the - * adjacent cells. This data - * structure is matrix with as many - * rows as there are vertices on a - * given level, as many rows as - * there are degrees of freedom on - * this level, and entries that are - * either true or false. This data - * structure is conveniently - * represented by a SparsityPattern - * object. - * The sparsity pattern - * may be empty when entering this - * function and will be - * reinitialized to the correct - * size. - * - * The function has some boolean - * arguments (listed below) - * controlling details of the - * generated patches. The default - * settings are those for - * Arnold-Falk-Winther type - * smoothers for divergence and - * curl conforming finite elements - * with essential boundary - * conditions. Other applications - * are possible, in particular - * changing - * boundary_patches for - * non-essential boundary conditions. - * - * @arg block_list: the - * SparsityPattern into which the - * patches will be stored. - * @arg dof_handler: The - * multilevel dof handler - * providing the topology operated - * on. - * @arg - * interior_dofs_only: - * for each patch of cells around - * a vertex, collect only the - * interior degrees of freedom of - * the patch and disregard those - * on the boundary of the - * patch. This is for instance the - * setting for smoothers of - * Arnold-Falk-Winther type. - * @arg boundary_patches: - * include patches around vertices - * at the boundary of the - * domain. If not, only patches - * around interior vertices will - * be generated. - * @arg - * level_boundary_patches: - * same for refinement edges - * towards coarser cells. - * @arg - * single_cell_patches: - * if not true, patches containing - * a single cell are eliminated. - */ - template - void make_vertex_patches(SparsityPattern& block_list, - const DH& dof_handler, - const unsigned int level, - const bool interior_dofs_only, - const bool boundary_patches = false, - const bool level_boundary_patches = false, - const bool single_cell_patches = false); - - /** - * Create an incidence matrix that - * for every cell on a given level - * of a multilevel DoFHandler flags - * which degrees of freedom are - * associated with children of this - * cell. This data - * structure is conveniently - * represented by a SparsityPattern - * object. - - * Create a sparsity pattern which - * in each row lists the degrees of - * freedom associated to the - * cells which are the children of - * the same cell. The - * sparsity pattern may be empty - * when entering this function and - * will be reinitialized to the - * correct size. - * - * The function has some boolean - * arguments (lsited below) - * controlling details of the - * generated patches. The default - * settings are those for - * Arnold-Falk-Winther type - * smoothers for divergence and - * curl conforming finite elements - * with essential boundary - * conditions. Other applications - * are possible, in particular - * changing - * boundary_dofs for - * non-essential boundary - * conditions. 
- * - * Since the patches are defined - * through refinement, th - * - * @arg block_list: the - * SparsityPattern into which the - * patches will be stored. - * @arg dof_handler: The - * multilevel dof handler - * providing the topology operated - * on. - * @arg - * interior_dofs_only: - * for each patch of cells around - * a vertex, collect only the - * interior degrees of freedom of - * the patch and disregard those - * on the boundary of the - * patch. This is for instance the - * setting for smoothers of - * Arnold-Falk-Winther type. - * @arg boundary_dofs: - * include degrees of freedom, - * which would have excluded by - * interior_dofs_only, - * but are lying on the boundary - * of the domain, and thus need - * smoothing. This parameter has - * no effect if - * interior_dofs_only is false. - */ - template - void make_child_patches(SparsityPattern& block_list, - const DH& dof_handler, - const unsigned int level, - const bool interior_dofs_only, - const bool boundary_dofs = false); - - /** - * Create a block list with only a - * single patch, which in turn - * contains all degrees of freedom - * on the given level. - * - * This function is mostly a - * closure on level 0 for functions - * like make_child_patches() and - * make_vertex_patches(), which may - * produce an empty patch list. - * - * @arg block_list: the - * SparsityPattern into which the - * patches will be stored. - * @arg dof_handler: The - * multilevel dof handler - * providing the topology operated - * on. - * @arg level The grid - * level used for building the list. - * @arg - * interior_dofs_only: - * if true, exclude degrees of freedom on - * the boundary of the domain. - */ + /** + * Create an incidence matrix that + * for every vertex on a given level + * of a multilevel DoFHandler flags + * which degrees of freedom are + * associated with the + * adjacent cells. This data + * structure is matrix with as many + * rows as there are vertices on a + * given level, as many rows as + * there are degrees of freedom on + * this level, and entries that are + * either true or false. This data + * structure is conveniently + * represented by a SparsityPattern + * object. + * The sparsity pattern + * may be empty when entering this + * function and will be + * reinitialized to the correct + * size. + * + * The function has some boolean + * arguments (listed below) + * controlling details of the + * generated patches. The default + * settings are those for + * Arnold-Falk-Winther type + * smoothers for divergence and + * curl conforming finite elements + * with essential boundary + * conditions. Other applications + * are possible, in particular + * changing + * boundary_patches for + * non-essential boundary conditions. + * + * @arg block_list: the + * SparsityPattern into which the + * patches will be stored. + * @arg dof_handler: The + * multilevel dof handler + * providing the topology operated + * on. + * @arg + * interior_dofs_only: + * for each patch of cells around + * a vertex, collect only the + * interior degrees of freedom of + * the patch and disregard those + * on the boundary of the + * patch. This is for instance the + * setting for smoothers of + * Arnold-Falk-Winther type. + * @arg boundary_patches: + * include patches around vertices + * at the boundary of the + * domain. If not, only patches + * around interior vertices will + * be generated. + * @arg + * level_boundary_patches: + * same for refinement edges + * towards coarser cells. 
+ * @arg + * single_cell_patches: + * if not true, patches containing + * a single cell are eliminated. + */ + template + void make_vertex_patches(SparsityPattern &block_list, + const DH &dof_handler, + const unsigned int level, + const bool interior_dofs_only, + const bool boundary_patches = false, + const bool level_boundary_patches = false, + const bool single_cell_patches = false); + + /** + * Create an incidence matrix that + * for every cell on a given level + * of a multilevel DoFHandler flags + * which degrees of freedom are + * associated with children of this + * cell. This data + * structure is conveniently + * represented by a SparsityPattern + * object. + + * Create a sparsity pattern which + * in each row lists the degrees of + * freedom associated to the + * cells which are the children of + * the same cell. The + * sparsity pattern may be empty + * when entering this function and + * will be reinitialized to the + * correct size. + * + * The function has some boolean + * arguments (lsited below) + * controlling details of the + * generated patches. The default + * settings are those for + * Arnold-Falk-Winther type + * smoothers for divergence and + * curl conforming finite elements + * with essential boundary + * conditions. Other applications + * are possible, in particular + * changing + * boundary_dofs for + * non-essential boundary + * conditions. + * + * Since the patches are defined + * through refinement, th + * + * @arg block_list: the + * SparsityPattern into which the + * patches will be stored. + * @arg dof_handler: The + * multilevel dof handler + * providing the topology operated + * on. + * @arg + * interior_dofs_only: + * for each patch of cells around + * a vertex, collect only the + * interior degrees of freedom of + * the patch and disregard those + * on the boundary of the + * patch. This is for instance the + * setting for smoothers of + * Arnold-Falk-Winther type. + * @arg boundary_dofs: + * include degrees of freedom, + * which would have excluded by + * interior_dofs_only, + * but are lying on the boundary + * of the domain, and thus need + * smoothing. This parameter has + * no effect if + * interior_dofs_only is false. + */ template - void make_single_patch(SparsityPattern& block_list, - const DH& dof_handler, + void make_child_patches(SparsityPattern &block_list, + const DH &dof_handler, const unsigned int level, - const bool interior_dofs_only = false); - - //@} - /** - * Extract a vector that represents the - * constant modes of the DoFHandler for the - * components chosen by - * component_mask (see @ref - * GlossComponentMask). The constant modes - * on a discretization are the null space - * of a Laplace operator on the selected - * components with Neumann boundary - * conditions applied. The null space is a - * necessary ingredient for obtaining a - * good AMG preconditioner when using the - * class TrilinosWrappers::PreconditionAMG. - * Since the ML AMG package only works on - * algebraic properties of the respective - * matrix, it has no chance to detect - * whether the matrix comes from a scalar - * or a vector valued problem. However, a - * near null space supplies exactly the - * needed information about these - * components. The null space will consist - * of as many vectors as there are true - * arguments in component_mask - * (see @ref GlossComponentMask), each of - * which will be one in one vector - * component and zero in all others. 
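A sketch of building vertex patches for a RelaxationBlock-type smoother with the signature shown above; the multilevel DoF handler `mg_dof_handler` and the level number `level` are assumed to exist:

  SparsityPattern block_list;
  DoFTools::make_vertex_patches (block_list, mg_dof_handler, level,
                                 /*interior_dofs_only=*/ true);
  block_list.compress ();  // harmless if the pattern is already compressed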
We - * store this object in a vector of - * vectors, where the outer vector is of - * the size of the number of selected - * components, and each inner vector has as - * many components as there are (locally - * owned) degrees of freedom in the - * selected components. Note that any - * matrix associated with this null space - * must have been constructed using the - * same component_mask argument, - * since the numbering of DoFs is done - * relative to the selected dofs, not to - * all dofs. - * - * The main reason for this - * program is the use of the - * null space with the - * AMG preconditioner. - */ + const bool interior_dofs_only, + const bool boundary_dofs = false); + + /** + * Create a block list with only a + * single patch, which in turn + * contains all degrees of freedom + * on the given level. + * + * This function is mostly a + * closure on level 0 for functions + * like make_child_patches() and + * make_vertex_patches(), which may + * produce an empty patch list. + * + * @arg block_list: the + * SparsityPattern into which the + * patches will be stored. + * @arg dof_handler: The + * multilevel dof handler + * providing the topology operated + * on. + * @arg level The grid + * level used for building the list. + * @arg + * interior_dofs_only: + * if true, exclude degrees of freedom on + * the boundary of the domain. + */ + template + void make_single_patch(SparsityPattern &block_list, + const DH &dof_handler, + const unsigned int level, + const bool interior_dofs_only = false); + + //@} + /** + * Extract a vector that represents the + * constant modes of the DoFHandler for the + * components chosen by + * component_mask (see @ref + * GlossComponentMask). The constant modes + * on a discretization are the null space + * of a Laplace operator on the selected + * components with Neumann boundary + * conditions applied. The null space is a + * necessary ingredient for obtaining a + * good AMG preconditioner when using the + * class TrilinosWrappers::PreconditionAMG. + * Since the ML AMG package only works on + * algebraic properties of the respective + * matrix, it has no chance to detect + * whether the matrix comes from a scalar + * or a vector valued problem. However, a + * near null space supplies exactly the + * needed information about these + * components. The null space will consist + * of as many vectors as there are true + * arguments in component_mask + * (see @ref GlossComponentMask), each of + * which will be one in one vector + * component and zero in all others. We + * store this object in a vector of + * vectors, where the outer vector is of + * the size of the number of selected + * components, and each inner vector has as + * many components as there are (locally + * owned) degrees of freedom in the + * selected components. Note that any + * matrix associated with this null space + * must have been constructed using the + * same component_mask argument, + * since the numbering of DoFs is done + * relative to the selected dofs, not to + * all dofs. + * + * The main reason for this + * program is the use of the + * null space with the + * AMG preconditioner. + */ template void extract_constant_modes (const DH &dof_handler, @@@ -2187,237 -2188,237 +2187,237 @@@ void count_dofs_per_block (const DH &dof, std::vector &dofs_per_block, - const std::vector &target_block + const std::vector &target_block - = std::vector()); - - /** - * @deprecated See the previous - * function with the same name - * for a description. 
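count_dofs_per_block(), declared just above, is typically used like this for a finite element with two blocks; an illustrative sketch only:

  std::vector<unsigned int> dofs_per_block (2);
  DoFTools::count_dofs_per_block (dof_handler, dofs_per_block);
  // dofs_per_block[0] and dofs_per_block[1] now hold the block sizes.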
This - * function exists for - * compatibility with older - * versions only. - */ + = std::vector()); + + /** + * @deprecated See the previous + * function with the same name + * for a description. This + * function exists for + * compatibility with older + * versions only. + */ template void - count_dofs_per_component (const DoFHandler& dof_handler, - std::vector& dofs_per_component, + count_dofs_per_component (const DoFHandler &dof_handler, + std::vector &dofs_per_component, std::vector target_component); - /** - * This function can be used when - * different variables shall be - * discretized on different - * grids, where one grid is - * coarser than the other. This - * idea might seem nonsensical at - * first, but has reasonable - * applications in inverse - * (parameter estimation) - * problems, where there might - * not be enough information to - * recover the parameter on the - * same grid as the state - * variable; furthermore, the - * smoothness properties of state - * variable and parameter might - * not be too much related, so - * using different grids might be - * an alternative to using - * stronger regularization of the - * problem. - * - * The basic idea of this - * function is explained in the - * following. Let us, for - * convenience, denote by - * ``parameter grid'' the coarser - * of the two grids, and by - * ``state grid'' the finer of - * the two. We furthermore assume - * that the finer grid can be - * obtained by refinement of the - * coarser one, i.e. the fine - * grid is at least as much - * refined as the coarse grid at - * each point of the - * domain. Then, each shape - * function on the coarse grid - * can be represented as a linear - * combination of shape functions - * on the fine grid (assuming - * identical ansatz - * spaces). Thus, if we - * discretize as usual, using - * shape functions on the fine - * grid, we can consider the - * restriction that the parameter - * variable shall in fact be - * discretized by shape functions - * on the coarse grid as a - * constraint. These constraints - * are linear and happen to have - * the form managed by the - * ``ConstraintMatrix'' class. - * - * The construction of these - * constraints is done as - * follows: for each of the - * degrees of freedom (i.e. shape - * functions) on the coarse grid, - * we compute its representation - * on the fine grid, i.e. how the - * linear combination of shape - * functions on the fine grid - * looks like that resembles the - * shape function on the coarse - * grid. From this information, - * we can then compute the - * constraints which have to hold - * if a solution of a linear - * equation on the fine grid - * shall be representable on the - * coarse grid. The exact - * algorithm how these - * constraints can be computed is - * rather complicated and is best - * understood by reading the - * source code, which contains - * many comments. - * - * Before explaining the use of - * this function, we would like - * to state that the total number - * of degrees of freedom used for - * the discretization is not - * reduced by the use of this - * function, i.e. even though we - * discretize one variable on a - * coarser grid, the total number - * of degrees of freedom is that - * of the fine grid. This seems - * to be counter-productive, - * since it does not give us a - * benefit from using a coarser - * grid. 
The reason why it may be - * useful to choose this approach - * nonetheless is three-fold: - * first, as stated above, there - * might not be enough - * information to recover a - * parameter on a fine grid, - * i.e. we chose to discretize it - * on the coarse grid not to save - * DoFs, but for other - * reasons. Second, the - * ``ConstraintMatrix'' includes - * the constraints into the - * linear system of equations, by - * which constrained nodes become - * dummy nodes; we may therefore - * exclude them from the linear - * algebra, for example by - * sorting them to the back of - * the DoF numbers and simply - * calling the solver for the - * upper left block of the matrix - * which works on the - * non-constrained nodes only, - * thus actually realizing the - * savings in numerical effort - * from the reduced number of - * actual degrees of freedom. The - * third reason is that for some - * or other reason we have chosen - * to use two different grids, it - * may be actually quite - * difficult to write a function - * that assembles the system - * matrix for finite element - * spaces on different grids; - * using the approach of - * constraints as with this - * function allows to use - * standard techniques when - * discretizing on only one grid - * (the finer one) without having - * to take care of the fact that - * one or several of the variable - * actually belong to different - * grids. - * - * The use of this function is as - * follows: it accepts as - * parameters two DoF Handlers, - * the first of which refers to - * the coarse grid and the second - * of which is the fine grid. On - * both, a finite element is - * represented by the DoF handler - * objects, which will usually - * have several components, which - * may belong to different finite - * elements. The second and - * fourth parameter of this - * function therefore state which - * variable on the coarse grid - * shall be used to restrict the - * stated component on the fine - * grid. Of course, the finite - * elements used for the - * respective components on the - * two grids need to be the - * same. An example may clarify - * this: consider the parameter - * estimation mentioned briefly - * above; there, on the fine grid - * the whole discretization is - * done, thus the variables are - * ``u'', ``q'', and the Lagrange - * multiplier ``lambda'', which - * are discretized using - * continuous linear, piecewise - * constant discontinuous, and - * continuous linear elements, - * respectively. Only the - * parameter ``q'' shall be - * represented on the coarse - * grid, thus the DoFHandler - * object on the coarse grid - * represents only one variable, - * discretized using piecewise - * constant discontinuous - * elements. Then, the parameter - * denoting the component on the - * coarse grid would be zero (the - * only possible choice, since - * the variable on the coarse - * grid is scalar), and one on - * the fine grid (corresponding - * to the variable ``q''; zero - * would be ``u'', two would be - * ``lambda''). Furthermore, an - * object of type IntergridMap - * is needed; this could in - * principle be generated by the - * function itself from the two - * DoFHandler objects, but since - * it is probably available - * anyway in programs that use - * this function, we shall use it - * instead of re-generating - * it. Finally, the computed - * constraints are entered into a - * variable of type - * ConstraintMatrix; the - * constraints are added, - * i.e. 
previous contents which - * may have, for example, be - * obtained from hanging nodes, - * are not deleted, so that you - * only need one object of this - * type. - */ + /** + * This function can be used when + * different variables shall be + * discretized on different + * grids, where one grid is + * coarser than the other. This + * idea might seem nonsensical at + * first, but has reasonable + * applications in inverse + * (parameter estimation) + * problems, where there might + * not be enough information to + * recover the parameter on the + * same grid as the state + * variable; furthermore, the + * smoothness properties of state + * variable and parameter might + * not be too much related, so + * using different grids might be + * an alternative to using + * stronger regularization of the + * problem. + * + * The basic idea of this + * function is explained in the + * following. Let us, for + * convenience, denote by + * ``parameter grid'' the coarser + * of the two grids, and by + * ``state grid'' the finer of + * the two. We furthermore assume + * that the finer grid can be + * obtained by refinement of the + * coarser one, i.e. the fine + * grid is at least as much + * refined as the coarse grid at + * each point of the + * domain. Then, each shape + * function on the coarse grid + * can be represented as a linear + * combination of shape functions + * on the fine grid (assuming + * identical ansatz + * spaces). Thus, if we + * discretize as usual, using + * shape functions on the fine + * grid, we can consider the + * restriction that the parameter + * variable shall in fact be + * discretized by shape functions + * on the coarse grid as a + * constraint. These constraints + * are linear and happen to have + * the form managed by the + * ``ConstraintMatrix'' class. + * + * The construction of these + * constraints is done as + * follows: for each of the + * degrees of freedom (i.e. shape + * functions) on the coarse grid, + * we compute its representation + * on the fine grid, i.e. how the + * linear combination of shape + * functions on the fine grid + * looks like that resembles the + * shape function on the coarse + * grid. From this information, + * we can then compute the + * constraints which have to hold + * if a solution of a linear + * equation on the fine grid + * shall be representable on the + * coarse grid. The exact + * algorithm how these + * constraints can be computed is + * rather complicated and is best + * understood by reading the + * source code, which contains + * many comments. + * + * Before explaining the use of + * this function, we would like + * to state that the total number + * of degrees of freedom used for + * the discretization is not + * reduced by the use of this + * function, i.e. even though we + * discretize one variable on a + * coarser grid, the total number + * of degrees of freedom is that + * of the fine grid. This seems + * to be counter-productive, + * since it does not give us a + * benefit from using a coarser + * grid. The reason why it may be + * useful to choose this approach + * nonetheless is three-fold: + * first, as stated above, there + * might not be enough + * information to recover a + * parameter on a fine grid, + * i.e. we chose to discretize it + * on the coarse grid not to save + * DoFs, but for other + * reasons. 
Second, the + * ``ConstraintMatrix'' includes + * the constraints into the + * linear system of equations, by + * which constrained nodes become + * dummy nodes; we may therefore + * exclude them from the linear + * algebra, for example by + * sorting them to the back of + * the DoF numbers and simply + * calling the solver for the + * upper left block of the matrix + * which works on the + * non-constrained nodes only, + * thus actually realizing the + * savings in numerical effort + * from the reduced number of + * actual degrees of freedom. The + * third reason is that for some + * or other reason we have chosen + * to use two different grids, it + * may be actually quite + * difficult to write a function + * that assembles the system + * matrix for finite element + * spaces on different grids; + * using the approach of + * constraints as with this + * function allows to use + * standard techniques when + * discretizing on only one grid + * (the finer one) without having + * to take care of the fact that + * one or several of the variable + * actually belong to different + * grids. + * + * The use of this function is as + * follows: it accepts as + * parameters two DoF Handlers, + * the first of which refers to + * the coarse grid and the second + * of which is the fine grid. On + * both, a finite element is + * represented by the DoF handler + * objects, which will usually + * have several components, which + * may belong to different finite + * elements. The second and + * fourth parameter of this + * function therefore state which + * variable on the coarse grid + * shall be used to restrict the + * stated component on the fine + * grid. Of course, the finite + * elements used for the + * respective components on the + * two grids need to be the + * same. An example may clarify + * this: consider the parameter + * estimation mentioned briefly + * above; there, on the fine grid + * the whole discretization is + * done, thus the variables are + * ``u'', ``q'', and the Lagrange + * multiplier ``lambda'', which + * are discretized using + * continuous linear, piecewise + * constant discontinuous, and + * continuous linear elements, + * respectively. Only the + * parameter ``q'' shall be + * represented on the coarse + * grid, thus the DoFHandler + * object on the coarse grid + * represents only one variable, + * discretized using piecewise + * constant discontinuous + * elements. Then, the parameter + * denoting the component on the + * coarse grid would be zero (the + * only possible choice, since + * the variable on the coarse + * grid is scalar), and one on + * the fine grid (corresponding + * to the variable ``q''; zero + * would be ``u'', two would be + * ``lambda''). Furthermore, an + * object of type IntergridMap + * is needed; this could in + * principle be generated by the + * function itself from the two + * DoFHandler objects, but since + * it is probably available + * anyway in programs that use + * this function, we shall use it + * instead of re-generating + * it. Finally, the computed + * constraints are entered into a + * variable of type + * ConstraintMatrix; the + * constraints are added, + * i.e. previous contents which + * may have, for example, be + * obtained from hanging nodes, + * are not deleted, so that you + * only need one object of this + * type. 
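A rough sketch of the call sequence described above; the declaration of compute_intergrid_constraints() follows right after this sketch, and the names `coarse_dof_handler`, `fine_dof_handler` and the component numbers 0 and 1 are assumptions taken from the parameter-estimation example in the text (consult the full declaration for the exact argument list):

  InterGridMap<DoFHandler<dim> > coarse_to_fine_map;
  coarse_to_fine_map.make_mapping (coarse_dof_handler, fine_dof_handler);

  ConstraintMatrix constraints;
  DoFTools::compute_intergrid_constraints (coarse_dof_handler, 0,
                                           fine_dof_handler,   1,
                                           coarse_to_fine_map,
                                           constraints);
  constraints.close ();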
+ */ template void compute_intergrid_constraints (const DoFHandler &coarse_grid, @@@ -2490,89 -2491,140 +2490,89 @@@ template void map_dof_to_boundary_indices (const DH &dof_handler, - std::vector &mapping); + std::vector &mapping); - /** - * Same as the previous function, - * except that only those parts - * of the boundary are considered - * for which the boundary - * indicator is listed in the - * second argument. - * - * See the general doc of this - * class for more information. - */ + /** + * Same as the previous function, + * except that only those parts + * of the boundary are considered + * for which the boundary + * indicator is listed in the + * second argument. + * + * See the general doc of this + * class for more information. + */ template void map_dof_to_boundary_indices (const DH &dof_handler, const std::set &boundary_indicators, std::vector &mapping); - /** - * Return a list of support - * points (see this - * @ref GlossSupport "glossary entry") - * for all the degrees of - * freedom handled by this DoF - * handler object. This function, - * of course, only works if the - * finite element object used by - * the DoF handler object - * actually provides support - * points, i.e. no edge elements - * or the like. Otherwise, an - * exception is thrown. - * - * The given array must have a - * length of as many elements as - * there are degrees of freedom. - */ + /** + * Return a list of support + * points (see this + * @ref GlossSupport "glossary entry") + * for all the degrees of + * freedom handled by this DoF + * handler object. This function, + * of course, only works if the + * finite element object used by + * the DoF handler object + * actually provides support + * points, i.e. no edge elements + * or the like. Otherwise, an + * exception is thrown. + * - * @pre The given array must have a ++ * The given array must have a + * length of as many elements as + * there are degrees of freedom. - * - * @note The precondition to this function - * that the output argument needs to have - * size equal to the total number of degrees - * of freedom makes this function - * unsuitable for the case that the given - * DoFHandler object derives from a - * parallel::distributed::Triangulation object. - * Consequently, this function will produce an - * error if called with such a DoFHandler. + */ template void map_dofs_to_support_points (const Mapping &mapping, const DoFHandler &dof_handler, std::vector > &support_points); - /** - * Same as above for the hp case. - */ + /** - * Same as the previous function but for the hp case. - */ - template - void - map_dofs_to_support_points (const dealii::hp::MappingCollection &mapping, - const hp::DoFHandler &dof_handler, - std::vector > &support_points); - - /** - * This function is a version of the above map_dofs_to_support_points - * function that doesn't simply return a vector of support points (see - * this @ref GlossSupport "glossary entry") with one - * entry for each global degree of freedom, but instead a map that - * maps from the DoFs index to its location. The point of this - * function is that it is also usable in cases where the DoFHandler - * is based on a parallel::distributed::Triangulation object. In such cases, - * each processor will not be able to determine the support point location - * of all DoFs, and worse no processor may be able to hold a vector that - * would contain the locations of all DoFs even if they were known. 
As - * a consequence, this function constructs a map from those DoFs for which - * we can know the locations (namely, those DoFs that are - * locally relevant (see @ref GlossLocallyRelevantDof "locally relevant DoFs") - * to their locations. - * - * For non-distributed triangulations, the map returned as @p support_points - * is of course dense, i.e., every DoF is to be found in it. - * - * @param mapping The mapping from the reference cell to the real cell on - * which DoFs are defined. - * @param dof_handler The object that describes which DoF indices live on - * which cell of the triangulation. - * @param support_points A map that for every locally relevant DoF index - * contains the corresponding location in real space coordinates. - * Previous content of this object is deleted in this function. ++ * Same as above for the hp case. + */ - template - void - map_dofs_to_support_points (const Mapping &mapping, - const DoFHandler &dof_handler, - std::map > &support_points); - /** - * Same as the previous function but for the hp case. - */ template void map_dofs_to_support_points (const dealii::hp::MappingCollection &mapping, - const hp::DoFHandler &dof_handler, - std::vector > &support_points); - - /** - * This is the opposite function - * to the one above. It generates - * a map where the keys are the - * support points of the degrees - * of freedom, while the values - * are the DoF indices. For a definition - * of support points, see this - * @ref GlossSupport "glossary entry". - * - * Since there is no natural - * order in the space of points - * (except for the 1d case), you - * have to provide a map with an - * explicitly specified - * comparator object. This - * function is therefore - * templatized on the comparator - * object. Previous content of - * the map object is deleted in - * this function. - * - * Just as with the function - * above, it is assumed that the - * finite element in use here - * actually supports the notion - * of support points of all its - * components. - */ + const hp::DoFHandler &dof_handler, - std::map > &support_points); ++ std::vector > &support_points); + + /** + * This is the opposite function + * to the one above. It generates + * a map where the keys are the + * support points of the degrees + * of freedom, while the values + * are the DoF indices. For a definition + * of support points, see this + * @ref GlossSupport "glossary entry". + * + * Since there is no natural + * order in the space of points + * (except for the 1d case), you + * have to provide a map with an + * explicitly specified + * comparator object. This + * function is therefore + * templatized on the comparator + * object. Previous content of + * the map object is deleted in + * this function. + * + * Just as with the function + * above, it is assumed that the + * finite element in use here + * actually supports the notion + * of support points of all its + * components. + */ template void map_support_points_to_dofs (const Mapping &mapping, diff --cc deal.II/include/deal.II/fe/fe_nothing.h index 11c12882d3,e4cfcea543..9ed2d3ca9f --- a/deal.II/include/deal.II/fe/fe_nothing.h +++ b/deal.II/include/deal.II/fe/fe_nothing.h @@@ -75,271 -75,271 +75,271 @@@ DEAL_II_NAMESPACE_OPE template class FE_Nothing : public FiniteElement { - public: - - /** - * Constructor. Argument denotes the - * number of components to give this - * finite element (default = 1). - */ - FE_Nothing (unsigned int n_components = 1); - - /** - * A sort of virtual copy - * constructor. 
Some places in - * the library, for example the - * constructors of FESystem as - * well as the hp::FECollection - * class, need to make copied of - * finite elements without - * knowing their exact type. They - * do so through this function. - */ - virtual - FiniteElement * - clone() const; - - /** - * Return a string that uniquely - * identifies a finite - * element. In this case it is - * FE_Nothing@. - */ - virtual - std::string - get_name() const; - - /** - * Determine the values a finite - * element should compute on - * initialization of data for - * FEValues. - * - * Given a set of flags - * indicating what quantities are - * requested from a FEValues - * object, update_once() and - * update_each() compute which - * values must really be - * computed. Then, the - * fill_*_values functions - * are called with the result of - * these. - * - * In this case, since the element - * has zero degrees of freedom and - * no information can be computed on - * it, this function simply returns - * the default (empty) set of update - * flags. - */ - - virtual - UpdateFlags - update_once (const UpdateFlags flags) const; - - /** - * Complementary function for - * update_once(). - * - * While update_once() returns - * the values to be computed on - * the unit cell for yielding the - * required data, this function - * determines the values that - * must be recomputed on each - * cell. - * - * Refer to update_once() for - * more details. - */ - virtual - UpdateFlags - update_each (const UpdateFlags flags) const; - - /** - * Return the value of the - * @p ith shape function at the - * point @p p. @p p is a point - * on the reference element. Because the - * current element has no degrees of freedom, - * this function should obviously not be - * called in practice. All this function - * really does, therefore, is trigger an - * exception. - */ - virtual - double - shape_value (const unsigned int i, const Point &p) const; - - /** - * Fill the fields of - * FEValues. This function - * performs all the operations - * needed to compute the data of an - * FEValues object. - * - * In the current case, this function - * returns no meaningful information, - * since the element has no degrees of - * freedom. - */ - virtual - void - fill_fe_values (const Mapping & mapping, - const typename Triangulation::cell_iterator & cell, - const Quadrature & quadrature, - typename Mapping::InternalDataBase & mapping_data, - typename Mapping::InternalDataBase & fedata, - FEValuesData & data, - CellSimilarity::Similarity & cell_similarity) const; - - /** - * Fill the fields of - * FEFaceValues. This function - * performs all the operations - * needed to compute the data of an - * FEFaceValues object. - * - * In the current case, this function - * returns no meaningful information, - * since the element has no degrees of - * freedom. - */ - virtual - void - fill_fe_face_values (const Mapping & mapping, - const typename Triangulation :: cell_iterator & cell, - const unsigned int face, - const Quadrature & quadrature, - typename Mapping :: InternalDataBase & mapping_data, - typename Mapping :: InternalDataBase & fedata, - FEValuesData & data) const; - - /** - * Fill the fields of - * FESubFaceValues. This function - * performs all the operations - * needed to compute the data of an - * FESubFaceValues object. - * - * In the current case, this function - * returns no meaningful information, - * since the element has no degrees of - * freedom. 
- */ - virtual - void - fill_fe_subface_values (const Mapping & mapping, - const typename Triangulation::cell_iterator & cell, - const unsigned int face, - const unsigned int subface, - const Quadrature & quadrature, - typename Mapping::InternalDataBase & mapping_data, - typename Mapping::InternalDataBase & fedata, - FEValuesData & data) const; - - /** - * Prepare internal data - * structures and fill in values - * independent of the - * cell. Returns a pointer to an - * object of which the caller of - * this function then has to - * assume ownership (which - * includes destruction when it - * is no more needed). - * - * In the current case, this function - * just returns a default pointer, since - * no meaningful data exists for this - * element. - */ - virtual - typename Mapping::InternalDataBase * - get_data (const UpdateFlags update_flags, - const Mapping & mapping, - const Quadrature & quadrature) const; - - /** - * Return whether this element dominates - * the one given as argument when they - * meet at a common face, - * whether it is the other way around, - * whether neither dominates, or if - * either could dominate. - * - * For a definition of domination, see - * FiniteElementBase::Domination and in - * particular the @ref hp_paper "hp paper". - * - * In the current case, this element - * is always assumed to dominate, unless - * it is also of type FE_Nothing(). In - * that situation, either element can - * dominate. - */ - virtual - FiniteElementDomination::Domination - compare_for_face_domination (const FiniteElement & fe_other) const; - - - - virtual - std::vector > - hp_vertex_dof_identities (const FiniteElement &fe_other) const; - - virtual - std::vector > - hp_line_dof_identities (const FiniteElement &fe_other) const; - - virtual - std::vector > - hp_quad_dof_identities (const FiniteElement &fe_other) const; - - virtual - bool - hp_constraints_are_implemented () const; - - /** - * Return the matrix - * interpolating from a face of - * of one element to the face of - * the neighboring element. - * The size of the matrix is - * then source.#dofs_per_face times - * this->#dofs_per_face. - * - * Since the current finite element has no - * degrees of freedom, the interpolation - * matrix is necessarily empty. - */ - - virtual - void - get_face_interpolation_matrix (const FiniteElement &source_fe, - FullMatrix &interpolation_matrix) const; - - - /** - * Return the matrix - * interpolating from a face of - * of one element to the subface of - * the neighboring element. - * The size of the matrix is - * then source.#dofs_per_face times - * this->#dofs_per_face. - * - * Since the current finite element has no - * degrees of freedom, the interpolation - * matrix is necessarily empty. - */ - - virtual - void - get_subface_interpolation_matrix (const FiniteElement & source_fe, - const unsigned int index, - FullMatrix &interpolation_matrix) const; + public: + + /** + * Constructor. Argument denotes the + * number of components to give this + * finite element (default = 1). + */ + FE_Nothing (unsigned int n_components = 1); + + /** + * A sort of virtual copy + * constructor. Some places in + * the library, for example the + * constructors of FESystem as + * well as the hp::FECollection + * class, need to make copied of + * finite elements without + * knowing their exact type. They + * do so through this function. + */ + virtual + FiniteElement * + clone() const; + + /** + * Return a string that uniquely + * identifies a finite + * element. In this case it is + * FE_Nothing@. 
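As a usage illustration (not part of this header), FE_Nothing is typically paired with a real element inside an hp::FECollection so that the discretization can be ``switched off'' on part of the domain. The sketch below assumes a triangulation object and a user-supplied predicate cell_is_void(); both are placeholders.

  // requires <deal.II/fe/fe_q.h>, <deal.II/fe/fe_nothing.h>,
  // <deal.II/hp/fe_collection.h>, <deal.II/hp/dof_handler.h>
  hp::FECollection<dim> fe_collection;
  fe_collection.push_back (FE_Q<dim>(1));       // index 0: the ``real'' element
  fe_collection.push_back (FE_Nothing<dim>());  // index 1: zero degrees of freedom

  hp::DoFHandler<dim> dof_handler (triangulation);
  for (typename hp::DoFHandler<dim>::active_cell_iterator
         cell = dof_handler.begin_active(); cell != dof_handler.end(); ++cell)
    cell->set_active_fe_index (cell_is_void (cell) ? 1 : 0);
  dof_handler.distribute_dofs (fe_collection);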
+ */ + virtual + std::string + get_name() const; + + /** + * Determine the values a finite + * element should compute on + * initialization of data for + * FEValues. + * + * Given a set of flags + * indicating what quantities are + * requested from a FEValues + * object, update_once() and + * update_each() compute which + * values must really be + * computed. Then, the + * fill_*_values functions + * are called with the result of + * these. + * + * In this case, since the element + * has zero degrees of freedom and + * no information can be computed on + * it, this function simply returns + * the default (empty) set of update + * flags. + */ + + virtual + UpdateFlags + update_once (const UpdateFlags flags) const; + + /** + * Complementary function for + * update_once(). + * + * While update_once() returns + * the values to be computed on + * the unit cell for yielding the + * required data, this function + * determines the values that + * must be recomputed on each + * cell. + * + * Refer to update_once() for + * more details. + */ + virtual + UpdateFlags + update_each (const UpdateFlags flags) const; + + /** + * Return the value of the + * @p ith shape function at the + * point @p p. @p p is a point + * on the reference element. Because the + * current element has no degrees of freedom, + * this function should obviously not be + * called in practice. All this function + * really does, therefore, is trigger an + * exception. + */ + virtual + double + shape_value (const unsigned int i, const Point &p) const; + + /** + * Fill the fields of + * FEValues. This function + * performs all the operations + * needed to compute the data of an + * FEValues object. + * + * In the current case, this function + * returns no meaningful information, + * since the element has no degrees of + * freedom. + */ + virtual + void + fill_fe_values (const Mapping &mapping, + const typename Triangulation::cell_iterator &cell, + const Quadrature &quadrature, + typename Mapping::InternalDataBase &mapping_data, + typename Mapping::InternalDataBase &fedata, + FEValuesData &data, + CellSimilarity::Similarity &cell_similarity) const; + + /** + * Fill the fields of + * FEFaceValues. This function + * performs all the operations + * needed to compute the data of an + * FEFaceValues object. + * + * In the current case, this function + * returns no meaningful information, + * since the element has no degrees of + * freedom. + */ + virtual + void + fill_fe_face_values (const Mapping &mapping, + const typename Triangulation :: cell_iterator &cell, + const unsigned int face, + const Quadrature & quadrature, + typename Mapping :: InternalDataBase &mapping_data, + typename Mapping :: InternalDataBase &fedata, + FEValuesData &data) const; + + /** + * Fill the fields of + * FESubFaceValues. This function + * performs all the operations + * needed to compute the data of an + * FESubFaceValues object. + * + * In the current case, this function + * returns no meaningful information, + * since the element has no degrees of + * freedom. + */ + virtual + void + fill_fe_subface_values (const Mapping &mapping, + const typename Triangulation::cell_iterator &cell, + const unsigned int face, + const unsigned int subface, + const Quadrature & quadrature, + typename Mapping::InternalDataBase &mapping_data, + typename Mapping::InternalDataBase &fedata, + FEValuesData &data) const; + + /** + * Prepare internal data + * structures and fill in values + * independent of the + * cell. 
Returns a pointer to an + * object of which the caller of + * this function then has to + * assume ownership (which + * includes destruction when it + * is no more needed). + * + * In the current case, this function + * just returns a default pointer, since + * no meaningful data exists for this + * element. + */ + virtual + typename Mapping::InternalDataBase * + get_data (const UpdateFlags update_flags, + const Mapping &mapping, + const Quadrature &quadrature) const; + + /** + * Return whether this element dominates + * the one given as argument when they + * meet at a common face, + * whether it is the other way around, + * whether neither dominates, or if + * either could dominate. + * + * For a definition of domination, see + * FiniteElementBase::Domination and in + * particular the @ref hp_paper "hp paper". + * + * In the current case, this element + * is always assumed to dominate, unless + * it is also of type FE_Nothing(). In + * that situation, either element can + * dominate. + */ + virtual + FiniteElementDomination::Domination + compare_for_face_domination (const FiniteElement &fe_other) const; + + + + virtual + std::vector > + hp_vertex_dof_identities (const FiniteElement &fe_other) const; + + virtual + std::vector > + hp_line_dof_identities (const FiniteElement &fe_other) const; + + virtual + std::vector > + hp_quad_dof_identities (const FiniteElement &fe_other) const; + + virtual + bool + hp_constraints_are_implemented () const; + + /** + * Return the matrix + * interpolating from a face of + * of one element to the face of + * the neighboring element. + * The size of the matrix is + * then source.#dofs_per_face times + * this->#dofs_per_face. + * + * Since the current finite element has no + * degrees of freedom, the interpolation + * matrix is necessarily empty. + */ + + virtual + void + get_face_interpolation_matrix (const FiniteElement &source_fe, + FullMatrix &interpolation_matrix) const; + + + /** + * Return the matrix + * interpolating from a face of + * of one element to the subface of + * the neighboring element. + * The size of the matrix is + * then source.#dofs_per_face times + * this->#dofs_per_face. + * + * Since the current finite element has no + * degrees of freedom, the interpolation + * matrix is necessarily empty. + */ + + virtual + void + get_subface_interpolation_matrix (const FiniteElement &source_fe, + const unsigned int index, - FullMatrix &interpolation_matrix) const; ++ FullMatrix &interpolation_matrix) const; }; diff --cc deal.II/include/deal.II/fe/fe_poly_tensor.h index 902b295e77,5650e3365c..a1f5498330 --- a/deal.II/include/deal.II/fe/fe_poly_tensor.h +++ b/deal.II/include/deal.II/fe/fe_poly_tensor.h @@@ -114,224 -114,224 +114,224 @@@ DEAL_II_NAMESPACE_OPE template class FE_PolyTensor : public FiniteElement { - public: - /** - * Constructor. - * - * @arg @c degree: constructor - * argument for poly. May be - * different from @p - * fe_data.degree. - */ - FE_PolyTensor (const unsigned int degree, - const FiniteElementData &fe_data, - const std::vector &restriction_is_additive_flags, - const std::vector &nonzero_components); + public: + /** + * Constructor. + * + * @arg @c degree: constructor + * argument for poly. May be + * different from @p + * fe_data.degree. + */ + FE_PolyTensor (const unsigned int degree, + const FiniteElementData &fe_data, + const std::vector &restriction_is_additive_flags, + const std::vector &nonzero_components); - /** - * Since these elements are - * vector valued, an exception is - * thrown. 
- */ - virtual double shape_value (const unsigned int i, - const Point &p) const; + /** + * Since these elements are + * vector valued, an exception is + * thrown. + */ + virtual double shape_value (const unsigned int i, + const Point &p) const; - virtual double shape_value_component (const unsigned int i, - const Point &p, - const unsigned int component) const; + virtual double shape_value_component (const unsigned int i, + const Point &p, + const unsigned int component) const; - /** - * Since these elements are - * vector valued, an exception is - * thrown. - */ - virtual Tensor<1,dim> shape_grad (const unsigned int i, - const Point &p) const; + /** + * Since these elements are + * vector valued, an exception is + * thrown. + */ + virtual Tensor<1,dim> shape_grad (const unsigned int i, + const Point &p) const; - virtual Tensor<1,dim> shape_grad_component (const unsigned int i, - const Point &p, - const unsigned int component) const; + virtual Tensor<1,dim> shape_grad_component (const unsigned int i, + const Point &p, + const unsigned int component) const; - /** - * Since these elements are - * vector valued, an exception is - * thrown. - */ - virtual Tensor<2,dim> shape_grad_grad (const unsigned int i, - const Point &p) const; + /** + * Since these elements are + * vector valued, an exception is + * thrown. + */ + virtual Tensor<2,dim> shape_grad_grad (const unsigned int i, + const Point &p) const; - virtual Tensor<2,dim> shape_grad_grad_component (const unsigned int i, - const Point &p, - const unsigned int component) const; + virtual Tensor<2,dim> shape_grad_grad_component (const unsigned int i, + const Point &p, + const unsigned int component) const; - /** - * Given flags, - * determines the values which - * must be computed only for the - * reference cell. Make sure, - * that #mapping_type is set by - * the derived class, such that - * this function can operate - * correctly. - */ - virtual UpdateFlags update_once (const UpdateFlags flags) const; - /** - * Given flags, - * determines the values which - * must be computed in each cell - * cell. Make sure, that - * #mapping_type is set by the - * derived class, such that this - * function can operate - * correctly. - */ - virtual UpdateFlags update_each (const UpdateFlags flags) const; + /** + * Given flags, + * determines the values which + * must be computed only for the + * reference cell. Make sure, + * that #mapping_type is set by + * the derived class, such that + * this function can operate + * correctly. + */ + virtual UpdateFlags update_once (const UpdateFlags flags) const; + /** + * Given flags, + * determines the values which + * must be computed in each cell + * cell. Make sure, that + * #mapping_type is set by the + * derived class, such that this + * function can operate + * correctly. + */ + virtual UpdateFlags update_each (const UpdateFlags flags) const; - protected: - /** - * The mapping type to be used to - * map shape functions from the - * reference cell to the mesh - * cell. - */ - MappingType mapping_type; + protected: + /** + * The mapping type to be used to + * map shape functions from the + * reference cell to the mesh + * cell. 
+ */ + MappingType mapping_type; - virtual - typename Mapping::InternalDataBase * - get_data (const UpdateFlags, - const Mapping& mapping, - const Quadrature& quadrature) const ; + virtual + typename Mapping::InternalDataBase * + get_data (const UpdateFlags, + const Mapping &mapping, + const Quadrature &quadrature) const ; - virtual void - fill_fe_values (const Mapping &mapping, - const typename Triangulation::cell_iterator &cell, - const Quadrature &quadrature, - typename Mapping::InternalDataBase &mapping_internal, - typename Mapping::InternalDataBase &fe_internal, - FEValuesData &data, - CellSimilarity::Similarity &cell_similarity) const; + virtual void + fill_fe_values (const Mapping &mapping, + const typename Triangulation::cell_iterator &cell, + const Quadrature &quadrature, - typename Mapping::InternalDataBase &mapping_internal, - typename Mapping::InternalDataBase &fe_internal, ++ typename Mapping::InternalDataBase &mapping_internal, ++ typename Mapping::InternalDataBase &fe_internal, + FEValuesData &data, + CellSimilarity::Similarity &cell_similarity) const; - virtual void - fill_fe_face_values (const Mapping &mapping, - const typename Triangulation::cell_iterator &cell, - const unsigned int face_no, - const Quadrature &quadrature, - typename Mapping::InternalDataBase &mapping_internal, - typename Mapping::InternalDataBase &fe_internal, - FEValuesData& data) const ; + virtual void + fill_fe_face_values (const Mapping &mapping, + const typename Triangulation::cell_iterator &cell, + const unsigned int face_no, + const Quadrature &quadrature, + typename Mapping::InternalDataBase &mapping_internal, + typename Mapping::InternalDataBase &fe_internal, + FEValuesData &data) const ; - virtual void - fill_fe_subface_values (const Mapping &mapping, - const typename Triangulation::cell_iterator &cell, - const unsigned int face_no, - const unsigned int sub_no, - const Quadrature &quadrature, - typename Mapping::InternalDataBase &mapping_internal, - typename Mapping::InternalDataBase &fe_internal, - FEValuesData& data) const ; + virtual void + fill_fe_subface_values (const Mapping &mapping, + const typename Triangulation::cell_iterator &cell, + const unsigned int face_no, + const unsigned int sub_no, + const Quadrature &quadrature, + typename Mapping::InternalDataBase &mapping_internal, + typename Mapping::InternalDataBase &fe_internal, + FEValuesData &data) const ; - /** - * Fields of cell-independent - * data for FE_PolyTensor. Stores - * the values of the shape - * functions and their - * derivatives on the reference - * cell for later use. - * - * All tables are organized in a - * way, that the value for shape - * function i at - * quadrature point k is - * accessed by indices - * (i,k). - */ - class InternalData : public FiniteElement::InternalDataBase - { - public: - /** - * Array with shape function - * values in quadrature - * points. There is one - * row for each shape - * function, containing - * values for each quadrature - * point. - */ - std::vector > > shape_values; + /** + * Fields of cell-independent + * data for FE_PolyTensor. Stores + * the values of the shape + * functions and their + * derivatives on the reference + * cell for later use. + * + * All tables are organized in a + * way, that the value for shape + * function i at + * quadrature point k is + * accessed by indices + * (i,k). + */ + class InternalData : public FiniteElement::InternalDataBase + { + public: + /** + * Array with shape function + * values in quadrature + * points. 
There is one + * row for each shape + * function, containing + * values for each quadrature + * point. + */ + std::vector > > shape_values; - /** - * Array with shape function - * gradients in quadrature - * points. There is one - * row for each shape - * function, containing - * values for each quadrature - * point. - */ - std::vector< std::vector< DerivativeForm<1, dim, spacedim> > > shape_grads; - }; + /** + * Array with shape function + * gradients in quadrature + * points. There is one + * row for each shape + * function, containing + * values for each quadrature + * point. + */ + std::vector< std::vector< DerivativeForm<1, dim, spacedim> > > shape_grads; + }; - /** - * The polynomial space. Its type - * is given by the template - * parameter POLY. - */ - POLY poly_space; + /** + * The polynomial space. Its type + * is given by the template + * parameter POLY. + */ + POLY poly_space; - /** - * The inverse of the matrix - * aij of node - * values Ni - * applied to polynomial - * pj. This - * matrix is used to convert - * polynomials in the "raw" basis - * provided in #poly_space to the - * basis dual to the node - * functionals on the reference cell. - * - * This object is not filled by - * FE_PolyTensor, but is a chance - * for a derived class to allow - * for reorganization of the - * basis functions. If it is left - * empty, the basis in - * #poly_space is used. - */ - FullMatrix inverse_node_matrix; + /** + * The inverse of the matrix + * aij of node + * values Ni + * applied to polynomial + * pj. This + * matrix is used to convert + * polynomials in the "raw" basis + * provided in #poly_space to the + * basis dual to the node + * functionals on the reference cell. + * + * This object is not filled by + * FE_PolyTensor, but is a chance + * for a derived class to allow + * for reorganization of the + * basis functions. If it is left + * empty, the basis in + * #poly_space is used. + */ + FullMatrix inverse_node_matrix; - /** - * If a shape function is - * computed at a single point, we - * must compute all of them to - * apply #inverse_node_matrix. In - * order to avoid too much - * overhead, we cache the point - * and the function values for - * the next evaluation. - */ - mutable Point cached_point; + /** + * If a shape function is + * computed at a single point, we + * must compute all of them to + * apply #inverse_node_matrix. In + * order to avoid too much + * overhead, we cache the point + * and the function values for + * the next evaluation. + */ + mutable Point cached_point; - /** - * Cached shape function values after - * call to - * shape_value_component(). - */ - mutable std::vector > cached_values; + /** + * Cached shape function values after + * call to + * shape_value_component(). + */ + mutable std::vector > cached_values; - /** - * Cached shape function gradients after - * call to - * shape_grad_component(). - */ - mutable std::vector > cached_grads; + /** + * Cached shape function gradients after + * call to + * shape_grad_component(). + */ + mutable std::vector > cached_grads; - /** - * Cached second derivatives of - * shape functions after call to - * shape_grad_grad_component(). - */ - mutable std::vector > cached_grad_grads; + /** + * Cached second derivatives of + * shape functions after call to + * shape_grad_grad_component(). 
+ */ + mutable std::vector > cached_grad_grads; }; DEAL_II_NAMESPACE_CLOSE diff --cc deal.II/include/deal.II/fe/fe_tools.h index 71ac1114ac,91566ae1b1..9c3e3a7af1 --- a/deal.II/include/deal.II/fe/fe_tools.h +++ b/deal.II/include/deal.II/fe/fe_tools.h @@@ -136,98 -136,98 +136,98 @@@ namespace FETool template class FEFactory : public FEFactoryBase { - public: - /** - * Create a FiniteElement and - * return a pointer to it. - */ - virtual FiniteElement* - get (const unsigned int degree) const; - - /** - * Create a FiniteElement from a - * quadrature formula (currently only - * implemented for FE_Q) and return a - * pointer to it. - */ - virtual FiniteElement* - get (const Quadrature<1> &quad) const; + public: + /** + * Create a FiniteElement and + * return a pointer to it. + */ + virtual FiniteElement * + get (const unsigned int degree) const; + + /** + * Create a FiniteElement from a + * quadrature formula (currently only + * implemented for FE_Q) and return a + * pointer to it. + */ + virtual FiniteElement * + get (const Quadrature<1> &quad) const; }; - /** - * @warning In most cases, you - * will probably want to use - * compute_base_renumbering(). - * - * Compute the vector required to - * renumber the dofs of a cell by - * component. Furthermore, - * compute the vector storing the - * start indices of each - * component in the local block - * vector. - * - * The second vector is organized - * such that there is a vector - * for each base element - * containing the start index for - * each component served by this - * base element. - * - * While the first vector is - * checked to have the correct - * size, the second one is - * reinitialized for convenience. - */ + /** + * @warning In most cases, you + * will probably want to use + * compute_base_renumbering(). + * + * Compute the vector required to + * renumber the dofs of a cell by + * component. Furthermore, + * compute the vector storing the + * start indices of each + * component in the local block + * vector. + * + * The second vector is organized + * such that there is a vector + * for each base element + * containing the start index for + * each component served by this + * base element. + * + * While the first vector is + * checked to have the correct + * size, the second one is + * reinitialized for convenience. + */ template void compute_component_wise( - const FiniteElement& fe, - std::vector& renumbering, - std::vector >& start_indices); - - /** - * Compute the vector required to - * renumber the dofs of a cell by - * block. Furthermore, compute - * the vector storing either the - * start indices or the size of - * each local block vector. - * - * If the @p bool parameter is - * true, @p block_data is filled - * with the start indices of each - * local block. If it is false, - * then the block sizes are - * returned. - * - * @todo Which way does this - * vector map the numbers? - */ + const FiniteElement &fe, + std::vector &renumbering, + std::vector > &start_indices); + + /** + * Compute the vector required to + * renumber the dofs of a cell by + * block. Furthermore, compute + * the vector storing either the + * start indices or the size of + * each local block vector. + * + * If the @p bool parameter is + * true, @p block_data is filled + * with the start indices of each + * local block. If it is false, + * then the block sizes are + * returned. + * + * @todo Which way does this + * vector map the numbers? 
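A minimal sketch of how the renumbering helper above is typically called; the FESystem chosen here (a Q2/Q1 pairing) is only an example, and compute_block_renumbering() is used analogously. As stated in the documentation, the first output vector has to be sized to fe.dofs_per_cell beforehand.

  // requires <deal.II/fe/fe_tools.h>, <deal.II/fe/fe_system.h>, <deal.II/fe/fe_q.h>
  FESystem<dim> fe (FE_Q<dim>(2), dim,   // dim velocity-like components
                    FE_Q<dim>(1), 1);    // one pressure-like component
  std::vector<unsigned int>               renumbering (fe.dofs_per_cell);
  std::vector<std::vector<unsigned int> > start_indices;
  FETools::compute_component_wise (fe, renumbering, start_indices);
  // renumbering now holds the permutation between the element-internal and
  // the component-wise ordering of the local dofs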
+ */ template void compute_block_renumbering ( - const FiniteElement& fe, - std::vector& renumbering, - std::vector& block_data, - const FiniteElement &fe, ++ const FiniteElement &fe, + std::vector &renumbering, + std::vector &block_data, bool return_start_indices = true); - /** - * @name Generation of local matrices - * @{ - */ - /** - * Gives the interpolation matrix - * that interpolates a @p fe1- - * function to a @p fe2-function on - * each cell. The interpolation_matrix - * needs to be of size - * (fe2.dofs_per_cell, fe1.dofs_per_cell). - * - * Note, that if the finite element - * space @p fe1 is a subset of - * the finite element space - * @p fe2 then the @p interpolation_matrix - * is an embedding matrix. - */ + /** + * @name Generation of local matrices + * @{ + */ + /** + * Gives the interpolation matrix + * that interpolates a @p fe1- + * function to a @p fe2-function on + * each cell. The interpolation_matrix + * needs to be of size + * (fe2.dofs_per_cell, fe1.dofs_per_cell). + * + * Note, that if the finite element + * space @p fe1 is a subset of + * the finite element space + * @p fe2 then the @p interpolation_matrix + * is an embedding matrix. + */ template void get_interpolation_matrix(const FiniteElement &fe1, @@@ -836,75 -836,75 +836,75 @@@ const DH2 &dof2, OutVector &u2); - /** - * Gives the interpolation of a - * the @p dof1-function @p u1 to - * a @p dof2-function @p u2. @p - * dof1 and @p dof2 need to be - * DoFHandlers (or - * hp::DoFHandlers) based on the - * same triangulation. @p - * constraints is a hanging node - * constraints object - * corresponding to @p dof2. This - * object is particular important - * when interpolating onto - * continuous elements on grids - * with hanging nodes (locally - * refined grids). - * - * If the elements @p fe1 and @p fe2 - * are either both continuous or - * both discontinuous then this - * interpolation is the usual point - * interpolation. The same is true - * if @p fe1 is a continuous and - * @p fe2 is a discontinuous finite - * element. For the case that @p fe1 - * is a discontinuous and @p fe2 is - * a continuous finite element - * there is no point interpolation - * defined at the discontinuities. - * Therefore the meanvalue is taken - * at the DoF values on the - * discontinuities. - */ + /** + * Gives the interpolation of a + * the @p dof1-function @p u1 to + * a @p dof2-function @p u2. @p + * dof1 and @p dof2 need to be + * DoFHandlers (or + * hp::DoFHandlers) based on the + * same triangulation. @p + * constraints is a hanging node + * constraints object + * corresponding to @p dof2. This + * object is particular important + * when interpolating onto + * continuous elements on grids + * with hanging nodes (locally + * refined grids). + * + * If the elements @p fe1 and @p fe2 + * are either both continuous or + * both discontinuous then this + * interpolation is the usual point + * interpolation. The same is true + * if @p fe1 is a continuous and + * @p fe2 is a discontinuous finite + * element. For the case that @p fe1 + * is a discontinuous and @p fe2 is + * a continuous finite element + * there is no point interpolation + * defined at the discontinuities. + * Therefore the meanvalue is taken + * at the DoF values on the + * discontinuities. 
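A sketch of the common call sequence for the constrained interpolation just described. The names dof_handler_1, dof_handler_2 and u1 are placeholders; note that the constraint object belongs to the target handler dof_handler_2.

  // requires <deal.II/fe/fe_tools.h>, <deal.II/dofs/dof_tools.h>
  ConstraintMatrix constraints_2;
  DoFTools::make_hanging_node_constraints (dof_handler_2, constraints_2);
  constraints_2.close ();

  Vector<double> u2 (dof_handler_2.n_dofs());
  FETools::interpolate (dof_handler_1, u1,
                        dof_handler_2, constraints_2,
                        u2);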
+ */ template class DH1, - template class DH2, - class InVector, class OutVector> + template class DH1, + template class DH2, + class InVector, class OutVector> - void interpolate (const DH1 &dof1, + void interpolate (const DH1 &dof1, const InVector &u1, - const DH2 &dof2, + const DH2 &dof2, const ConstraintMatrix &constraints, - OutVector& u2); - - /** - * Gives the interpolation of the - * @p fe1-function @p u1 to a - * @p fe2-function, and - * interpolates this to a second - * @p fe1-function named - * @p u1_interpolated. - * - * Note, that this function does - * not work on continuous - * elements at hanging nodes. For - * that case use the - * @p back_interpolate function, - * below, that takes an - * additional - * @p ConstraintMatrix object. - * - * Furthermore note, that for the - * specific case when the finite - * element space corresponding to - * @p fe1 is a subset of the - * finite element space - * corresponding to @p fe2, this - * function is simply an identity - * mapping. - */ + OutVector &u2); + + /** + * Gives the interpolation of the + * @p fe1-function @p u1 to a + * @p fe2-function, and + * interpolates this to a second + * @p fe1-function named + * @p u1_interpolated. + * + * Note, that this function does + * not work on continuous + * elements at hanging nodes. For + * that case use the + * @p back_interpolate function, + * below, that takes an + * additional + * @p ConstraintMatrix object. + * + * Furthermore note, that for the + * specific case when the finite + * element space corresponding to + * @p fe1 is a subset of the + * finite element space + * corresponding to @p fe2, this + * function is simply an identity + * mapping. + */ template void back_interpolate (const DoFHandler &dof1, const InVector &u1, @@@ -925,229 -925,229 +925,229 @@@ const FiniteElement &fe2, OutVector &u1_interpolated); - /** - * Gives the interpolation of the - * @p dof1-function @p u1 to a - * @p dof2-function, and - * interpolates this to a second - * @p dof1-function named - * @p u1_interpolated. - * @p constraints1 and - * @p constraints2 are the - * hanging node constraints - * corresponding to @p dof1 and - * @p dof2, respectively. These - * objects are particular - * important when continuous - * elements on grids with hanging - * nodes (locally refined grids) - * are involved. - * - * Furthermore note, that for the - * specific case when the finite - * element space corresponding to - * @p dof1 is a subset of the - * finite element space - * corresponding to @p dof2, this - * function is simply an identity - * mapping. - */ + /** + * Gives the interpolation of the + * @p dof1-function @p u1 to a + * @p dof2-function, and + * interpolates this to a second + * @p dof1-function named + * @p u1_interpolated. + * @p constraints1 and + * @p constraints2 are the + * hanging node constraints + * corresponding to @p dof1 and + * @p dof2, respectively. These + * objects are particular + * important when continuous + * elements on grids with hanging + * nodes (locally refined grids) + * are involved. + * + * Furthermore note, that for the + * specific case when the finite + * element space corresponding to + * @p dof1 is a subset of the + * finite element space + * corresponding to @p dof2, this + * function is simply an identity + * mapping. 
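For completeness, a hedged sketch of the constrained back-interpolation described above; all names are placeholders, and the difference u1 - u1_back is what interpolation_difference() computes directly.

  Vector<double> u1_back (dof_handler_1.n_dofs());
  FETools::back_interpolate (dof_handler_1, constraints_1, u1,
                             dof_handler_2, constraints_2,
                             u1_back);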
+ */ template - void back_interpolate (const DoFHandler& dof1, - const ConstraintMatrix& constraints1, - const InVector& u1, - const DoFHandler& dof2, - const ConstraintMatrix& constraints2, - OutVector& u1_interpolated); - - /** - * Gives $(Id-I_h)z_1$ for a given - * @p dof1-function $z_1$, where $I_h$ - * is the interpolation from @p fe1 - * to @p fe2. The result $(Id-I_h)z_1$ is - * written into @p z1_difference. - * - * Note, that this function does - * not work for continuous - * elements at hanging nodes. For - * that case use the - * @p interpolation_difference - * function, below, that takes an - * additional - * @p ConstraintMatrix object. - */ - void back_interpolate (const DoFHandler &dof1, ++ void back_interpolate (const DoFHandler &dof1, + const ConstraintMatrix &constraints1, + const InVector &u1, - const DoFHandler &dof2, ++ const DoFHandler &dof2, + const ConstraintMatrix &constraints2, + OutVector &u1_interpolated); + + /** + * Gives $(Id-I_h)z_1$ for a given + * @p dof1-function $z_1$, where $I_h$ + * is the interpolation from @p fe1 + * to @p fe2. The result $(Id-I_h)z_1$ is + * written into @p z1_difference. + * + * Note, that this function does + * not work for continuous + * elements at hanging nodes. For + * that case use the + * @p interpolation_difference + * function, below, that takes an + * additional + * @p ConstraintMatrix object. + */ template void interpolation_difference(const DoFHandler &dof1, const InVector &z1, const FiniteElement &fe2, OutVector &z1_difference); - /** - * Gives $(Id-I_h)z_1$ for a given - * @p dof1-function $z_1$, where $I_h$ - * is the interpolation from @p fe1 - * to @p fe2. The result $(Id-I_h)z_1$ is - * written into @p z1_difference. - * @p constraints1 and - * @p constraints2 are the - * hanging node constraints - * corresponding to @p dof1 and - * @p dof2, respectively. These - * objects are particular - * important when continuous - * elements on grids with hanging - * nodes (locally refined grids) - * are involved. - */ + /** + * Gives $(Id-I_h)z_1$ for a given + * @p dof1-function $z_1$, where $I_h$ + * is the interpolation from @p fe1 + * to @p fe2. The result $(Id-I_h)z_1$ is + * written into @p z1_difference. + * @p constraints1 and + * @p constraints2 are the + * hanging node constraints + * corresponding to @p dof1 and + * @p dof2, respectively. These + * objects are particular + * important when continuous + * elements on grids with hanging + * nodes (locally refined grids) + * are involved. + */ template - void interpolation_difference(const DoFHandler& dof1, - const ConstraintMatrix& constraints1, - const InVector& z1, - const DoFHandler& dof2, - const ConstraintMatrix& constraints2, - OutVector& z1_difference); - - - - /** - * $L^2$ projection for - * discontinuous - * elements. Operates the same - * direction as interpolate. - * - * The global projection can be - * computed by local matrices if - * the finite element spaces are - * discontinuous. With continuous - * elements, this is impossible, - * since a global mass matrix - * must be inverted. - */ - void interpolation_difference(const DoFHandler &dof1, ++ void interpolation_difference(const DoFHandler &dof1, + const ConstraintMatrix &constraints1, + const InVector &z1, - const DoFHandler &dof2, ++ const DoFHandler &dof2, + const ConstraintMatrix &constraints2, + OutVector &z1_difference); + + + + /** + * $L^2$ projection for + * discontinuous + * elements. Operates the same + * direction as interpolate. 
+ * + * The global projection can be + * computed by local matrices if + * the finite element spaces are + * discontinuous. With continuous + * elements, this is impossible, + * since a global mass matrix + * must be inverted. + */ template - void project_dg (const DoFHandler& dof1, - const InVector& u1, - const DoFHandler& dof2, - OutVector& u2); - - /** - * Gives the patchwise - * extrapolation of a @p dof1 - * function @p z1 to a @p dof2 - * function @p z2. @p dof1 and - * @p dof2 need to be DoFHandler - * based on the same triangulation. - * - * This function is interesting - * for e.g. extrapolating - * patchwise a piecewise linear - * solution to a piecewise - * quadratic solution. - * - * Note that the resulting field - * does not satisfy continuity - * requirements of the given - * finite elements. - * - * When you use continuous - * elements on grids with hanging - * nodes, please use the - * @p extrapolate function with - * an additional - * ConstraintMatrix argument, - * see below. - * - * Since this function operates - * on patches of cells, it is - * required that the underlying - * grid is refined at least once - * for every coarse grid cell. If - * this is not the case, an - * exception will be raised. - */ + void project_dg (const DoFHandler &dof1, + const InVector &u1, + const DoFHandler &dof2, + OutVector &u2); + + /** + * Gives the patchwise + * extrapolation of a @p dof1 + * function @p z1 to a @p dof2 + * function @p z2. @p dof1 and + * @p dof2 need to be DoFHandler + * based on the same triangulation. + * + * This function is interesting + * for e.g. extrapolating + * patchwise a piecewise linear + * solution to a piecewise + * quadratic solution. + * + * Note that the resulting field + * does not satisfy continuity + * requirements of the given + * finite elements. + * + * When you use continuous + * elements on grids with hanging + * nodes, please use the + * @p extrapolate function with + * an additional + * ConstraintMatrix argument, + * see below. + * + * Since this function operates + * on patches of cells, it is + * required that the underlying + * grid is refined at least once + * for every coarse grid cell. If + * this is not the case, an + * exception will be raised. + */ template - void extrapolate (const DoFHandler& dof1, - const InVector& z1, - const DoFHandler& dof2, - OutVector& z2); - - /** - * Gives the patchwise - * extrapolation of a @p dof1 - * function @p z1 to a @p dof2 - * function @p z2. @p dof1 and - * @p dof2 need to be DoFHandler - * based on the same triangulation. - * @p constraints is a hanging - * node constraints object - * corresponding to - * @p dof2. This object is - * particular important when - * interpolating onto continuous - * elements on grids with hanging - * nodes (locally refined grids). - * - * Otherwise, the same holds as - * for the other @p extrapolate - * function. - */ + void extrapolate (const DoFHandler &dof1, + const InVector &z1, + const DoFHandler &dof2, + OutVector &z2); + + /** + * Gives the patchwise + * extrapolation of a @p dof1 + * function @p z1 to a @p dof2 + * function @p z2. @p dof1 and + * @p dof2 need to be DoFHandler + * based on the same triangulation. + * @p constraints is a hanging + * node constraints object + * corresponding to + * @p dof2. This object is + * particular important when + * interpolating onto continuous + * elements on grids with hanging + * nodes (locally refined grids). + * + * Otherwise, the same holds as + * for the other @p extrapolate + * function. 
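A sketch of the typical extrapolation call, for example lifting a piecewise linear solution z1 to a piecewise quadratic representation z2. As noted above, the mesh must be refined at least once and the constraints belong to the target handler; handler and vector names are placeholders.

  ConstraintMatrix constraints_q2;
  DoFTools::make_hanging_node_constraints (dof_handler_q2, constraints_q2);
  constraints_q2.close ();

  Vector<double> z2 (dof_handler_q2.n_dofs());
  FETools::extrapolate (dof_handler_q1, z1,
                        dof_handler_q2, constraints_q2,
                        z2);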
+ */ template - void extrapolate (const DoFHandler& dof1, - const InVector& z1, - const DoFHandler& dof2, - const ConstraintMatrix& constraints, - OutVector& z2); - //@} - /** - * The numbering of the degrees - * of freedom in continuous finite - * elements is hierarchic, - * i.e. in such a way that we - * first number the vertex dofs, - * in the order of the vertices - * as defined by the - * triangulation, then the line - * dofs in the order and - * respecting the direction of - * the lines, then the dofs on - * quads, etc. However, we could - * have, as well, numbered them - * in a lexicographic way, - * i.e. with indices first - * running in x-direction, then - * in y-direction and finally in - * z-direction. Discontinuous - * elements of class FE_DGQ() - * are numbered in this way, for - * example. - * - * This function constructs a - * table which lexicographic - * index each degree of freedom - * in the hierarchic numbering - * would have. It operates on the - * continuous finite element - * given as first argument, and - * outputs the lexicographic - * indices in the second. - * - * Note that since this function - * uses specifics of the - * continuous finite elements, it - * can only operate on - * FiniteElementData objects - * inherent in FE_Q(). However, - * this function does not take a - * FE_Q object as it is also - * invoked by the FE_Q() - * constructor. - * - * It is assumed that the size of - * the output argument already - * matches the correct size, - * which is equal to the number - * of degrees of freedom in the - * finite element. - */ - void extrapolate (const DoFHandler &dof1, ++ void extrapolate (const DoFHandler &dof1, + const InVector &z1, - const DoFHandler &dof2, ++ const DoFHandler &dof2, + const ConstraintMatrix &constraints, + OutVector &z2); + //@} + /** + * The numbering of the degrees + * of freedom in continuous finite + * elements is hierarchic, + * i.e. in such a way that we + * first number the vertex dofs, + * in the order of the vertices + * as defined by the + * triangulation, then the line + * dofs in the order and + * respecting the direction of + * the lines, then the dofs on + * quads, etc. However, we could + * have, as well, numbered them + * in a lexicographic way, + * i.e. with indices first + * running in x-direction, then + * in y-direction and finally in + * z-direction. Discontinuous + * elements of class FE_DGQ() + * are numbered in this way, for + * example. + * + * This function constructs a + * table which lexicographic + * index each degree of freedom + * in the hierarchic numbering + * would have. It operates on the + * continuous finite element + * given as first argument, and + * outputs the lexicographic + * indices in the second. + * + * Note that since this function + * uses specifics of the + * continuous finite elements, it + * can only operate on + * FiniteElementData objects + * inherent in FE_Q(). However, + * this function does not take a + * FE_Q object as it is also + * invoked by the FE_Q() + * constructor. + * + * It is assumed that the size of + * the output argument already + * matches the correct size, + * which is equal to the number + * of degrees of freedom in the + * finite element. 
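A short illustration of the numbering conversion described above, here for a Q2 element. This is a sketch only: the output vector must already have the correct size, and the element object is passed through its FiniteElementData base.

  FE_Q<dim> fe (2);
  std::vector<unsigned int> h2l (fe.dofs_per_cell);
  FETools::hierarchic_to_lexicographic_numbering (fe, h2l);
  // h2l[i] is the lexicographic index of the dof with hierarchic index i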
+ */ template void hierarchic_to_lexicographic_numbering (const FiniteElementData &fe_data, diff --cc deal.II/include/deal.II/fe/mapping_q1_eulerian.h index 4064ba3b17,661abf6d2b..f07ab09d42 --- a/deal.II/include/deal.II/fe/mapping_q1_eulerian.h +++ b/deal.II/include/deal.II/fe/mapping_q1_eulerian.h @@@ -86,100 -86,100 +86,100 @@@ DEAL_II_NAMESPACE_OPE template , int spacedim=dim > class MappingQ1Eulerian : public MappingQ1 { - public: - - /** - * Constructor. It takes a - * Vector & as its - * first argument to specify the - * transformation of the whole - * problem from the reference to - * the current configuration. - * The organization of the - * elements in the @p Vector - * must follow the concept how - * deal.II stores solutions that - * are associated to a - * triangulation. This is - * automatically the case if the - * @p Vector represents the - * solution of the previous step - * of a nonlinear problem. - * Alternatively, the @p Vector - * can be initialized by - * DoFAccessor::set_dof_values(). - */ - MappingQ1Eulerian (const VECTOR &euler_transform_vectors, - const DoFHandler &shiftmap_dof_handler); - - /** - * Return a pointer to a copy of the - * present object. The caller of this - * copy then assumes ownership of it. - */ - virtual - Mapping * clone () const; - - /** - * Always returns @p false because - * MappingQ1Eulerian does not in general - * preserve vertex locations (unless the - * translation vector happens to provide - * for zero displacements at vertex - * locations). - */ - bool preserves_vertex_locations () const; - - /** - * Exception. - */ - DeclException0 (ExcInactiveCell); - - - - protected: - /** - * Implementation of the interface in - * MappingQ1. Overrides the function in - * the base class, since we cannot use - * any cell similarity for this class. - */ - virtual void - fill_fe_values (const typename Triangulation::cell_iterator &cell, - const Quadrature &quadrature, - typename Mapping::InternalDataBase &mapping_data, - typename std::vector > &quadrature_points, - std::vector &JxW_values, - std::vector > &jacobians, - std::vector > &jacobian_grads, - std::vector > &inverse_jacobians, - std::vector > &cell_normal_vectors, - CellSimilarity::Similarity &cell_similarity) const; - - /** - * Reference to the vector of - * shifts. - */ - SmartPointer > euler_transform_vectors; - - /** - * Pointer to the DoFHandler to - * which the mapping vector is - * associated. - */ - SmartPointer,MappingQ1Eulerian > shiftmap_dof_handler; - - - private: - /** - * Computes the support points of - * the mapping. For - * @p MappingQ1Eulerian these - * are the vertices. - */ - virtual void compute_mapping_support_points( - const typename Triangulation::cell_iterator &cell, - std::vector > &a) const; + public: + + /** + * Constructor. It takes a + * Vector & as its + * first argument to specify the + * transformation of the whole + * problem from the reference to + * the current configuration. + * The organization of the + * elements in the @p Vector + * must follow the concept how + * deal.II stores solutions that + * are associated to a + * triangulation. This is + * automatically the case if the + * @p Vector represents the + * solution of the previous step + * of a nonlinear problem. + * Alternatively, the @p Vector + * can be initialized by + * DoFAccessor::set_dof_values(). 
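To make the constructor use concrete, here is an illustrative sketch (not part of this header): the shift field lives in its own dim-component DoFHandler, and the resulting mapping is then handed to FEValues like any other mapping. The names shift_fe, displacements, fe and quadrature are placeholders.

  // requires <deal.II/fe/mapping_q1_eulerian.h>, <deal.II/fe/fe_system.h>
  FESystem<dim>   shift_fe (FE_Q<dim>(1), dim);   // one displacement per space direction
  DoFHandler<dim> shift_dof_handler (triangulation);
  shift_dof_handler.distribute_dofs (shift_fe);

  Vector<double> displacements (shift_dof_handler.n_dofs());
  // ... fill 'displacements', e.g. from a previous elasticity solve ...

  MappingQ1Eulerian<dim> eulerian_mapping (displacements, shift_dof_handler);
  FEValues<dim> fe_values (eulerian_mapping, fe, quadrature,
                           update_values | update_JxW_values);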
+ */ - MappingQ1Eulerian (const VECTOR &euler_transform_vectors, ++ MappingQ1Eulerian (const VECTOR &euler_transform_vectors, + const DoFHandler &shiftmap_dof_handler); + + /** + * Return a pointer to a copy of the + * present object. The caller of this + * copy then assumes ownership of it. + */ + virtual + Mapping *clone () const; + + /** + * Always returns @p false because + * MappingQ1Eulerian does not in general + * preserve vertex locations (unless the + * translation vector happens to provide + * for zero displacements at vertex + * locations). + */ + bool preserves_vertex_locations () const; + + /** + * Exception. + */ + DeclException0 (ExcInactiveCell); + + + + protected: + /** + * Implementation of the interface in + * MappingQ1. Overrides the function in + * the base class, since we cannot use + * any cell similarity for this class. + */ + virtual void + fill_fe_values (const typename Triangulation::cell_iterator &cell, + const Quadrature &quadrature, + typename Mapping::InternalDataBase &mapping_data, + typename std::vector > &quadrature_points, + std::vector &JxW_values, + std::vector > &jacobians, + std::vector > &jacobian_grads, + std::vector > &inverse_jacobians, + std::vector > &cell_normal_vectors, + CellSimilarity::Similarity &cell_similarity) const; + + /** + * Reference to the vector of + * shifts. + */ + SmartPointer > euler_transform_vectors; + + /** + * Pointer to the DoFHandler to + * which the mapping vector is + * associated. + */ + SmartPointer,MappingQ1Eulerian > shiftmap_dof_handler; + + + private: + /** + * Computes the support points of + * the mapping. For + * @p MappingQ1Eulerian these + * are the vertices. + */ + virtual void compute_mapping_support_points( + const typename Triangulation::cell_iterator &cell, + std::vector > &a) const; }; diff --cc deal.II/include/deal.II/fe/mapping_q_eulerian.h index 8f6a7b2f6d,df7b094683..54177ab37b --- a/deal.II/include/deal.II/fe/mapping_q_eulerian.h +++ b/deal.II/include/deal.II/fe/mapping_q_eulerian.h @@@ -88,148 -88,148 +88,148 @@@ DEAL_II_NAMESPACE_OPE template , int spacedim=dim > class MappingQEulerian : public MappingQ { + public: + /** + * Constructor. The first argument is + * the polynomical degree of the desired + * Qp mapping. It then takes a + * Vector & to specify the + * transformation of the domain + * from the reference to + * the current configuration. + * The organization of the + * elements in the @p Vector + * must follow the concept how + * deal.II stores solutions that + * are associated to a + * triangulation. This is + * automatically the case if the + * @p Vector represents the + * solution of the previous step + * of a nonlinear problem. + * Alternatively, the @p Vector + * can be initialized by + * DoFAccessor::set_dof_values(). + */ + + MappingQEulerian (const unsigned int degree, - const VECTOR &euler_vector, - const DoFHandler &euler_dof_handler); ++ const VECTOR &euler_vector, ++ const DoFHandler &euler_dof_handler); + + /** + * Return a pointer to a copy of the + * present object. The caller of this + * copy then assumes ownership of it. + */ + virtual + Mapping *clone () const; + + /** + * Always returns @p false because + * MappingQ1Eulerian does not in general + * preserve vertex locations (unless the + * translation vector happens to provide + * for zero displacements at vertex + * locations). + */ + bool preserves_vertex_locations () const; + + /** + * Exception + */ + DeclException0 (ExcInactiveCell); + + protected: + /** + * Implementation of the interface in + * MappingQ. 
Overrides the function in + * the base class, since we cannot use + * any cell similarity for this class. + */ + virtual void + fill_fe_values (const typename Triangulation::cell_iterator &cell, + const Quadrature &quadrature, + typename Mapping::InternalDataBase &mapping_data, + typename std::vector > &quadrature_points, + std::vector &JxW_values, + std::vector > &jacobians, + std::vector > &jacobian_grads, + std::vector > &inverse_jacobians, + std::vector > &cell_normal_vectors, + CellSimilarity::Similarity &cell_similarity) const; + + /** + * Reference to the vector of + * shifts. + */ + + SmartPointer > euler_vector; + + /** + * Pointer to the DoFHandler to + * which the mapping vector is + * associated. + */ + + SmartPointer,MappingQEulerian > euler_dof_handler; + + + private: + + /** + * Special quadrature rule used + * to define the support points + * in the reference configuration. + */ + + class SupportQuadrature : public Quadrature + { public: - /** - * Constructor. The first argument is - * the polynomical degree of the desired - * Qp mapping. It then takes a - * Vector & to specify the - * transformation of the domain - * from the reference to - * the current configuration. - * The organization of the - * elements in the @p Vector - * must follow the concept how - * deal.II stores solutions that - * are associated to a - * triangulation. This is - * automatically the case if the - * @p Vector represents the - * solution of the previous step - * of a nonlinear problem. - * Alternatively, the @p Vector - * can be initialized by - * DoFAccessor::set_dof_values(). - */ - - MappingQEulerian (const unsigned int degree, - const VECTOR &euler_vector, - const DoFHandler &euler_dof_handler); - - /** - * Return a pointer to a copy of the - * present object. The caller of this - * copy then assumes ownership of it. - */ - virtual - Mapping * clone () const; - - /** - * Always returns @p false because - * MappingQ1Eulerian does not in general - * preserve vertex locations (unless the - * translation vector happens to provide - * for zero displacements at vertex - * locations). - */ - bool preserves_vertex_locations () const; - - /** - * Exception - */ - DeclException0 (ExcInactiveCell); - - protected: - /** - * Implementation of the interface in - * MappingQ. Overrides the function in - * the base class, since we cannot use - * any cell similarity for this class. - */ - virtual void - fill_fe_values (const typename Triangulation::cell_iterator &cell, - const Quadrature &quadrature, - typename Mapping::InternalDataBase &mapping_data, - typename std::vector > &quadrature_points, - std::vector &JxW_values, - std::vector > &jacobians, - std::vector > &jacobian_grads, - std::vector > &inverse_jacobians, - std::vector > &cell_normal_vectors, - CellSimilarity::Similarity &cell_similarity) const; - - /** - * Reference to the vector of - * shifts. - */ - - SmartPointer > euler_vector; - - /** - * Pointer to the DoFHandler to - * which the mapping vector is - * associated. - */ - - SmartPointer,MappingQEulerian > euler_dof_handler; - - - private: - - /** - * Special quadrature rule used - * to define the support points - * in the reference configuration. - */ - - class SupportQuadrature : public Quadrature - { - public: - /** - * Constructor, with an argument - * defining the desired polynomial - * degree. - */ - - SupportQuadrature (const unsigned int map_degree); - - }; - - /** - * A member variable holding the - * quadrature points in the right - * order. 
- */ - const SupportQuadrature support_quadrature; - - /** - * FEValues object used to query the - * the given finite element field - * at the support points in the - * reference configuration. - * - * The variable is marked as - * mutable since we have to call - * FEValues::reinit from - * compute_mapping_support_points, - * a function that is 'const'. - */ - mutable FEValues fe_values; - - /** - * A variable to guard access to - * the fe_values variable. - */ - mutable Threads::ThreadMutex fe_values_mutex; - - /** - * Compute the positions of the - * support points in the current - * configuration - */ - virtual void compute_mapping_support_points( - const typename Triangulation::cell_iterator &cell, - std::vector > &a) const; + /** + * Constructor, with an argument + * defining the desired polynomial + * degree. + */ + + SupportQuadrature (const unsigned int map_degree); + + }; + + /** + * A member variable holding the + * quadrature points in the right + * order. + */ + const SupportQuadrature support_quadrature; + + /** + * FEValues object used to query the + * the given finite element field + * at the support points in the + * reference configuration. + * + * The variable is marked as + * mutable since we have to call + * FEValues::reinit from + * compute_mapping_support_points, + * a function that is 'const'. + */ + mutable FEValues fe_values; + + /** + * A variable to guard access to + * the fe_values variable. + */ + mutable Threads::ThreadMutex fe_values_mutex; + + /** + * Compute the positions of the + * support points in the current + * configuration + */ + virtual void compute_mapping_support_points( + const typename Triangulation::cell_iterator &cell, + std::vector > &a) const; }; diff --cc deal.II/include/deal.II/grid/grid_generator.h index 044daec674,d9a15f9b5f..2cea902ac0 --- a/deal.II/include/deal.II/grid/grid_generator.h +++ b/deal.II/include/deal.II/grid/grid_generator.h @@@ -50,982 -50,982 +50,982 @@@ template class Sparse */ class GridGenerator { - public: - /** - * Initialize the given triangulation - * with a hypercube (line in 1D, square - * in 2D, etc) consisting of exactly one - * cell. The hypercube volume is the - * tensor product interval - * [left,right]dim in - * the present number of dimensions, - * where the limits are given as - * arguments. They default to zero and - * unity, then producing the unit - * hypercube. All boundary indicators are - * set to zero ("not colorized") for 2d - * and 3d. In 1d the indicators are - * colorized, see hyper_rectangle(). - * - * @image html hyper_cubes.png - * - * See also - * subdivided_hyper_cube() for a - * coarse mesh consisting of - * several cells. See - * hyper_rectangle(), if - * different lengths in different - * ordinate directions are - * required. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void hyper_cube (Triangulation &tria, - const double left = 0., - const double right= 1.); + public: + /** + * Initialize the given triangulation + * with a hypercube (line in 1D, square + * in 2D, etc) consisting of exactly one + * cell. The hypercube volume is the + * tensor product interval + * [left,right]dim in + * the present number of dimensions, + * where the limits are given as + * arguments. They default to zero and + * unity, then producing the unit + * hypercube. All boundary indicators are + * set to zero ("not colorized") for 2d + * and 3d. In 1d the indicators are + * colorized, see hyper_rectangle(). 
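// A minimal sketch for the higher-order MappingQEulerian declared above,
// analogous to the MappingQ1Eulerian example; degree and field names are
// placeholders, not part of this patch.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/mapping_q_eulerian.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void mapping_q_eulerian_sketch ()
{
  const unsigned int dim    = 2;
  const unsigned int degree = 2;   // polynomial degree of the Qp mapping

  Triangulation<dim> triangulation;
  GridGenerator::hyper_cube (triangulation);

  // The displacement field must have dim components, here Q2 x dim.
  FESystem<dim>   fe (FE_Q<dim>(degree), dim);
  DoFHandler<dim> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  Vector<double> displacement (dof_handler.n_dofs());   // zero shift for brevity

  // Note the argument order of the constructor declared above: degree first,
  // then the Euler vector and the DoFHandler it is associated with.
  MappingQEulerian<dim> mapping (degree, displacement, dof_handler);
}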
+ * + * @image html hyper_cubes.png + * + * See also + * subdivided_hyper_cube() for a + * coarse mesh consisting of + * several cells. See + * hyper_rectangle(), if + * different lengths in different + * ordinate directions are + * required. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template - static void hyper_cube (Triangulation &tria, ++ static void hyper_cube (Triangulation &tria, + const double left = 0., + const double right= 1.); - /** - * Same as hyper_cube(), but - * with the difference that not - * only one cell is created but - * each coordinate direction is - * subdivided into - * @p repetitions cells. Thus, - * the number of cells filling - * the given volume is - * repetitionsdim. - * - * If spacedim=dim+1 the same - * mesh as in the case - * spacedim=dim is created, but - * the vertices have an - * additional coordinate =0. So, - * if dim=1 one obtains line - * along the x axis in the xy - * plane, and if dim=3 one - * obtains a square in lying in - * the xy plane in 3d space. - * - * @note The triangulation needs - * to be void upon calling this - * function. - */ - template - static void subdivided_hyper_cube (Triangulation &tria, - const unsigned int repetitions, - const double left = 0., - const double right= 1.); + /** + * Same as hyper_cube(), but + * with the difference that not + * only one cell is created but + * each coordinate direction is + * subdivided into + * @p repetitions cells. Thus, + * the number of cells filling + * the given volume is + * repetitionsdim. + * + * If spacedim=dim+1 the same + * mesh as in the case + * spacedim=dim is created, but + * the vertices have an + * additional coordinate =0. So, + * if dim=1 one obtains line + * along the x axis in the xy + * plane, and if dim=3 one + * obtains a square in lying in + * the xy plane in 3d space. + * + * @note The triangulation needs + * to be void upon calling this + * function. + */ + template - static void subdivided_hyper_cube (Triangulation &tria, ++ static void subdivided_hyper_cube (Triangulation &tria, + const unsigned int repetitions, + const double left = 0., + const double right= 1.); - /** - * Create a coordinate-parallel - * brick from the two - * diagonally opposite corner - * points @p p1 and @p p2. - * - * If the @p colorize flag is - * set, the - * @p boundary_indicators of the - * surfaces are assigned, such - * that the lower one in - * @p x-direction is 0, the - * upper one is 1. The indicators - * for the surfaces in - * @p y-direction are 2 and 3, - * the ones for @p z are 4 and - * 5. Additionally, material ids - * are assigned to the cells - * according to the octant their - * center is in: being in the right half - * plane for any coordinate - * direction xi - * adds 2i. For - * instance, the center point - * (1,-1,1) yields a material id 5. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void hyper_rectangle (Triangulation &tria, - const Point &p1, - const Point &p2, - const bool colorize = false); + /** + * Create a coordinate-parallel + * brick from the two + * diagonally opposite corner + * points @p p1 and @p p2. + * + * If the @p colorize flag is + * set, the + * @p boundary_indicators of the + * surfaces are assigned, such + * that the lower one in + * @p x-direction is 0, the + * upper one is 1. The indicators + * for the surfaces in + * @p y-direction are 2 and 3, + * the ones for @p z are 4 and + * 5. 
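// A short sketch of the two cube generators documented above; the refinement
// call is only there to illustrate that both functions expect an empty
// triangulation and that further refinement happens afterwards.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>

using namespace dealii;

void cube_sketch ()
{
  // One cell on [0,1]^2, then refined globally.
  Triangulation<2> single_cell;
  GridGenerator::hyper_cube (single_cell, 0., 1.);
  single_cell.refine_global (2);

  // The same volume, but split into 4 cells per coordinate direction.
  Triangulation<2> subdivided;
  GridGenerator::subdivided_hyper_cube (subdivided, 4, 0., 1.);
}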
Additionally, material ids + * are assigned to the cells + * according to the octant their + * center is in: being in the right half + * plane for any coordinate + * direction xi + * adds 2i. For + * instance, the center point + * (1,-1,1) yields a material id 5. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void hyper_rectangle (Triangulation &tria, + const Point &p1, + const Point &p2, + const bool colorize = false); - /** - * Create a coordinate-parallel - * parallelepiped from the two - * diagonally opposite corner - * points @p p1 and @p p2. In - * dimension @p i, - * repetitions[i] cells are - * generated. - * - * To get cells with an aspect - * ratio different from that of - * the domain, use different - * numbers of subdivisions in - * different coordinate - * directions. The minimum number - * of subdivisions in each - * direction is - * 1. @p repetitions is a list - * of integers denoting the - * number of subdivisions in each - * coordinate direction. - * - * If the @p colorize flag is - * set, the - * @p boundary_indicators of the - * surfaces are assigned, such - * that the lower one in - * @p x-direction is 0, the - * upper one is 1. The indicators - * for the surfaces in - * @p y-direction are 2 and 3, - * the ones for @p z are 4 and - * 5. Additionally, material ids - * are assigned to the cells - * according to the octant their - * center is in: being in the right half - * plane for any coordinate - * direction xi - * adds 2i. For - * instance, the center point - * (1,-1,1) yields a material id 5. - * - * Note that the @p colorize flag is - * ignored in 1d and is assumed to always - * be true. That means the boundary - * indicator is 0 on the left and 1 on - * the right. See step-15 for details. - * - * @note The triangulation needs to be - * void upon calling this - * function. - * - * @note For an example of the - * use of this function see the - * step-28 - * tutorial program. - */ - template - static - void - subdivided_hyper_rectangle (Triangulation &tria, - const std::vector &repetitions, - const Point &p1, - const Point &p2, - const bool colorize=false); + /** + * Create a coordinate-parallel + * parallelepiped from the two + * diagonally opposite corner + * points @p p1 and @p p2. In + * dimension @p i, + * repetitions[i] cells are + * generated. + * + * To get cells with an aspect + * ratio different from that of + * the domain, use different + * numbers of subdivisions in + * different coordinate + * directions. The minimum number + * of subdivisions in each + * direction is + * 1. @p repetitions is a list + * of integers denoting the + * number of subdivisions in each + * coordinate direction. + * + * If the @p colorize flag is + * set, the + * @p boundary_indicators of the + * surfaces are assigned, such + * that the lower one in + * @p x-direction is 0, the + * upper one is 1. The indicators + * for the surfaces in + * @p y-direction are 2 and 3, + * the ones for @p z are 4 and + * 5. Additionally, material ids + * are assigned to the cells + * according to the octant their + * center is in: being in the right half + * plane for any coordinate + * direction xi + * adds 2i. For + * instance, the center point + * (1,-1,1) yields a material id 5. + * + * Note that the @p colorize flag is + * ignored in 1d and is assumed to always + * be true. That means the boundary + * indicator is 0 on the left and 1 on + * the right. See step-15 for details. 
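// A sketch of hyper_rectangle() with the colorize flag described above; the
// corner points are arbitrary example values.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/base/point.h>

using namespace dealii;

void rectangle_sketch ()
{
  Triangulation<2> tria;
  GridGenerator::hyper_rectangle (tria,
                                  Point<2>(0., 0.),
                                  Point<2>(2., 1.),
                                  /*colorize=*/ true);
  // With colorize=true the faces at x=0 and x=2 carry boundary indicators
  // 0 and 1, those at y=0 and y=1 carry 2 and 3, which can then be used to
  // attach different boundary conditions.
}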
+ * + * @note The triangulation needs to be + * void upon calling this + * function. + * + * @note For an example of the + * use of this function see the + * step-28 + * tutorial program. + */ + template + static + void + subdivided_hyper_rectangle (Triangulation &tria, + const std::vector &repetitions, + const Point &p1, + const Point &p2, + const bool colorize=false); - /** - * Like the previous - * function. However, here the - * second argument does not - * denote the number of - * subdivisions in each - * coordinate direction, but a - * sequence of step sizes for - * each coordinate direction. The - * domain will therefore be - * subdivided into - * step_sizes[i].size() - * cells in coordinate direction - * i, with widths - * step_sizes[i][j] - * for the jth cell. - * - * This function is therefore the - * right one to generate graded - * meshes where cells are - * concentrated in certain areas, - * rather than a uniformly - * subdivided mesh as the - * previous function generates. - * - * The step sizes have to add up - * to the dimensions of the hyper - * rectangle specified by the - * points @p p1 and @p p2. - */ - template - static - void - subdivided_hyper_rectangle(Triangulation &tria, - const std::vector > &step_sizes, - const Point &p_1, - const Point &p_2, - const bool colorize); + /** + * Like the previous + * function. However, here the + * second argument does not + * denote the number of + * subdivisions in each + * coordinate direction, but a + * sequence of step sizes for + * each coordinate direction. The + * domain will therefore be + * subdivided into + * step_sizes[i].size() + * cells in coordinate direction + * i, with widths + * step_sizes[i][j] + * for the jth cell. + * + * This function is therefore the + * right one to generate graded + * meshes where cells are + * concentrated in certain areas, + * rather than a uniformly + * subdivided mesh as the + * previous function generates. + * + * The step sizes have to add up + * to the dimensions of the hyper + * rectangle specified by the + * points @p p1 and @p p2. + */ + template + static + void + subdivided_hyper_rectangle(Triangulation &tria, + const std::vector > &step_sizes, + const Point &p_1, + const Point &p_2, + const bool colorize); - /** - * Like the previous function, but with - * the following twist: the @p - * material_id argument is a - * dim-dimensional array that, for each - * cell, indicates which material_id - * should be set. In addition, and this - * is the major new functionality, if the - * material_id of a cell is (unsigned - * char)(-1), then that cell is - * deleted from the triangulation, - * i.e. the domain will have a void - * there. - */ - template - static - void - subdivided_hyper_rectangle (Triangulation &tria, - const std::vector< std::vector > &spacing, - const Point &p, - const Table &material_id, - const bool colorize=false); + /** + * Like the previous function, but with + * the following twist: the @p + * material_id argument is a + * dim-dimensional array that, for each + * cell, indicates which material_id + * should be set. In addition, and this + * is the major new functionality, if the + * material_id of a cell is (unsigned + * char)(-1), then that cell is + * deleted from the triangulation, + * i.e. the domain will have a void + * there. + */ + template + static + void + subdivided_hyper_rectangle (Triangulation &tria, + const std::vector< std::vector > &spacing, + const Point &p, + const Table &material_id, + const bool colorize=false); - /** - * A parallelogram. 
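// A sketch of the basic subdivided_hyper_rectangle() variant declared above,
// producing a 4x1 mesh of square cells on [0,4]x[0,1]; the numbers are
// arbitrary and only illustrate the meaning of the repetitions argument.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/base/point.h>
#include <vector>

using namespace dealii;

void subdivided_rectangle_sketch ()
{
  Triangulation<2> tria;

  std::vector<unsigned int> repetitions (2);
  repetitions[0] = 4;   // four cells in x-direction
  repetitions[1] = 1;   // one cell in y-direction

  GridGenerator::subdivided_hyper_rectangle (tria,
                                             repetitions,
                                             Point<2>(0., 0.),
                                             Point<2>(4., 1.));
}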
The first - * corner point is the - * origin. The dim - * adjacent points are the - * one-dimensional subtensors of - * the tensor provided and - * additional points will be sums - * of these two vectors. - * Colorizing is done according - * to hyper_rectangle(). - * - * @note This function is - * implemented in 2d only. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void - parallelogram(Triangulation& tria, - const Tensor<2,dim>& corners, - const bool colorize=false); + /** + * A parallelogram. The first + * corner point is the + * origin. The dim + * adjacent points are the + * one-dimensional subtensors of + * the tensor provided and + * additional points will be sums + * of these two vectors. + * Colorizing is done according + * to hyper_rectangle(). + * + * @note This function is + * implemented in 2d only. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void - parallelogram(Triangulation &tria, ++ parallelogram(Triangulation &tria, + const Tensor<2,dim> &corners, + const bool colorize=false); - /** - * Hypercube with a layer of - * hypercubes around it. The - * first two parameters give the - * lower and upper bound of the - * inner hypercube in all - * coordinate directions. - * @p thickness marks the size of - * the layer cells. - * - * If the flag colorize is set, - * the outer cells get material - * id's according to the - * following scheme: extending - * over the inner cube in - * (+/-) x-direction: 1/2. In y-direction - * 4/8, in z-direction 16/32. The cells - * at corners and edges (3d) get - * these values bitwise or'd. - * - * Presently only available in 2d - * and 3d. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void enclosed_hyper_cube (Triangulation &tria, - const double left = 0., - const double right= 1., - const double thickness = 1., - const bool colorize = false); + /** + * Hypercube with a layer of + * hypercubes around it. The + * first two parameters give the + * lower and upper bound of the + * inner hypercube in all + * coordinate directions. + * @p thickness marks the size of + * the layer cells. + * + * If the flag colorize is set, + * the outer cells get material + * id's according to the + * following scheme: extending + * over the inner cube in + * (+/-) x-direction: 1/2. In y-direction + * 4/8, in z-direction 16/32. The cells + * at corners and edges (3d) get + * these values bitwise or'd. + * + * Presently only available in 2d + * and 3d. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void enclosed_hyper_cube (Triangulation &tria, + const double left = 0., + const double right= 1., + const double thickness = 1., + const bool colorize = false); - /** - * Initialize the given - * triangulation with a - * hyperball, i.e. a circle or a - * ball around center - * with given radius. - * - * In order to avoid degenerate - * cells at the boundaries, the - * circle is triangulated by five - * cells, the ball by seven - * cells. The diameter of the - * center cell is chosen so that - * the aspect ratio of the - * boundary cells after one - * refinement is optimized. - * - * This function is declared to - * exist for triangulations of - * all space dimensions, but - * throws an error if called in - * 1d. - * - * @note The triangulation needs to be - * void upon calling this - * function. 
- */ - template - static void hyper_ball (Triangulation &tria, - const Point ¢er = Point(), - const double radius = 1.); + /** + * Initialize the given + * triangulation with a + * hyperball, i.e. a circle or a + * ball around center + * with given radius. + * + * In order to avoid degenerate + * cells at the boundaries, the + * circle is triangulated by five + * cells, the ball by seven + * cells. The diameter of the + * center cell is chosen so that + * the aspect ratio of the + * boundary cells after one + * refinement is optimized. + * + * This function is declared to + * exist for triangulations of + * all space dimensions, but + * throws an error if called in + * 1d. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void hyper_ball (Triangulation &tria, + const Point ¢er = Point(), + const double radius = 1.); - /** - * This class produces a half - * hyper-ball around - * center, which - * contains four elements in 2d - * and 6 in 3d. The cut plane is - * perpendicular to the - * x-axis. - * - * The boundary indicators for the final - * triangulation are 0 for the curved boundary and - * 1 for the cut plane. - * - * The appropriate - * boundary class is - * HalfHyperBallBoundary, or HyperBallBoundary. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void half_hyper_ball (Triangulation &tria, - const Point ¢er = Point(), - const double radius = 1.); + /** + * This class produces a half + * hyper-ball around + * center, which + * contains four elements in 2d + * and 6 in 3d. The cut plane is + * perpendicular to the + * x-axis. + * + * The boundary indicators for the final + * triangulation are 0 for the curved boundary and + * 1 for the cut plane. + * + * The appropriate + * boundary class is + * HalfHyperBallBoundary, or HyperBallBoundary. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void half_hyper_ball (Triangulation &tria, + const Point ¢er = Point(), + const double radius = 1.); - /** - * Create a cylinder around the - * x-axis. The cylinder extends - * from x=-half_length to - * x=+half_length and its - * projection into the - * @p yz-plane is a circle of - * radius @p radius. - * - * In two dimensions, the - * cylinder is a rectangle from - * x=-half_length to - * x=+half_length and - * from y=-radius to - * y=radius. - * - * The boundaries are colored - * according to the following - * scheme: 0 for the hull of the - * cylinder, 1 for the left hand - * face and 2 for the right hand - * face. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void cylinder (Triangulation &tria, - const double radius = 1., - const double half_length = 1.); + /** + * Create a cylinder around the + * x-axis. The cylinder extends + * from x=-half_length to + * x=+half_length and its + * projection into the + * @p yz-plane is a circle of + * radius @p radius. + * + * In two dimensions, the + * cylinder is a rectangle from + * x=-half_length to + * x=+half_length and + * from y=-radius to + * y=radius. + * + * The boundaries are colored + * according to the following + * scheme: 0 for the hull of the + * cylinder, 1 for the left hand + * face and 2 for the right hand + * face. + * + * @note The triangulation needs to be + * void upon calling this + * function. 
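// A sketch of hyper_ball() together with the boundary object that keeps new
// boundary vertices on the circle upon refinement; HyperBallBoundary comes
// from tria_boundary_lib.h and is assumed here, not introduced by this patch.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/base/point.h>

using namespace dealii;

void ball_sketch ()
{
  const Point<2> center (0., 0.);
  const double   radius = 1.;

  Triangulation<2> tria;
  GridGenerator::hyper_ball (tria, center, radius);

  // Without this, refinement would place new vertices on straight edges
  // instead of on the circle.
  static const HyperBallBoundary<2> boundary (center, radius);
  tria.set_boundary (0, boundary);

  tria.refine_global (2);
}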
+ */ + template + static void cylinder (Triangulation &tria, + const double radius = 1., + const double half_length = 1.); - /** - * Create a cutted cone around - * the x-axis. The cone extends - * from x=-half_length - * to x=half_length and - * its projection into the @p - * yz-plane is a circle of radius - * @p radius_0 at - * x=-half_length and a - * circle of radius @p radius_1 - * at x=+half_length. - * In between the radius is - * linearly decreasing. - * - * In two dimensions, the cone is - * a trapezoid from - * x=-half_length to - * x=+half_length and - * from y=-radius_0 to - * y=radius_0 at - * x=-half_length and - * from y=-radius_1 to - * y=radius_1 at - * x=+half_length. In - * between the range of - * y is linearly - * decreasing. - * - * The boundaries are colored - * according to the following - * scheme: 0 for the hull of the - * cone, 1 for the left hand - * face and 2 for the right hand - * face. - * - * An example of use can be found in the - * documentation of the ConeBoundary - * class, with which you probably want to - * associate boundary indicator 0 (the - * hull of the cone). - * - * @note The triangulation needs to be - * void upon calling this - * function. - * - * @author Markus Bürg, 2009 - */ - template - static void - truncated_cone (Triangulation &tria, - const double radius_0 = 1.0, - const double radius_1 = 0.5, - const double half_length = 1.0); + /** + * Create a cutted cone around + * the x-axis. The cone extends + * from x=-half_length + * to x=half_length and + * its projection into the @p + * yz-plane is a circle of radius + * @p radius_0 at + * x=-half_length and a + * circle of radius @p radius_1 + * at x=+half_length. + * In between the radius is + * linearly decreasing. + * + * In two dimensions, the cone is + * a trapezoid from + * x=-half_length to + * x=+half_length and + * from y=-radius_0 to + * y=radius_0 at + * x=-half_length and + * from y=-radius_1 to + * y=radius_1 at + * x=+half_length. In + * between the range of + * y is linearly + * decreasing. + * + * The boundaries are colored + * according to the following + * scheme: 0 for the hull of the + * cone, 1 for the left hand + * face and 2 for the right hand + * face. + * + * An example of use can be found in the + * documentation of the ConeBoundary + * class, with which you probably want to + * associate boundary indicator 0 (the + * hull of the cone). + * + * @note The triangulation needs to be + * void upon calling this + * function. + * + * @author Markus Bürg, 2009 + */ + template + static void + truncated_cone (Triangulation &tria, + const double radius_0 = 1.0, + const double radius_1 = 0.5, + const double half_length = 1.0); - /** - * Initialize the given - * triangulation with a hyper-L - * consisting of exactly - * 2^dim-1 cells. It - * produces the hypercube with - * the interval [left,right] without - * the hypercube made out of the - * interval [(a+b)/2,b]. - * - * @image html hyper_l.png - * - * The triangulation needs to be - * void upon calling this - * function. - * - * This function is declared to - * exist for triangulations of - * all space dimensions, but - * throws an error if called in - * 1d. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void hyper_L (Triangulation &tria, - const double left = -1., - const double right= 1.); + /** + * Initialize the given + * triangulation with a hyper-L + * consisting of exactly + * 2^dim-1 cells. 
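// A sketch of cylinder() in 3d, attaching a CylinderBoundary (assumed from
// tria_boundary_lib.h) to the hull, which carries indicator 0 as described
// above; radius and half length use the default values.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_boundary_lib.h>

using namespace dealii;

void cylinder_sketch ()
{
  Triangulation<3> tria;
  GridGenerator::cylinder (tria, /*radius=*/ 1., /*half_length=*/ 1.);

  // Indicator 0 is the hull, 1 and 2 are the left and right end faces.
  static const CylinderBoundary<3> hull_boundary (1.);
  tria.set_boundary (0, hull_boundary);

  tria.refine_global (1);
}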
It + * produces the hypercube with + * the interval [left,right] without + * the hypercube made out of the + * interval [(a+b)/2,b]. + * + * @image html hyper_l.png + * + * The triangulation needs to be + * void upon calling this + * function. + * + * This function is declared to + * exist for triangulations of + * all space dimensions, but + * throws an error if called in + * 1d. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void hyper_L (Triangulation &tria, + const double left = -1., + const double right= 1.); - /** - * Initialize the given - * Triangulation with a hypercube - * with a slit. In each - * coordinate direction, the - * hypercube extends from @p left - * to @p right. - * - * In 2d, the split goes in - * vertical direction from - * x=(left+right)/2, - * y=left to the center of - * the square at - * x=y=(left+right)/2. - * - * In 3d, the 2d domain is just - * extended in the - * z-direction, such that - * a plane cuts the lower half of - * a rectangle in two. + /** + * Initialize the given + * Triangulation with a hypercube + * with a slit. In each + * coordinate direction, the + * hypercube extends from @p left + * to @p right. + * + * In 2d, the split goes in + * vertical direction from + * x=(left+right)/2, + * y=left to the center of + * the square at + * x=y=(left+right)/2. + * + * In 3d, the 2d domain is just + * extended in the + * z-direction, such that + * a plane cuts the lower half of + * a rectangle in two. - * This function is declared to - * exist for triangulations of - * all space dimensions, but - * throws an error if called in - * 1d. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void hyper_cube_slit (Triangulation &tria, - const double left = 0., - const double right= 1., - const bool colorize = false); + * This function is declared to + * exist for triangulations of + * all space dimensions, but + * throws an error if called in + * 1d. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void hyper_cube_slit (Triangulation &tria, + const double left = 0., + const double right= 1., + const bool colorize = false); - /** - * Produce a hyper-shell, - * the region between two - * spheres around center, - * with given - * inner_radius and - * outer_radius. The number - * n_cells indicates the - * number of cells of the resulting - * triangulation, i.e., how many cells - * form the ring (in 2d) or the shell - * (in 3d). - * - * If the flag @p colorize is @p true, - * then the outer boundary will have the - * indicator 1, while the inner boundary - * has id zero. If the flag is @p false, - * both have indicator zero. - * - * In 2D, the number - * n_cells of elements - * for this initial triangulation - * can be chosen arbitrarily. If - * the number of initial cells is - * zero (as is the default), then - * it is computed adaptively such - * that the resulting elements - * have the least aspect ratio. - * - * In 3D, only two different numbers are - * meaningful, 6 for a surface based on a - * hexahedron (i.e. 6 panels on the inner - * sphere extruded in radial direction to - * form 6 cells) and 12 for the rhombic - * dodecahedron. 
These give rise to the - * following meshes upon one refinement: - * - * @image html hypershell3d-6.png - * @image html hypershell3d-12.png - * - * Neither of these meshes is - * particularly good since one ends up - * with poorly shaped cells at the inner - * edge upon refinement. For example, - * this is the middle plane of the mesh - * for the n_cells=6: - * - * @image html hyper_shell_6_cross_plane.png - * - * The mesh generated with - * n_cells=6 is better but - * still not good. As a consequence, you - * may also specify - * n_cells=96 as a third - * option. The mesh generated in this way - * is based on a once refined version of - * the one with n_cells=12, - * where all internal nodes are re-placed - * along a shell somewhere between the - * inner and outer boundary of the - * domain. The following two images - * compare half of the hyper shell for - * n_cells=12 and - * n_cells=96 (note that the - * doubled radial lines on the cross - * section are artifacts of the - * visualization): - * - * @image html hyper_shell_12_cut.png - * @image html hyper_shell_96_cut.png - * - * @note This function is declared to - * exist for triangulations of - * all space dimensions, but - * throws an error if called in - * 1d. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void hyper_shell (Triangulation &tria, - const Point ¢er, - const double inner_radius, - const double outer_radius, - const unsigned int n_cells = 0, - bool colorize = false); + /** + * Produce a hyper-shell, + * the region between two + * spheres around center, + * with given + * inner_radius and + * outer_radius. The number + * n_cells indicates the + * number of cells of the resulting + * triangulation, i.e., how many cells + * form the ring (in 2d) or the shell + * (in 3d). + * + * If the flag @p colorize is @p true, + * then the outer boundary will have the + * indicator 1, while the inner boundary + * has id zero. If the flag is @p false, + * both have indicator zero. + * + * In 2D, the number + * n_cells of elements + * for this initial triangulation + * can be chosen arbitrarily. If + * the number of initial cells is + * zero (as is the default), then + * it is computed adaptively such + * that the resulting elements + * have the least aspect ratio. + * + * In 3D, only two different numbers are + * meaningful, 6 for a surface based on a + * hexahedron (i.e. 6 panels on the inner + * sphere extruded in radial direction to + * form 6 cells) and 12 for the rhombic + * dodecahedron. These give rise to the + * following meshes upon one refinement: + * + * @image html hypershell3d-6.png + * @image html hypershell3d-12.png + * + * Neither of these meshes is + * particularly good since one ends up + * with poorly shaped cells at the inner + * edge upon refinement. For example, + * this is the middle plane of the mesh + * for the n_cells=6: + * + * @image html hyper_shell_6_cross_plane.png + * + * The mesh generated with + * n_cells=6 is better but + * still not good. As a consequence, you + * may also specify + * n_cells=96 as a third + * option. The mesh generated in this way + * is based on a once refined version of + * the one with n_cells=12, + * where all internal nodes are re-placed + * along a shell somewhere between the + * inner and outer boundary of the + * domain. 
The following two images + * compare half of the hyper shell for + * n_cells=12 and + * n_cells=96 (note that the + * doubled radial lines on the cross + * section are artifacts of the + * visualization): + * + * @image html hyper_shell_12_cut.png + * @image html hyper_shell_96_cut.png + * + * @note This function is declared to + * exist for triangulations of + * all space dimensions, but + * throws an error if called in + * 1d. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void hyper_shell (Triangulation &tria, + const Point ¢er, + const double inner_radius, + const double outer_radius, + const unsigned int n_cells = 0, + bool colorize = false); - /** - * Produce a half hyper-shell, - * i.e. the space between two - * circles in two space - * dimensions and the region - * between two spheres in 3d, - * with given inner and outer - * radius and a given number of - * elements for this initial - * triangulation. However, - * opposed to the previous - * function, it does not produce - * a whole shell, but only one - * half of it, namely that part - * for which the first component - * is restricted to non-negative - * values. The purpose of this - * class is to enable - * computations for solutions - * which have rotational - * symmetry, in which case the - * half shell in 2d represents a - * shell in 3d. - * - * If the number of - * initial cells is zero (as is - * the default), then it is - * computed adaptively such that - * the resulting elements have - * the least aspect ratio. - * - * If colorize is set to true, the - * inner, outer, left, and right - * boundary get indicator 0, 1, 2, - * and 3, respectively. Otherwise - * all indicators are set to 0. - * - * @note The triangulation needs to be - * void upon calling this - * function. - */ - template - static void half_hyper_shell (Triangulation &tria, - const Point ¢er, - const double inner_radius, - const double outer_radius, - const unsigned int n_cells = 0, - const bool colorize = false); + /** + * Produce a half hyper-shell, + * i.e. the space between two + * circles in two space + * dimensions and the region + * between two spheres in 3d, + * with given inner and outer + * radius and a given number of + * elements for this initial + * triangulation. However, + * opposed to the previous + * function, it does not produce + * a whole shell, but only one + * half of it, namely that part + * for which the first component + * is restricted to non-negative + * values. The purpose of this + * class is to enable + * computations for solutions + * which have rotational + * symmetry, in which case the + * half shell in 2d represents a + * shell in 3d. + * + * If the number of + * initial cells is zero (as is + * the default), then it is + * computed adaptively such that + * the resulting elements have + * the least aspect ratio. + * + * If colorize is set to true, the + * inner, outer, left, and right + * boundary get indicator 0, 1, 2, + * and 3, respectively. Otherwise + * all indicators are set to 0. + * + * @note The triangulation needs to be + * void upon calling this + * function. + */ + template + static void half_hyper_shell (Triangulation &tria, + const Point ¢er, + const double inner_radius, + const double outer_radius, + const unsigned int n_cells = 0, + const bool colorize = false); - /** - * Produce a domain that is the - * intersection between a - * hyper-shell with given inner - * and outer radius, i.e. 
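// A sketch of hyper_shell() in 2d with colorized boundaries as documented
// above; HyperShellBoundary keeps the inner and outer rings circular under
// refinement and is assumed from tria_boundary_lib.h.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/base/point.h>

using namespace dealii;

void shell_sketch ()
{
  const Point<2> center (0., 0.);

  Triangulation<2> tria;
  GridGenerator::hyper_shell (tria, center,
                              /*inner_radius=*/ 0.5,
                              /*outer_radius=*/ 1.0,
                              /*n_cells=*/ 0,        // choose automatically
                              /*colorize=*/ true);   // inner=0, outer=1

  static const HyperShellBoundary<2> boundary (center);
  tria.set_boundary (0, boundary);
  tria.set_boundary (1, boundary);

  tria.refine_global (2);
}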
the - * space between two circles in - * two space dimensions and the - * region between two spheres in - * 3d, and the positive quadrant - * (in 2d) or octant (in 3d). In - * 2d, this is indeed a quarter - * of the full annulus, while the - * function is a misnomer in 3d - * because there the domain is - * not a quarter but one eighth - * of the full shell. - * - * If the number of initial cells is zero - * (as is the default), then it is - * computed adaptively such that the - * resulting elements have the least - * aspect ratio in 2d. - * - * If colorize is set to true, the inner, - * outer, left, and right boundary get - * indicator 0, 1, 2, and 3 in 2d, - * respectively. Otherwise all indicators - * are set to 0. In 3d indicator 2 is at - * the face x=0, 3 at y=0, 4 at z=0. - * - * @note The triangulation needs to be - * void upon calling this function. - */ - template - static void quarter_hyper_shell (Triangulation &tria, - const Point ¢er, - const double inner_radius, - const double outer_radius, - const unsigned int n_cells = 0, - const bool colorize = false); + /** + * Produce a domain that is the + * intersection between a + * hyper-shell with given inner + * and outer radius, i.e. the + * space between two circles in + * two space dimensions and the + * region between two spheres in + * 3d, and the positive quadrant + * (in 2d) or octant (in 3d). In + * 2d, this is indeed a quarter + * of the full annulus, while the + * function is a misnomer in 3d + * because there the domain is + * not a quarter but one eighth + * of the full shell. + * + * If the number of initial cells is zero + * (as is the default), then it is + * computed adaptively such that the + * resulting elements have the least + * aspect ratio in 2d. + * + * If colorize is set to true, the inner, + * outer, left, and right boundary get + * indicator 0, 1, 2, and 3 in 2d, + * respectively. Otherwise all indicators + * are set to 0. In 3d indicator 2 is at + * the face x=0, 3 at y=0, 4 at z=0. + * + * @note The triangulation needs to be + * void upon calling this function. + */ + template + static void quarter_hyper_shell (Triangulation &tria, + const Point ¢er, + const double inner_radius, + const double outer_radius, + const unsigned int n_cells = 0, + const bool colorize = false); - /** - * Produce a domain that is the space - * between two cylinders in 3d, with - * given length, inner and outer radius - * and a given number of elements for - * this initial triangulation. If @p - * n_radial_cells is zero (as is the - * default), then it is computed - * adaptively such that the resulting - * elements have the least aspect - * ratio. The same holds for @p - * n_axial_cells. - * - * @note Although this function - * is declared as a template, it - * does not make sense in 1D and - * 2D. - * - * @note The triangulation needs - * to be void upon calling this - * function. - */ - template - static void cylinder_shell (Triangulation &tria, - const double length, - const double inner_radius, - const double outer_radius, - const unsigned int n_radial_cells = 0, - const unsigned int n_axial_cells = 0); + /** + * Produce a domain that is the space + * between two cylinders in 3d, with + * given length, inner and outer radius + * and a given number of elements for + * this initial triangulation. If @p + * n_radial_cells is zero (as is the + * default), then it is computed + * adaptively such that the resulting + * elements have the least aspect + * ratio. The same holds for @p + * n_axial_cells. 
+ * + * @note Although this function + * is declared as a template, it + * does not make sense in 1D and + * 2D. + * + * @note The triangulation needs + * to be void upon calling this + * function. + */ + template + static void cylinder_shell (Triangulation &tria, + const double length, + const double inner_radius, + const double outer_radius, + const unsigned int n_radial_cells = 0, + const unsigned int n_axial_cells = 0); - /** - * Produce the surface meshing of the - * torus. The axis of the torus is the - * $y$-axis while the plane of the torus - * is the $x$-$z$ plane. The boundary of - * this object can be described by the - * TorusBoundary class. - * - * @param tria The triangulation to be - * filled. - * - * @param R The radius of the circle, - * which forms the middle line of the - * torus containing the loop of - * cells. Must be greater than @p r. - * - * @param r The inner radius of the - * torus. - */ + /** + * Produce the surface meshing of the + * torus. The axis of the torus is the + * $y$-axis while the plane of the torus + * is the $x$-$z$ plane. The boundary of + * this object can be described by the + * TorusBoundary class. + * + * @param tria The triangulation to be + * filled. + * + * @param R The radius of the circle, + * which forms the middle line of the + * torus containing the loop of + * cells. Must be greater than @p r. + * + * @param r The inner radius of the + * torus. + */ - static void torus (Triangulation<2,3>& tria, - const double R, - const double r); - static void torus (Triangulation<2,3> &tria, ++ static void torus (Triangulation<2,3> &tria, + const double R, + const double r); - /** - * This class produces a square - * on the xy-plane with a - * circular hole in the middle, - * times the interval [0.L] - * (only in 3d). - * - * @image html cubes_hole.png - * - * It is implemented in 2d and - * 3d, and takes the following - * arguments: - * - * @arg @p inner_radius: size of the - * internal hole - * @arg @p outer_radius: size of the - * biggest enclosed cylinder - * @arg @p L: extension on the @p z-direction - * @arg @p repetitions: number of subdivisions - * along the @p z-direction - * @arg @p colorize: wether to assign different - * boundary indicators to different faces. - * The colors are given in lexicographic - * ordering for the flat faces (0 to 3 in 2d, - * 0 to 5 in 3d) plus the curved hole - * (4 in 2d, and 6 in 3d). - * If @p colorize is set to false, then flat faces - * get the number 0 and the hole gets number 1. - */ - template - static void hyper_cube_with_cylindrical_hole (Triangulation &triangulation, + /** + * This class produces a square + * on the xy-plane with a + * circular hole in the middle, + * times the interval [0.L] + * (only in 3d). + * + * @image html cubes_hole.png + * + * It is implemented in 2d and + * 3d, and takes the following + * arguments: + * + * @arg @p inner_radius: size of the + * internal hole + * @arg @p outer_radius: size of the + * biggest enclosed cylinder + * @arg @p L: extension on the @p z-direction + * @arg @p repetitions: number of subdivisions + * along the @p z-direction + * @arg @p colorize: wether to assign different + * boundary indicators to different faces. + * The colors are given in lexicographic + * ordering for the flat faces (0 to 3 in 2d, + * 0 to 5 in 3d) plus the curved hole + * (4 in 2d, and 6 in 3d). + * If @p colorize is set to false, then flat faces + * get the number 0 and the hole gets number 1. 
+ */ + template + static void hyper_cube_with_cylindrical_hole (Triangulation &triangulation, const double inner_radius = .25, const double outer_radius = .5, const double L = .5, const unsigned int repetition = 1, const bool colorize = false); - /** - * Produce a ring of cells in 3D that is - * cut open, twisted and glued together - * again. This results in a kind of - * moebius-loop. - * - * @param tria The triangulation to be worked on. - * @param n_cells The number of cells in the loop. Must be greater than 4. - * @param n_rotations The number of rotations (Pi/2 each) to be performed before glueing the loop together. - * @param R The radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than @p r. - * @param r The radius of the cylinder bend together as loop. - */ - static void moebius (Triangulation<3,3>& tria, - const unsigned int n_cells, - const unsigned int n_rotations, - const double R, - const double r); + /** + * Produce a ring of cells in 3D that is + * cut open, twisted and glued together + * again. This results in a kind of + * moebius-loop. + * + * @param tria The triangulation to be worked on. + * @param n_cells The number of cells in the loop. Must be greater than 4. + * @param n_rotations The number of rotations (Pi/2 each) to be performed before glueing the loop together. + * @param R The radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than @p r. + * @param r The radius of the cylinder bend together as loop. + */ - static void moebius (Triangulation<3,3> &tria, ++ static void moebius (Triangulation<3,3> &tria, + const unsigned int n_cells, + const unsigned int n_rotations, + const double R, + const double r); - /** - * Given the two triangulations - * specified as the first two - * arguments, create the - * triangulation that contains - * the cells of both - * triangulation and store it in - * the third parameter. Previous - * content of @p result will be - * deleted. - * - * This function is most often used - * to compose meshes for more - * complicated geometries if the - * geometry can be composed of - * simpler parts for which functions - * exist to generate coarse meshes. - * For example, the channel mesh used - * in step-35 could in principle be - * created using a mesh created by the - * GridGenerator::hyper_cube_with_cylindrical_hole - * function and several rectangles, - * and merging them using the current - * function. The rectangles will - * have to be translated to the - * right for this, a task that can - * be done using the GridTools::shift - * function (other tools to transform - * individual mesh building blocks are - * GridTools::transform, GridTools::rotate, - * and GridTools::scale). - * - * @note The two input triangulations - * must be coarse meshes that have - * no refined cells. - * - * @note The function copies the material ids - * of the cells of the two input - * triangulations into the output - * triangulation but it currently makes - * no attempt to do the same for boundary - * ids. In other words, if the two - * coarse meshes have anything but - * the default boundary indicators, - * then you will currently have to set - * boundary indicators again by hand - * in the output triangulation. - * - * @note For a related operation - * on refined meshes when both - * meshes are derived from the - * same coarse mesh, see - * GridTools::create_union_triangulation . 
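// A sketch of hyper_cube_with_cylindrical_hole() in 2d as documented above,
// with the default radii and without colorization, so that the hole carries
// boundary indicator 1 and the flat faces indicator 0; using a circular
// HyperBallBoundary for the hole is an assumption of this sketch.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/base/point.h>

using namespace dealii;

void cube_with_hole_sketch ()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube_with_cylindrical_hole (tria,
                                                   /*inner_radius=*/ 0.25,
                                                   /*outer_radius=*/ 0.5);

  // Keep the hole circular under refinement.
  static const HyperBallBoundary<2> hole (Point<2>(), 0.25);
  tria.set_boundary (1, hole);

  tria.refine_global (1);
}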
- */ - template - static - void - merge_triangulations (const Triangulation &triangulation_1, - const Triangulation &triangulation_2, - Triangulation &result); + /** + * Given the two triangulations + * specified as the first two + * arguments, create the + * triangulation that contains + * the cells of both + * triangulation and store it in + * the third parameter. Previous + * content of @p result will be + * deleted. + * + * This function is most often used + * to compose meshes for more + * complicated geometries if the + * geometry can be composed of + * simpler parts for which functions + * exist to generate coarse meshes. + * For example, the channel mesh used + * in step-35 could in principle be + * created using a mesh created by the + * GridGenerator::hyper_cube_with_cylindrical_hole + * function and several rectangles, + * and merging them using the current + * function. The rectangles will + * have to be translated to the + * right for this, a task that can + * be done using the GridTools::shift + * function (other tools to transform + * individual mesh building blocks are + * GridTools::transform, GridTools::rotate, + * and GridTools::scale). + * + * @note The two input triangulations + * must be coarse meshes that have + * no refined cells. + * + * @note The function copies the material ids + * of the cells of the two input + * triangulations into the output + * triangulation but it currently makes + * no attempt to do the same for boundary + * ids. In other words, if the two + * coarse meshes have anything but + * the default boundary indicators, + * then you will currently have to set + * boundary indicators again by hand + * in the output triangulation. + * + * @note For a related operation + * on refined meshes when both + * meshes are derived from the + * same coarse mesh, see + * GridTools::create_union_triangulation . + */ + template + static + void + merge_triangulations (const Triangulation &triangulation_1, + const Triangulation &triangulation_2, + Triangulation &result); - /** - * This function transformes the - * @p Triangulation @p tria - * smoothly to a domain that is - * described by the boundary - * points in the map - * @p new_points. This map maps - * the point indices to the - * boundary points in the - * transformed domain. - * - * Note, that the - * @p Triangulation is changed - * in-place, therefore you don't - * need to keep two - * triangulations, but the given - * triangulation is changed - * (overwritten). - * - * In 1d, this function is not - * currently implemented. - */ - template - static void laplace_transformation (Triangulation &tria, - const std::map > &new_points); + /** + * This function transformes the + * @p Triangulation @p tria + * smoothly to a domain that is + * described by the boundary + * points in the map + * @p new_points. This map maps + * the point indices to the + * boundary points in the + * transformed domain. + * + * Note, that the + * @p Triangulation is changed + * in-place, therefore you don't + * need to keep two + * triangulations, but the given + * triangulation is changed + * (overwritten). + * + * In 1d, this function is not + * currently implemented. 
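// A sketch of the merge_triangulations() workflow described above: build two
// coarse blocks, shift the second with GridTools::shift, and merge them into
// a single coarse mesh; the geometry is arbitrary.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/base/point.h>

using namespace dealii;

void merge_sketch ()
{
  Triangulation<2> block_1, block_2, result;

  GridGenerator::hyper_cube (block_1, 0., 1.);
  GridGenerator::hyper_cube (block_2, 0., 1.);

  // Move the second block next to the first one before merging; both inputs
  // must still be unrefined coarse meshes.
  GridTools::shift (Point<2>(1., 0.), block_2);

  GridGenerator::merge_triangulations (block_1, block_2, result);
  result.refine_global (2);
}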
+ */ + template + static void laplace_transformation (Triangulation &tria, + const std::map > &new_points); - /** - * Exception - */ - DeclException0 (ExcInvalidRadii); - /** - * Exception - */ - DeclException1 (ExcInvalidRepetitions, - int, - << "The number of repetitions " << arg1 - << " must be >=1."); - /** - * Exception - */ - DeclException1 (ExcInvalidRepetitionsDimension, - int, - << "The vector of repetitions must have " - << arg1 <<" elements."); + /** + * Exception + */ + DeclException0 (ExcInvalidRadii); + /** + * Exception + */ + DeclException1 (ExcInvalidRepetitions, + int, + << "The number of repetitions " << arg1 + << " must be >=1."); + /** + * Exception + */ + DeclException1 (ExcInvalidRepetitionsDimension, + int, + << "The vector of repetitions must have " + << arg1 <<" elements."); - private: - /** - * Perform the action specified - * by the @p colorize flag of - * the hyper_rectangle() - * function of this class. - */ - template - static - void - colorize_hyper_rectangle (Triangulation &tria); + private: + /** + * Perform the action specified + * by the @p colorize flag of + * the hyper_rectangle() + * function of this class. + */ + template + static + void + colorize_hyper_rectangle (Triangulation &tria); - /** - * Perform the action specified - * by the @p colorize flag of - * the - * subdivided_hyper_rectangle() - * function of this class. This - * function is singled out - * because it is dimension - * specific. - */ - template - static - void - colorize_subdivided_hyper_rectangle (Triangulation &tria, - const Point &p1, - const Point &p2, - const double epsilon); + /** + * Perform the action specified + * by the @p colorize flag of + * the + * subdivided_hyper_rectangle() + * function of this class. This + * function is singled out + * because it is dimension + * specific. + */ + template + static + void + colorize_subdivided_hyper_rectangle (Triangulation &tria, + const Point &p1, + const Point &p2, + const double epsilon); - /** - * Assign boundary number zero to - * the inner shell boundary and 1 - * to the outer. - */ - template - static - void - colorize_hyper_shell (Triangulation& tria, - const Point& center, - const double inner_radius, - const double outer_radius); + /** + * Assign boundary number zero to + * the inner shell boundary and 1 + * to the outer. + */ + template + static + void + colorize_hyper_shell (Triangulation &tria, + const Point ¢er, + const double inner_radius, + const double outer_radius); - /** - * Assign boundary number zero the inner - * shell boundary, one to the outer shell - * boundary, two to the face with x=0, - * three to the face with y=0, four to - * the face with z=0. - */ - template - static - void - colorize_quarter_hyper_shell(Triangulation & tria, - const Point& center, - const double inner_radius, - const double outer_radius); + /** + * Assign boundary number zero the inner + * shell boundary, one to the outer shell + * boundary, two to the face with x=0, + * three to the face with y=0, four to + * the face with z=0. + */ + template + static + void + colorize_quarter_hyper_shell(Triangulation &tria, + const Point ¢er, + const double inner_radius, + const double outer_radius); - /** - * Solve the Laplace equation for - * @p laplace_transformation - * function for one of the - * @p dim space - * dimensions. Externalized into - * a function of its own in order - * to allow parallel execution. 
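// A sketch of laplace_transformation(): prescribe new locations for selected
// boundary vertices and let the function move the remaining vertices
// smoothly. The chosen vertex index and target point are purely illustrative
// assumptions of this sketch.

#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/base/point.h>
#include <map>

using namespace dealii;

void laplace_transform_sketch ()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube (tria, 0., 1.);
  tria.refine_global (3);

  // Map from vertex index to its new location in the transformed domain.
  std::map<unsigned int, Point<2> > new_points;
  new_points[0] = Point<2> (-0.1, -0.1);   // pull the corner vertex outwards

  GridGenerator::laplace_transformation (tria, new_points);
}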
- */ - static - void - laplace_solve (const SparseMatrix &S, - const std::map &m, - Vector &u); + /** + * Solve the Laplace equation for + * @p laplace_transformation + * function for one of the + * @p dim space + * dimensions. Externalized into + * a function of its own in order + * to allow parallel execution. + */ + static + void + laplace_solve (const SparseMatrix &S, + const std::map &m, + Vector &u); }; diff --cc deal.II/include/deal.II/grid/grid_in.h index a8df701221,067f89caa4..fbd5110423 --- a/deal.II/include/deal.II/grid/grid_in.h +++ b/deal.II/include/deal.II/grid/grid_in.h @@@ -261,338 -261,338 +261,338 @@@ struct SubCellData template class GridIn { - public: - /** - * List of possible mesh input - * formats. These values are used - * when calling the function - * read() in order to determine - * the actual reader to be - * called. - */ - enum Format - { - /// Use GridIn::default_format stored in this object - Default, - /// Use read_unv() - unv, - /// Use read_ucd() - ucd, - /// Use read_dbmesh() - dbmesh, - /// Use read_xda() - xda, - /// Use read_msh() - msh, - /// Use read_netcdf() - netcdf, - /// Use read_tecplot() - tecplot - }; - - /** - * Constructor. - */ - GridIn (); - - /** - * Attach this triangulation - * to be fed with the grid data. - */ - void attach_triangulation (Triangulation &tria); - - /** - * Read from the given stream. If - * no format is given, - * GridIn::Format::Default is - * used. - */ - void read (std::istream &in, Format format=Default); - - /** - * Open the file given by the - * string and call the previous - * function read(). This function - * uses the PathSearch mechanism - * to find files. The file class - * used is MESH. - */ - void read (const std::string &in, Format format=Default); - - /** - * Read grid data from an unv - * file as generated by the - * Salome mesh generator. - * Numerical data is ignored. - * - * Note the comments on - * generating this file format in - * the general documentation of - * this class. - */ - void read_unv(std::istream &in); - - /** - * Read grid data from an ucd file. - * Numerical data is ignored. - */ - void read_ucd (std::istream &in); - - /** - * Read grid data from a file - * containing data in the DB mesh - * format. - */ - void read_dbmesh (std::istream &in); - - /** - * Read grid data from a file - * containing data in the XDA - * format. - */ - void read_xda (std::istream &in); - - /** - * Read grid data from an msh - * file, either version 1 or - * version 2 of that file - * format. The GMSH formats are - * documented at - * http://www.geuz.org/gmsh/ . - * - * @note The input function of - * deal.II does not distinguish - * between newline and other - * whitespace. Therefore, deal.II - * will be able to read files in - * a slightly more general format - * than Gmsh. - */ - void read_msh (std::istream &in); - - /** - * Read grid data from a NetCDF - * file. The only data format - * currently supported is the - * TAU grid format. - * - * This function requires the - * library to be linked with the - * NetCDF library. - */ - void read_netcdf (const std::string &filename); - - /** - * Read grid data from a file containing - * tecplot ASCII data. This also works in - * the absence of any tecplot - * installation. - */ - void read_tecplot (std::istream &in); - - /** - * Returns the standard suffix - * for a file in this format. - */ - static std::string default_suffix (const Format format); - - /** - * Return the enum Format for the - * format name. 
- */ - static Format parse_format (const std::string &format_name); - - /** - * Return a list of implemented input - * formats. The different names are - * separated by vertical bar signs (`|') - * as used by the ParameterHandler - * classes. - */ - static std::string get_format_names (); - - /** - * Exception - */ - DeclException1(ExcUnknownSectionType, - int, - << "The section type <" << arg1 << "> in an UNV " - << "input file is not implemented."); - - /** - * Exception - */ - DeclException1(ExcUnknownElementType, - int, - << "The element type <" << arg1 << "> in an UNV " - << "input file is not implemented."); - - /** - * Exception - */ - DeclException1 (ExcUnknownIdentifier, - std::string, - << "The identifier <" << arg1 << "> as name of a " - << "part in an UCD input file is unknown or the " - << "respective input routine is not implemented." - << "(Maybe the space dimension of triangulation and " - << "input file do not match?"); - /** - * Exception - */ - DeclException0 (ExcNoTriangulationSelected); - /** - * Exception - */ - DeclException2 (ExcInvalidVertexIndex, - int, int, - << "Trying to access invalid vertex index " << arg2 - << " while creating cell " << arg1); - /** - * Exception - */ - DeclException0 (ExcInvalidDBMeshFormat); - /** - * Exception - */ - DeclException1 (ExcInvalidDBMESHInput, - std::string, - << "The string <" << arg1 << "> is not recognized at the present" - << " position of a DB Mesh file."); - - /** - * Exception - */ - DeclException1 (ExcDBMESHWrongDimension, - int, - << "The specified dimension " << arg1 - << " is not the same as that of the triangulation to be created."); - - DeclException1 (ExcInvalidGMSHInput, - std::string, - << "The string <" << arg1 << "> is not recognized at the present" - << " position of a Gmsh Mesh file."); - - DeclException1 (ExcGmshUnsupportedGeometry, - int, - << "The Element Identifier <" << arg1 << "> is not " - << "supported in the Deal.II Library.\n" - << "Supported elements are: \n" - << "ELM-TYPE\n" - << "1 Line (2 nodes, 1 edge).\n" - << "3 Quadrilateral (4 nodes, 4 edges).\n" - << "5 Hexahedron (8 nodes, 12 edges, 6 faces).\n" - << "15 Point (1 node, ignored when read)"); - - - DeclException0 (ExcGmshNoCellInformation); - protected: - /** - * Store address of the triangulation to - * be fed with the data read in. - */ - SmartPointer,GridIn > tria; - - /** - * This function can write the - * raw cell data objects created - * by the read_* functions in - * Gnuplot format to a - * stream. This is sometimes - * handy if one would like to see - * what actually was created, if - * it is known that the data is - * not correct in some way, but - * the Triangulation class - * refuses to generate a - * triangulation because of these - * errors. In particular, the - * output of this class writes - * out the cell numbers along - * with the direction of the - * faces of each cell. In - * particular the latter - * information is needed to - * verify whether the cell data - * objects follow the - * requirements of the ordering - * of cells and their faces, - * i.e. that all faces need to - * have unique directions and - * specified orientations with - * respect to neighboring cells - * (see the documentations to - * this class and the - * GridReordering class). - * - * The output of this function - * consists of vectors for each - * line bounding the cells - * indicating the direction it - * has with respect to the - * orientation of this cell, and - * the cell number. 
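parse_format(), default_suffix() and get_format_names() are most useful when the format is chosen at run time, for example from a parameter file. A sketch; the format string is an assumed user input, the triangulation is defined elsewhere, and the snippet assumes default_suffix() returns the suffix including the leading dot:

    #include <deal.II/grid/grid_in.h>
    #include <fstream>
    #include <string>

    const std::string format_name = "msh";          // e.g. read from a ParameterHandler
    const GridIn<2>::Format format = GridIn<2>::parse_format (format_name);

    // Assemble a file name carrying the canonical suffix for that format.
    const std::string file_name = "grid" + GridIn<2>::default_suffix (format);

    GridIn<2> grid_in;
    grid_in.attach_triangulation (triangulation);
    std::ifstream input (file_name.c_str());
    grid_in.read (input, format);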
The whole - * output is in a form such that - * it can be read in by Gnuplot - * and generate the full plot - * without further ado by the - * user. - */ - static void debug_output_grid (const std::vector > &cells, - const std::vector > &vertices, - std::ostream &out); - - private: - - /** - * Skip empty lines in the input - * stream, i.e. lines that - * contain either nothing or only - * whitespace. - */ - static void skip_empty_lines (std::istream &in); - - /** - * Skip lines of comment that - * start with the indicated - * character (e.g. #) - * following the point where the - * given input stream presently - * is. After the call to this - * function, the stream is at the - * start of the first line after - * the comment lines, or at the - * same position as before if - * there were no lines of - * comments. - */ - static void skip_comment_lines (std::istream &in, - const char comment_start); - - /** - * This function does the nasty work (due - * to very lax conventions and different - * versions of the tecplot format) of - * extracting the important parameters from - * a tecplot header, contained in the - * string @p header. The other variables - * are output variables, their value has no - * influence on the function execution.. - */ - static void parse_tecplot_header(std::string &header, - std::vector &tecplot2deal, - unsigned int &n_vars, - unsigned int &n_vertices, - unsigned int &n_cells, - std::vector &IJK, - bool &structured, - bool &blocked); - - /** - * Input format used by read() if - * no format is given. - */ - Format default_format; + public: + /** + * List of possible mesh input + * formats. These values are used + * when calling the function + * read() in order to determine + * the actual reader to be + * called. + */ + enum Format + { + /// Use GridIn::default_format stored in this object + Default, + /// Use read_unv() + unv, + /// Use read_ucd() + ucd, + /// Use read_dbmesh() + dbmesh, + /// Use read_xda() + xda, + /// Use read_msh() + msh, + /// Use read_netcdf() + netcdf, + /// Use read_tecplot() + tecplot + }; + + /** + * Constructor. + */ + GridIn (); + + /** + * Attach this triangulation + * to be fed with the grid data. + */ + void attach_triangulation (Triangulation &tria); + + /** + * Read from the given stream. If + * no format is given, + * GridIn::Format::Default is + * used. + */ + void read (std::istream &in, Format format=Default); + + /** + * Open the file given by the + * string and call the previous + * function read(). This function + * uses the PathSearch mechanism + * to find files. The file class + * used is MESH. + */ + void read (const std::string &in, Format format=Default); + + /** + * Read grid data from an unv + * file as generated by the + * Salome mesh generator. + * Numerical data is ignored. + * + * Note the comments on + * generating this file format in + * the general documentation of + * this class. + */ + void read_unv(std::istream &in); + + /** + * Read grid data from an ucd file. + * Numerical data is ignored. + */ + void read_ucd (std::istream &in); + + /** + * Read grid data from a file + * containing data in the DB mesh + * format. + */ + void read_dbmesh (std::istream &in); + + /** + * Read grid data from a file + * containing data in the XDA + * format. + */ + void read_xda (std::istream &in); + + /** + * Read grid data from an msh + * file, either version 1 or + * version 2 of that file + * format. The GMSH formats are + * documented at + * http://www.geuz.org/gmsh/ . 
+ * + * @note The input function of + * deal.II does not distinguish + * between newline and other + * whitespace. Therefore, deal.II + * will be able to read files in + * a slightly more general format + * than Gmsh. + */ + void read_msh (std::istream &in); + + /** + * Read grid data from a NetCDF + * file. The only data format + * currently supported is the + * TAU grid format. + * + * This function requires the + * library to be linked with the + * NetCDF library. + */ + void read_netcdf (const std::string &filename); + + /** + * Read grid data from a file containing + * tecplot ASCII data. This also works in + * the absence of any tecplot + * installation. + */ + void read_tecplot (std::istream &in); + + /** + * Returns the standard suffix + * for a file in this format. + */ + static std::string default_suffix (const Format format); + + /** + * Return the enum Format for the + * format name. + */ + static Format parse_format (const std::string &format_name); + + /** + * Return a list of implemented input + * formats. The different names are + * separated by vertical bar signs (`|') + * as used by the ParameterHandler + * classes. + */ + static std::string get_format_names (); + + /** + * Exception + */ + DeclException1(ExcUnknownSectionType, + int, + << "The section type <" << arg1 << "> in an UNV " + << "input file is not implemented."); + + /** + * Exception + */ + DeclException1(ExcUnknownElementType, + int, + << "The element type <" << arg1 << "> in an UNV " + << "input file is not implemented."); + + /** + * Exception + */ + DeclException1 (ExcUnknownIdentifier, + std::string, + << "The identifier <" << arg1 << "> as name of a " + << "part in an UCD input file is unknown or the " + << "respective input routine is not implemented." + << "(Maybe the space dimension of triangulation and " + << "input file do not match?"); + /** + * Exception + */ + DeclException0 (ExcNoTriangulationSelected); + /** + * Exception + */ + DeclException2 (ExcInvalidVertexIndex, + int, int, + << "Trying to access invalid vertex index " << arg2 + << " while creating cell " << arg1); + /** + * Exception + */ + DeclException0 (ExcInvalidDBMeshFormat); + /** + * Exception + */ + DeclException1 (ExcInvalidDBMESHInput, + std::string, + << "The string <" << arg1 << "> is not recognized at the present" + << " position of a DB Mesh file."); + + /** + * Exception + */ + DeclException1 (ExcDBMESHWrongDimension, + int, + << "The specified dimension " << arg1 + << " is not the same as that of the triangulation to be created."); + + DeclException1 (ExcInvalidGMSHInput, + std::string, + << "The string <" << arg1 << "> is not recognized at the present" + << " position of a Gmsh Mesh file."); + + DeclException1 (ExcGmshUnsupportedGeometry, + int, + << "The Element Identifier <" << arg1 << "> is not " + << "supported in the Deal.II Library.\n" + << "Supported elements are: \n" + << "ELM-TYPE\n" + << "1 Line (2 nodes, 1 edge).\n" + << "3 Quadrilateral (4 nodes, 4 edges).\n" + << "5 Hexahedron (8 nodes, 12 edges, 6 faces).\n" + << "15 Point (1 node, ignored when read)"); + + + DeclException0 (ExcGmshNoCellInformation); + protected: + /** + * Store address of the triangulation to + * be fed with the data read in. + */ + SmartPointer,GridIn > tria; + + /** + * This function can write the + * raw cell data objects created + * by the read_* functions in + * Gnuplot format to a + * stream. 
This is sometimes + * handy if one would like to see + * what actually was created, if + * it is known that the data is + * not correct in some way, but + * the Triangulation class + * refuses to generate a + * triangulation because of these + * errors. In particular, the + * output of this class writes + * out the cell numbers along + * with the direction of the + * faces of each cell. In + * particular the latter + * information is needed to + * verify whether the cell data + * objects follow the + * requirements of the ordering + * of cells and their faces, + * i.e. that all faces need to + * have unique directions and + * specified orientations with + * respect to neighboring cells + * (see the documentations to + * this class and the + * GridReordering class). + * + * The output of this function + * consists of vectors for each + * line bounding the cells + * indicating the direction it + * has with respect to the + * orientation of this cell, and + * the cell number. The whole + * output is in a form such that + * it can be read in by Gnuplot + * and generate the full plot + * without further ado by the + * user. + */ + static void debug_output_grid (const std::vector > &cells, + const std::vector > &vertices, + std::ostream &out); + + private: + + /** + * Skip empty lines in the input + * stream, i.e. lines that + * contain either nothing or only + * whitespace. + */ + static void skip_empty_lines (std::istream &in); + + /** + * Skip lines of comment that + * start with the indicated + * character (e.g. #) + * following the point where the + * given input stream presently + * is. After the call to this + * function, the stream is at the + * start of the first line after + * the comment lines, or at the + * same position as before if + * there were no lines of + * comments. + */ + static void skip_comment_lines (std::istream &in, + const char comment_start); + + /** + * This function does the nasty work (due + * to very lax conventions and different + * versions of the tecplot format) of + * extracting the important parameters from + * a tecplot header, contained in the + * string @p header. The other variables + * are output variables, their value has no + * influence on the function execution.. + */ + static void parse_tecplot_header(std::string &header, + std::vector &tecplot2deal, - unsigned int &n_vars, - unsigned int &n_vertices, - unsigned int &n_cells, ++ unsigned int &n_vars, ++ unsigned int &n_vertices, ++ unsigned int &n_cells, + std::vector &IJK, + bool &structured, + bool &blocked); + + /** + * Input format used by read() if + * no format is given. + */ + Format default_format; }; diff --cc deal.II/include/deal.II/grid/grid_reordering_internal.h index aaf09e7068,3d15b6f19c..89c89b7da4 --- a/deal.II/include/deal.II/grid/grid_reordering_internal.h +++ b/deal.II/include/deal.II/grid/grid_reordering_internal.h @@@ -102,98 -102,98 +102,98 @@@ namespace interna }; - /** - * An enriched quad with information about how the mesh fits together - * so that we can move around the mesh efficiently. - * - * @author Michael Anderson, 2003 - */ + /** + * An enriched quad with information about how the mesh fits together + * so that we can move around the mesh efficiently. 
+ * + * @author Michael Anderson, 2003 + */ class MQuad { - public: - /** - * v0 - v3 are indexes of the - * vertices of the quad, s0 - - * s3 are indexes for the - * sides of the quad - */ - MQuad (const unsigned int v0, - const unsigned int v1, - const unsigned int v2, - const unsigned int v3, - const unsigned int s0, - const unsigned int s1, - const unsigned int s2, - const unsigned int s3, - const CellData<2> &cd); - - /** - * Stores the vertex numbers - */ - unsigned int v[4]; - /** - * Stores the side numbers - */ - unsigned int side[4]; - - /** - * Copy of the @p CellData object - * from which we construct the - * data of this object. - */ - CellData<2> original_cell_data; + public: + /** + * v0 - v3 are indexes of the + * vertices of the quad, s0 - + * s3 are indexes for the + * sides of the quad + */ + MQuad (const unsigned int v0, + const unsigned int v1, + const unsigned int v2, + const unsigned int v3, + const unsigned int s0, + const unsigned int s1, + const unsigned int s2, + const unsigned int s3, - const CellData<2> &cd); ++ const CellData<2> &cd); + + /** + * Stores the vertex numbers + */ + unsigned int v[4]; + /** + * Stores the side numbers + */ + unsigned int side[4]; + + /** + * Copy of the @p CellData object + * from which we construct the + * data of this object. + */ + CellData<2> original_cell_data; }; - /** - * The enriched side class containing connectivity information. - * Orientation is from v0 to v1; Initially this should have v0 class Container, int spacedim> typename Container::active_cell_iterator - find_active_cell_around_point (const Container &container, + find_active_cell_around_point (const Container &container, const Point &p); - /** - * Find and return an iterator to - * the active cell that surrounds - * a given point @p p. The - * type of the first parameter - * may be either - * Triangulation, - * DoFHandler, hp::DoFHandler, or - * MGDoFHandler, i.e., we - * can find the cell around a - * point for iterators into each - * of these classes. - * - * The algorithm used in this - * function proceeds by first - * looking for vertex located - * closest to the given point, see - * find_closest_vertex(). Secondly, - * all adjacent cells to this point - * are found in the mesh, see - * find_cells_adjacent_to_vertex(). - * Lastly, for each of these cells, - * it is tested whether the point is - * inside. This check is performed - * using arbitrary boundary mappings. - * Still, it is possible that due - * to roundoff errors, the point - * cannot be located exactly inside - * the unit cell. In this case, - * even points at a very small - * distance outside the unit cell - * are allowed. - * - * If a point lies on the - * boundary of two or more cells, - * then the algorithm tries to identify - * the cell that is of highest - * refinement level. - * - * The function returns an - * iterator to the cell, as well - * as the local position of the - * point inside the unit - * cell. This local position - * might be located slightly - * outside an actual unit cell, - * due to numerical roundoff. - * Therefore, the point returned - * by this function should - * be projected onto the unit cell, - * using GeometryInfo::project_to_unit_cell. - * This is not automatically performed - * by the algorithm. - */ + /** + * Find and return an iterator to + * the active cell that surrounds + * a given point @p p. 
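Both overloads of find_active_cell_around_point() are used in essentially the same way; a sketch, assuming a DoFHandler<2> dof_handler that has already been set up and a made-up query point:

    #include <deal.II/base/geometry_info.h>
    #include <deal.II/grid/grid_tools.h>
    #include <deal.II/fe/mapping_q1.h>

    const Point<2> p (0.3, 0.4);                    // made-up query point

    // Simple variant: only the cell containing p is returned.
    DoFHandler<2>::active_cell_iterator cell
      = GridTools::find_active_cell_around_point (dof_handler, p);

    // Mapping variant: additionally returns the location of p in the unit
    // cell; project back onto the unit cell to guard against round-off,
    // as recommended in the documentation above.
    const MappingQ1<2> mapping;
    const std::pair<DoFHandler<2>::active_cell_iterator, Point<2> >
      cell_and_unit_point
      = GridTools::find_active_cell_around_point (mapping, dof_handler, p);
    const Point<2> unit_point
      = GeometryInfo<2>::project_to_unit_cell (cell_and_unit_point.second);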
The + * type of the first parameter + * may be either + * Triangulation, + * DoFHandler, hp::DoFHandler, or + * MGDoFHandler, i.e., we + * can find the cell around a + * point for iterators into each + * of these classes. + * + * The algorithm used in this + * function proceeds by first + * looking for vertex located + * closest to the given point, see + * find_closest_vertex(). Secondly, + * all adjacent cells to this point + * are found in the mesh, see + * find_cells_adjacent_to_vertex(). + * Lastly, for each of these cells, + * it is tested whether the point is + * inside. This check is performed + * using arbitrary boundary mappings. + * Still, it is possible that due + * to roundoff errors, the point + * cannot be located exactly inside + * the unit cell. In this case, + * even points at a very small + * distance outside the unit cell + * are allowed. + * + * If a point lies on the + * boundary of two or more cells, + * then the algorithm tries to identify + * the cell that is of highest + * refinement level. + * + * The function returns an + * iterator to the cell, as well + * as the local position of the + * point inside the unit + * cell. This local position + * might be located slightly + * outside an actual unit cell, + * due to numerical roundoff. + * Therefore, the point returned + * by this function should + * be projected onto the unit cell, + * using GeometryInfo::project_to_unit_cell. + * This is not automatically performed + * by the algorithm. + */ template class Container, int spacedim> std::pair::active_cell_iterator, Point > find_active_cell_around_point (const Mapping &mapping, @@@ -570,48 -570,48 +570,48 @@@ const SparsityPattern &cell_connection_graph, Triangulation &triangulation); - /** - * For each active cell, return in the - * output array to which subdomain (as - * given by the cell->subdomain_id() - * function) it belongs. The output array - * is supposed to have the right size - * already when calling this function. - * - * This function returns the association - * of each cell with one subdomain. If - * you are looking for the association of - * each @em DoF with a subdomain, use the - * DoFTools::get_subdomain_association - * function. - */ + /** + * For each active cell, return in the + * output array to which subdomain (as + * given by the cell->subdomain_id() + * function) it belongs. The output array + * is supposed to have the right size + * already when calling this function. + * + * This function returns the association + * of each cell with one subdomain. If + * you are looking for the association of + * each @em DoF with a subdomain, use the + * DoFTools::get_subdomain_association + * function. + */ template void - get_subdomain_association (const Triangulation &triangulation, + get_subdomain_association (const Triangulation &triangulation, std::vector &subdomain); - /** - * Count how many cells are uniquely - * associated with the given @p subdomain - * index. - * - * This function may return zero - * if there are no cells with the - * given @p subdomain index. This - * can happen, for example, if - * you try to partition a coarse - * mesh into more partitions (one - * for each processor) than there - * are cells in the mesh. - * - * This function returns the number of - * cells associated with one - * subdomain. If you are looking for the - * association of @em DoFs with this - * subdomain, use the - * DoFTools::count_dofs_with_subdomain_association - * function. 
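A sketch of how the partitioning and subdomain queries documented here fit together; the number of partitions is arbitrary, partition_triangulation() assumes a partitioner (METIS) is available, and the element type of the subdomain vector is written here as types::subdomain_id_t, a typedef whose exact name has varied between versions:

    #include <deal.II/grid/grid_tools.h>
    #include <vector>

    const unsigned int n_partitions = 4;            // made-up value
    GridTools::partition_triangulation (n_partitions, triangulation);

    // One subdomain id per active cell; the vector must be sized beforehand.
    std::vector<types::subdomain_id_t> subdomain_of_cell (triangulation.n_active_cells());
    GridTools::get_subdomain_association (triangulation, subdomain_of_cell);

    // How many cells ended up in partition 0?
    const unsigned int n_cells_on_0
      = GridTools::count_cells_with_subdomain_association (triangulation, 0);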
- */ + /** + * Count how many cells are uniquely + * associated with the given @p subdomain + * index. + * + * This function may return zero + * if there are no cells with the + * given @p subdomain index. This + * can happen, for example, if + * you try to partition a coarse + * mesh into more partitions (one + * for each processor) than there + * are cells in the mesh. + * + * This function returns the number of + * cells associated with one + * subdomain. If you are looking for the + * association of @em DoFs with this + * subdomain, use the + * DoFTools::count_dofs_with_subdomain_association + * function. + */ template unsigned int count_cells_with_subdomain_association (const Triangulation &triangulation, diff --cc deal.II/include/deal.II/grid/tria_objects.h index b9c60b5567,c90e306a62..106f6cf3f1 --- a/deal.II/include/deal.II/grid/tria_objects.h +++ b/deal.II/include/deal.II/grid/tria_objects.h @@@ -57,456 -57,456 +57,456 @@@ namespace interna template class TriaObjects { - public: - /** - * Constructor resetting some data. - */ - TriaObjects(); - - /** - * Vector of the objects belonging to - * this level. The index of the object - * equals the index in this container. - */ - std::vector cells; - /** - * Index of the even children of an object. - * Since when objects are refined, all - * children are created at the same - * time, they are appended to the list - * at least in pairs after each other. - * We therefore only store the index - * of the even children, the uneven - * follow immediately afterwards. - * - * If an object has no children, -1 is - * stored in this list. An object is - * called active if it has no - * children. The function - * TriaAccessorBase::has_children() - * tests for this. - */ - std::vector children; - - /** - * Store the refinement - * case each of the - * cells is refined - * with. This vector - * might be replaced by - * vector > - * (dim, vector - * (n_cells)) which is - * more memory efficient. - */ - std::vector > refinement_cases; - - /** - * Vector storing whether an object is - * used in the @p cells vector. - * - * Since it is difficult to delete - * elements in a @p vector, when an - * element is not needed any more - * (e.g. after derefinement), it is - * not deleted from the list, but - * rather the according @p used flag - * is set to @p false. - */ - std::vector used; - - /** - * Make available a field for user data, - * one bit per object. This field is usually - * used when an operation runs over all - * cells and needs information whether - * another cell (e.g. a neighbor) has - * already been processed. - * - * You can clear all used flags using - * dealii::Triangulation::clear_user_flags(). - */ - std::vector user_flags; - - - /** - * We use this union to store - * boundary and material - * data. Because only one one - * out of these two is - * actually needed here, we - * use an union. - */ - struct BoundaryOrMaterialId + public: + /** + * Constructor resetting some data. + */ + TriaObjects(); + + /** + * Vector of the objects belonging to + * this level. The index of the object + * equals the index in this container. + */ + std::vector cells; + /** + * Index of the even children of an object. + * Since when objects are refined, all + * children are created at the same + * time, they are appended to the list + * at least in pairs after each other. + * We therefore only store the index + * of the even children, the uneven + * follow immediately afterwards. + * + * If an object has no children, -1 is + * stored in this list. 
An object is + * called active if it has no + * children. The function + * TriaAccessorBase::has_children() + * tests for this. + */ + std::vector children; + + /** + * Store the refinement + * case each of the + * cells is refined + * with. This vector + * might be replaced by + * vector > + * (dim, vector + * (n_cells)) which is + * more memory efficient. + */ + std::vector > refinement_cases; + + /** + * Vector storing whether an object is + * used in the @p cells vector. + * + * Since it is difficult to delete + * elements in a @p vector, when an + * element is not needed any more + * (e.g. after derefinement), it is + * not deleted from the list, but + * rather the according @p used flag + * is set to @p false. + */ + std::vector used; + + /** + * Make available a field for user data, + * one bit per object. This field is usually + * used when an operation runs over all + * cells and needs information whether + * another cell (e.g. a neighbor) has + * already been processed. + * + * You can clear all used flags using + * dealii::Triangulation::clear_user_flags(). + */ + std::vector user_flags; + + + /** + * We use this union to store + * boundary and material + * data. Because only one one + * out of these two is + * actually needed here, we + * use an union. + */ + struct BoundaryOrMaterialId + { + union { - union - { - types::boundary_id boundary_id; - types::material_id material_id; - }; - - - /** - * Default constructor. - */ - BoundaryOrMaterialId (); - - /** - * Return the size of objects - * of this kind. - */ - static - std::size_t memory_consumption (); - - /** - * Read or write the data - * of this object to or - * from a stream for the - * purpose of - * serialization - */ - template - void serialize(Archive & ar, - const unsigned int version); + types::boundary_id boundary_id; + types::material_id material_id; }; - /** - * Store boundary and material data. For - * example, in one dimension, this field - * stores the material id of a line, which - * is a number between 0 and - * numbers::invalid_material_id-1. In more - * than one dimension, lines have no - * material id, but they may be at the - * boundary; then, we store the - * boundary indicator in this field, - * which denotes to which part of the - * boundary this line belongs and which - * boundary conditions hold on this - * part. The boundary indicator also - * is a number between zero and - * numbers::internal_face_boundary_id-1; - * the id numbers::internal_face_boundary_id - * is reserved for lines - * in the interior and may be used - * to check whether a line is at the - * boundary or not, which otherwise - * is not possible if you don't know - * which cell it belongs to. - */ - std::vector boundary_or_material_id; - - /** - * Assert that enough space - * is allocated to - * accommodate - * new_objs_in_pairs - * new objects, stored in - * pairs, plus - * new_obj_single - * stored individually. - * This function does not - * only call - * vector::reserve(), - * but does really append - * the needed elements. - * - * In 2D e.g. refined lines have to be - * stored in pairs, whereas new lines in the - * interior of refined cells can be stored as - * single lines. - */ - void reserve_space (const unsigned int new_objs_in_pairs, - const unsigned int new_objs_single = 0); - - /** - * Return an iterator to the - * next free slot for a - * single object. This - * function is only used by - * dealii::Triangulation::execute_refinement() - * in 3D. 
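The user_flags vector above is the storage behind the user-flag interface exposed on iterators and on the Triangulation itself. A usage sketch at that public level, assuming an existing Triangulation<2> triangulation:

    // Mark every boundary cell, use the marks, then reset them.
    triangulation.clear_user_flags ();

    for (Triangulation<2>::active_cell_iterator cell = triangulation.begin_active();
         cell != triangulation.end(); ++cell)
      if (cell->at_boundary())
        cell->set_user_flag ();

    for (Triangulation<2>::active_cell_iterator cell = triangulation.begin_active();
         cell != triangulation.end(); ++cell)
      if (cell->user_flag_set())
        {
          // ... e.g. treat cells touching the boundary specially ...
        }

    triangulation.clear_user_flags ();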
- * - * @warning Interestingly, - * this function is not used - * for 1D or 2D - * triangulations, where it - * seems the authors of the - * refinement function insist - * on reimplementing its - * contents. - * - * @todo This function is - * not instantiated for the - * codim-one case - */ - template - dealii::TriaRawIterator > - next_free_single_object (const dealii::Triangulation &tria); - - /** - * Return an iterator to the - * next free slot for a pair - * of objects. This - * function is only used by - * dealii::Triangulation::execute_refinement() - * in 3D. - * - * @warning Interestingly, - * this function is not used - * for 1D or 2D - * triangulations, where it - * seems the authors of the - * refinement function insist - * on reimplementing its - * contents. - * - * @todo This function is - * not instantiated for the - * codim-one case - */ - template - dealii::TriaRawIterator > - next_free_pair_object (const dealii::Triangulation &tria); - - /** - * Return an iterator to the - * next free slot for a pair - * of hexes. Only implemented - * for - * G=Hexahedron. - */ - template - typename dealii::Triangulation::raw_hex_iterator - next_free_hex (const dealii::Triangulation &tria, - const unsigned int level); - - /** - * Clear all the data contained in this object. - */ - void clear(); - - /** - * The orientation of the - * face number face - * of the cell with number - * cell. The return - * value is true, if - * the normal vector points - * the usual way - * (GeometryInfo::unit_normal_orientation) - * and false else. - * - * The result is always - * true in this - * class, but derived classes - * will reimplement this. - * - * @warning There is a bug in - * the class hierarchy right - * now. Avoid ever calling - * this function through a - * reference, since you might - * end up with the base class - * function instead of the - * derived class. Still, we - * do not want to make it - * virtual for efficiency - * reasons. - */ - bool face_orientation(const unsigned int cell, const unsigned int face) const; - - - /** - * Access to user pointers. - */ - void*& user_pointer(const unsigned int i); - - /** - * Read-only access to user pointers. - */ - const void* user_pointer(const unsigned int i) const; - - /** - * Access to user indices. - */ - unsigned int& user_index(const unsigned int i); - - /** - * Read-only access to user pointers. - */ - unsigned int user_index(const unsigned int i) const; - - /** - * Reset user data to zero. - */ - void clear_user_data(const unsigned int i); - - /** - * Clear all user pointers or - * indices and reset their - * type, such that the next - * access may be aither or. - */ - void clear_user_data(); - - /** - * Clear all user flags. - */ - void clear_user_flags(); - - /** - * Check the memory consistency of the - * different containers. Should only be - * called with the prepro flag @p DEBUG - * set. The function should be called from - * the functions of the higher - * TriaLevel classes. - */ - void monitor_memory (const unsigned int true_dimension) const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. - */ - std::size_t memory_consumption () const; - - /** - * Read or write the data of this object to or - * from a stream for the purpose of serialization - */ + + + /** + * Default constructor. + */ + BoundaryOrMaterialId (); + + /** + * Return the size of objects + * of this kind. 
+ */ + static + std::size_t memory_consumption (); + + /** + * Read or write the data + * of this object to or + * from a stream for the + * purpose of + * serialization + */ template - void serialize(Archive & ar, + void serialize(Archive &ar, const unsigned int version); - - /** - * Exception - */ - DeclException3 (ExcMemoryWasted, - char*, int, int, - << "The container " << arg1 << " contains " - << arg2 << " elements, but it`s capacity is " - << arg3 << "."); - /** - * Exception - * @ingroup Exceptions - */ - DeclException2 (ExcMemoryInexact, - int, int, - << "The containers have sizes " << arg1 << " and " - << arg2 << ", which is not as expected."); - - /** - * Exception - */ - DeclException2 (ExcWrongIterator, - char*, char*, - << "You asked for the next free " << arg1 << "_iterator, " - "but you can only ask for " << arg2 <<"_iterators."); - - /** - * dealii::Triangulation objects can - * either access a user - * pointer or a user - * index. What you tried to - * do is trying to access one - * of those after using the - * other. - * - * @ingroup Exceptions - */ - DeclException0 (ExcPointerIndexClash); - - protected: - /** - * Counter for next_free_single_* functions - */ - unsigned int next_free_single; - - /** - * Counter for next_free_pair_* functions - */ - unsigned int next_free_pair; - - /** - * Bool flag for next_free_single_* functions - */ - bool reverse_order_next_free_single; - - /** - * The data type storing user - * pointers or user indices. - */ - struct UserData + }; + /** + * Store boundary and material data. For + * example, in one dimension, this field + * stores the material id of a line, which + * is a number between 0 and + * numbers::invalid_material_id-1. In more + * than one dimension, lines have no + * material id, but they may be at the + * boundary; then, we store the + * boundary indicator in this field, + * which denotes to which part of the + * boundary this line belongs and which + * boundary conditions hold on this + * part. The boundary indicator also + * is a number between zero and + * numbers::internal_face_boundary_id-1; + * the id numbers::internal_face_boundary_id + * is reserved for lines + * in the interior and may be used + * to check whether a line is at the + * boundary or not, which otherwise + * is not possible if you don't know + * which cell it belongs to. + */ + std::vector boundary_or_material_id; + + /** + * Assert that enough space + * is allocated to + * accommodate + * new_objs_in_pairs + * new objects, stored in + * pairs, plus + * new_obj_single + * stored individually. + * This function does not + * only call + * vector::reserve(), + * but does really append + * the needed elements. + * + * In 2D e.g. refined lines have to be + * stored in pairs, whereas new lines in the + * interior of refined cells can be stored as + * single lines. + */ + void reserve_space (const unsigned int new_objs_in_pairs, + const unsigned int new_objs_single = 0); + + /** + * Return an iterator to the + * next free slot for a + * single object. This + * function is only used by + * dealii::Triangulation::execute_refinement() + * in 3D. + * + * @warning Interestingly, + * this function is not used + * for 1D or 2D + * triangulations, where it + * seems the authors of the + * refinement function insist + * on reimplementing its + * contents. 
+ * + * @todo This function is + * not instantiated for the + * codim-one case + */ + template + dealii::TriaRawIterator > + next_free_single_object (const dealii::Triangulation &tria); + + /** + * Return an iterator to the + * next free slot for a pair + * of objects. This + * function is only used by + * dealii::Triangulation::execute_refinement() + * in 3D. + * + * @warning Interestingly, + * this function is not used + * for 1D or 2D + * triangulations, where it + * seems the authors of the + * refinement function insist + * on reimplementing its + * contents. + * + * @todo This function is + * not instantiated for the + * codim-one case + */ + template + dealii::TriaRawIterator > + next_free_pair_object (const dealii::Triangulation &tria); + + /** + * Return an iterator to the + * next free slot for a pair + * of hexes. Only implemented + * for + * G=Hexahedron. + */ + template + typename dealii::Triangulation::raw_hex_iterator + next_free_hex (const dealii::Triangulation &tria, + const unsigned int level); + + /** + * Clear all the data contained in this object. + */ + void clear(); + + /** + * The orientation of the + * face number face + * of the cell with number + * cell. The return + * value is true, if + * the normal vector points + * the usual way + * (GeometryInfo::unit_normal_orientation) + * and false else. + * + * The result is always + * true in this + * class, but derived classes + * will reimplement this. + * + * @warning There is a bug in + * the class hierarchy right + * now. Avoid ever calling + * this function through a + * reference, since you might + * end up with the base class + * function instead of the + * derived class. Still, we + * do not want to make it + * virtual for efficiency + * reasons. + */ + bool face_orientation(const unsigned int cell, const unsigned int face) const; + + + /** + * Access to user pointers. + */ - void *&user_pointer(const unsigned int i); ++ void *&user_pointer(const unsigned int i); + + /** + * Read-only access to user pointers. + */ + const void *user_pointer(const unsigned int i) const; + + /** + * Access to user indices. + */ + unsigned int &user_index(const unsigned int i); + + /** + * Read-only access to user pointers. + */ + unsigned int user_index(const unsigned int i) const; + + /** + * Reset user data to zero. + */ + void clear_user_data(const unsigned int i); + + /** + * Clear all user pointers or + * indices and reset their + * type, such that the next + * access may be aither or. + */ + void clear_user_data(); + + /** + * Clear all user flags. + */ + void clear_user_flags(); + + /** + * Check the memory consistency of the + * different containers. Should only be + * called with the prepro flag @p DEBUG + * set. The function should be called from + * the functions of the higher + * TriaLevel classes. + */ + void monitor_memory (const unsigned int true_dimension) const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. 
+ */ + std::size_t memory_consumption () const; + + /** + * Read or write the data of this object to or + * from a stream for the purpose of serialization + */ + template + void serialize(Archive &ar, + const unsigned int version); + + /** + * Exception + */ + DeclException3 (ExcMemoryWasted, + char *, int, int, + << "The container " << arg1 << " contains " + << arg2 << " elements, but it`s capacity is " + << arg3 << "."); + /** + * Exception + * @ingroup Exceptions + */ + DeclException2 (ExcMemoryInexact, + int, int, + << "The containers have sizes " << arg1 << " and " + << arg2 << ", which is not as expected."); + + /** + * Exception + */ + DeclException2 (ExcWrongIterator, + char *, char *, + << "You asked for the next free " << arg1 << "_iterator, " + "but you can only ask for " << arg2 <<"_iterators."); + + /** + * dealii::Triangulation objects can + * either access a user + * pointer or a user + * index. What you tried to + * do is trying to access one + * of those after using the + * other. + * + * @ingroup Exceptions + */ + DeclException0 (ExcPointerIndexClash); + + protected: + /** + * Counter for next_free_single_* functions + */ + unsigned int next_free_single; + + /** + * Counter for next_free_pair_* functions + */ + unsigned int next_free_pair; + + /** + * Bool flag for next_free_single_* functions + */ + bool reverse_order_next_free_single; + + /** + * The data type storing user + * pointers or user indices. + */ + struct UserData + { + union { - union - { - /// The entry used as user - /// pointer. - void* p; - /// The entry used as user - /// index. - unsigned int i; - }; - - /** - * Default constructor. - */ - UserData() - { - p = 0; - } - - /** - * Write the data of this object - * to a stream for the purpose of - * serialization. - */ - template - void serialize (Archive & ar, const unsigned int version); + /// The entry used as user + /// pointer. + void *p; + /// The entry used as user + /// index. + unsigned int i; }; - /** - * Enum descibing the - * possible types of - * userdata. - */ - enum UserDataType + /** + * Default constructor. + */ + UserData() { - /// No userdata used yet. - data_unknown, - /// UserData contains pointers. - data_pointer, - /// UserData contains indices. - data_index - }; + p = 0; + } - - /** - * Pointer which is not used by the - * library but may be accessed and set - * by the user to handle data local to - * a line/quad/etc. - */ - std::vector user_data; - /** - * In order to avoid - * confusion between user - * pointers and indices, this - * enum is set by the first - * function accessing either - * and subsequent access will - * not be allowed to change - * the type of data accessed. - */ - mutable UserDataType user_data_type; + /** + * Write the data of this object + * to a stream for the purpose of + * serialization. + */ + template + void serialize (Archive &ar, const unsigned int version); + }; + + /** + * Enum descibing the + * possible types of + * userdata. + */ + enum UserDataType + { + /// No userdata used yet. + data_unknown, + /// UserData contains pointers. + data_pointer, + /// UserData contains indices. + data_index + }; + + + /** + * Pointer which is not used by the + * library but may be accessed and set + * by the user to handle data local to + * a line/quad/etc. 
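The UserData union and the user_data_type bookkeeping above back the user_pointer()/user_index() interface on iterators: once one kind has been accessed, the other may not be used until clear_user_data() is called (otherwise ExcPointerIndexClash is raised in debug mode). A sketch at the public level, assuming an existing Triangulation<2> triangulation and a hypothetical helper do_something_with():

    // Attach a plain number to each cell ...
    unsigned int counter = 0;
    for (Triangulation<2>::active_cell_iterator cell = triangulation.begin_active();
         cell != triangulation.end(); ++cell)
      cell->set_user_index (counter++);

    // ... and retrieve it again later.
    for (Triangulation<2>::active_cell_iterator cell = triangulation.begin_active();
         cell != triangulation.end(); ++cell)
      do_something_with (cell->user_index ());

    // Reset before switching to user pointers (or for the next algorithm).
    triangulation.clear_user_data ();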
+ */ + std::vector user_data; + /** + * In order to avoid + * confusion between user + * pointers and indices, this + * enum is set by the first + * function accessing either + * and subsequent access will + * not be allowed to change + * the type of data accessed. + */ + mutable UserDataType user_data_type; }; - /** - * For hexahedrons the data of TriaObjects needs to be extended, as we can obtain faces - * (quads) in non-standard-orientation, therefore we declare a class TriaObjectsHex, which - * additionally contains a bool-vector of the face-orientations. - */ + /** + * For hexahedrons the data of TriaObjects needs to be extended, as we can obtain faces + * (quads) in non-standard-orientation, therefore we declare a class TriaObjectsHex, which + * additionally contains a bool-vector of the face-orientations. + */ class TriaObjectsHex : public TriaObjects > { diff --cc deal.II/include/deal.II/hp/dof_handler.h index d11b8361f9,57c53edc7a..14492a89b0 --- a/deal.II/include/deal.II/hp/dof_handler.h +++ b/deal.II/include/deal.II/hp/dof_handler.h @@@ -62,810 -62,794 +62,811 @@@ namespace interna namespace hp { - /** - * Manage the distribution and numbering of the degrees of freedom for - * hp-FEM algorithms. - * - * This class has not yet been implemented for the use in the codimension - * one case (spacedim != dim ). - * - * @ingroup dofs - * @ingroup hp - */ + /** + * Manage the distribution and numbering of the degrees of freedom for + * hp-FEM algorithms. + * + * This class has not yet been implemented for the use in the codimension + * one case (spacedim != dim ). + * + * @ingroup dofs + * @ingroup hp + */ template - class DoFHandler : public Subscriptor + class DoFHandler : public Subscriptor, - protected Triangulation::RefinementListener ++ protected Triangulation::RefinementListener { - typedef dealii::internal::DoFHandler::Iterators > IteratorSelector; + typedef dealii::internal::DoFHandler::Iterators > IteratorSelector; + public: + typedef typename IteratorSelector::CellAccessor cell_accessor; + typedef typename IteratorSelector::FaceAccessor face_accessor; + + typedef typename IteratorSelector::line_iterator line_iterator; + typedef typename IteratorSelector::active_line_iterator active_line_iterator; + + typedef typename IteratorSelector::quad_iterator quad_iterator; + typedef typename IteratorSelector::active_quad_iterator active_quad_iterator; + + typedef typename IteratorSelector::hex_iterator hex_iterator; + typedef typename IteratorSelector::active_hex_iterator active_hex_iterator; + + typedef typename IteratorSelector::cell_iterator cell_iterator; + typedef typename IteratorSelector::active_cell_iterator active_cell_iterator; + + typedef typename IteratorSelector::face_iterator face_iterator; + typedef typename IteratorSelector::active_face_iterator active_face_iterator; + + /** + * Alias the @p FunctionMap type + * declared elsewhere. + */ + typedef typename FunctionMap::type FunctionMap; + + /** + * Make the dimension available + * in function templates. + */ + static const unsigned int dimension = dim; + + /** + * Make the space dimension available + * in function templates. + */ + static const unsigned int space_dimension = spacedim; + + /** + * When the arrays holding the + * DoF indices are set up, but + * before they are filled with + * actual values, they are set to + * an invalid value, in order to + * monitor possible + * problems. This invalid value + * is the constant defined here. 
+ * + * Please note that you should + * not rely on it having a + * certain value, but rather take + * its symbolic name. + */ + static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int; + + /** + * The default index of the + * finite element to be used on + * a given cell. For the usual, + * non-hp dealii::DoFHandler class + * that only supports the same + * finite element to be used on + * all cells, the index of the + * finite element needs to be + * the same on all cells + * anyway, and by convention we + * pick zero for this + * value. The situation here is + * different, since the hp + * classes support the case + * where different finite + * element indices may be used + * on different cells. The + * default index consequently + * corresponds to an invalid + * value. + */ + static const unsigned int default_fe_index = numbers::invalid_unsigned_int; + + + /** + * Constructor. Take @p tria as the + * triangulation to work on. + */ + DoFHandler (const Triangulation &tria); + + /** + * Destructor. + */ + virtual ~DoFHandler (); + + /** + * Go through the triangulation and + * distribute the degrees of freedoms + * needed for the given finite element + * according to the current distribution + * of active fe indices. + * + * A pointer of the transferred + * finite element is + * stored. Therefore, the + * lifetime of the finite element + * object shall be longer than + * that of this object. If you + * don't want this behaviour, you + * may want to call the @p clear + * member function which also + * releases the lock of this + * object to the finite element. + */ + virtual void distribute_dofs (const hp::FECollection &fe); + + /** + * Go through the triangulation and set + * the active FE indices of all active + * cells to the values given in @p + * active_fe_indices. + */ + void set_active_fe_indices (const std::vector &active_fe_indices); + + /** + * Go through the triangulation and + * store the active FE indices of all + * active cells to the vector @p + * active_fe_indices. This vector is + * resized, if necessary. + */ + void get_active_fe_indices (std::vector &active_fe_indices) const; + + /** + * Clear all data of this object and + * especially delete the lock this object + * has to the finite element used the last + * time when @p distribute_dofs was called. + */ + virtual void clear (); + + /** + * Renumber degrees of freedom based on + * a list of new dof numbers for all the + * dofs. + * + * @p new_numbers is an array of integers + * with size equal to the number of dofs + * on the present grid. It stores the new + * indices after renumbering in the + * order of the old indices. + * + * This function is called by + * the functions in + * DoFRenumbering function + * after computing the ordering + * of the degrees of freedom. + * However, you can call this + * function yourself, which is + * necessary if a user wants to + * implement an ordering scheme + * herself, for example + * downwind numbering. + * + * The @p new_number array must + * have a size equal to the + * number of degrees of + * freedom. Each entry must + * state the new global DoF + * number of the degree of + * freedom referenced. + */ + void renumber_dofs (const std::vector &new_numbers); + + /** + * Return the maximum number of + * degrees of freedom a degree of freedom + * in the given triangulation with the + * given finite element may couple with. 
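A minimal sketch of the workflow implied by distribute_dofs() and the active FE index machinery described above; the polynomial degrees and the per-cell choice of index are made up, and the triangulation is assumed to exist:

    #include <deal.II/fe/fe_q.h>
    #include <deal.II/hp/fe_collection.h>
    #include <deal.II/hp/dof_handler.h>

    hp::FECollection<2> fe_collection;
    for (unsigned int degree = 1; degree <= 3; ++degree)
      fe_collection.push_back (FE_Q<2> (degree));

    hp::DoFHandler<2> dof_handler (triangulation);

    // Choose an element for each cell before distributing degrees of freedom;
    // the modulo expression is just an arbitrary way of assigning indices.
    for (hp::DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active();
         cell != dof_handler.end(); ++cell)
      cell->set_active_fe_index (cell->index() % fe_collection.size());

    dof_handler.distribute_dofs (fe_collection);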
+ * This is the maximum number of entries + * per line in the system matrix; this + * information can therefore be used upon + * construction of the SparsityPattern + * object. + * + * The returned number is not really the + * maximum number but an estimate based + * on the finite element and the maximum + * number of cells meeting at a vertex. + * The number holds for the constrained + * matrix also. + * + * As for + * DoFHandler::max_couplings_between_dofs(), + * the result of this function is often + * not very accurate for 3d and/or high + * polynomial degrees. The consequences + * are discussed in the documentation + * of the module on @ref Sparsity. + */ + unsigned int max_couplings_between_dofs () const; + + /** + * Return the number of degrees of freedom + * located on the boundary another dof on + * the boundary can couple with. + * + * The number is the same as for + * @p max_coupling_between_dofs in one + * dimension less. + */ + unsigned int max_couplings_between_boundary_dofs () const; + + /** + * @name Cell iterator functions + */ + /*@{*/ + /** + * Iterator to the first used + * cell on level @p level. + */ + cell_iterator begin (const unsigned int level = 0) const; + + /** + * Iterator to the first active + * cell on level @p level. + */ + active_cell_iterator begin_active(const unsigned int level = 0) const; + + /** + * Iterator past the end; this + * iterator serves for + * comparisons of iterators with + * past-the-end or + * before-the-beginning states. + */ + cell_iterator end () const; + + /** + * Return an iterator which is + * the first iterator not on + * level. If @p level is the + * last level, then this returns + * end(). + */ + cell_iterator end (const unsigned int level) const; + + /** + * Return an active iterator + * which is the first iterator + * not on level. If @p level is + * the last level, then this + * returns end(). + */ + active_cell_iterator end_active (const unsigned int level) const; + + //@} + + /*---------------------------------------*/ + + + /** + * Return the global number of + * degrees of freedom. If the + * current object handles all + * degrees of freedom itself + * (even if you may intend to + * solve your linear system in + * parallel, such as in step-17 + * or step-18), then this number + * equals the number of locally + * owned degrees of freedom since + * this object doesn't know + * anything about what you want + * to do with it and believes + * that it owns every degree of + * freedom it knows about. + * + * On the other hand, if this + * object operates on a + * parallel::distributed::Triangulation + * object, then this function + * returns the global number of + * degrees of freedom, + * accumulated over all + * processors. + * + * In either case, included in + * the returned number are those + * DoFs which are constrained by + * hanging nodes, see @ref constraints. + */ + unsigned int n_dofs () const; + + /** + * Return the number of degrees of freedom + * located on the boundary. + */ + unsigned int n_boundary_dofs () const; + + /** + * Return the number of degrees + * of freedom located on those + * parts of the boundary which + * have a boundary indicator + * listed in the given set. The + * reason that a @p map rather + * than a @p set is used is the + * same as described in the + * section on the + * @p make_boundary_sparsity_pattern + * function. 
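As the documentation of max_couplings_between_dofs() above suggests, its typical use is to size a SparsityPattern before filling it; a sketch, assuming dof_handler has had its degrees of freedom distributed:

    #include <deal.II/lac/sparsity_pattern.h>
    #include <deal.II/dofs/dof_tools.h>

    SparsityPattern sparsity_pattern (dof_handler.n_dofs(),
                                      dof_handler.n_dofs(),
                                      dof_handler.max_couplings_between_dofs());
    DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
    sparsity_pattern.compress ();

For 3d meshes or high polynomial degrees, where this estimate can be poor, a CompressedSparsityPattern that avoids the per-row estimate altogether is often the more robust choice.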
+ */ + unsigned int + n_boundary_dofs (const FunctionMap &boundary_indicators) const; + + /** + * Same function, but with + * different data type of the + * argument, which is here simply + * a list of the boundary + * indicators under + * consideration. + */ + unsigned int + n_boundary_dofs (const std::set &boundary_indicators) const; + + /** + * Return the number of + * degrees of freedom that + * belong to this + * process. + * + * If this is a sequential job, + * then the result equals that + * produced by n_dofs(). On the + * other hand, if we are + * operating on a + * parallel::distributed::Triangulation, + * then it includes only the + * degrees of freedom that the + * current processor owns. Note + * that in this case this does + * not include all degrees of + * freedom that have been + * distributed on the current + * processor's image of the mesh: + * in particular, some of the + * degrees of freedom on the + * interface between the cells + * owned by this processor and + * cells owned by other + * processors may be theirs, and + * degrees of freedom on ghost + * cells are also not necessarily + * included. + */ + unsigned int n_locally_owned_dofs() const; + + /** + * Return an IndexSet describing + * the set of locally owned DoFs + * as a subset of + * 0..n_dofs(). The number of + * elements of this set equals + * n_locally_owned_dofs(). + */ + const IndexSet &locally_owned_dofs() const; + + + /** + * Returns a vector that + * stores the locally owned + * DoFs of each processor. If + * you are only interested in + * the number of elements + * each processor owns then + * n_dofs_per_processor() is + * a better choice. + * + * If this is a sequential job, + * then the vector has a single + * element that equals the + * IndexSet representing the + * entire range [0,n_dofs()]. + */ + const std::vector & + locally_owned_dofs_per_processor () const; + + /** + * Return a vector that + * stores the number of + * degrees of freedom each + * processor that + * participates in this + * triangulation owns + * locally. The sum of all + * these numbers equals the + * number of degrees of + * freedom that exist + * globally, i.e. what + * n_dofs() returns. + * + * Each element of the vector + * returned by this function + * equals the number of + * elements of the + * corresponding sets + * returned by + * global_dof_indices(). + * + * If this is a sequential job, + * then the vector has a single + * element equal to n_dofs(). + */ + const std::vector & + n_locally_owned_dofs_per_processor () const; + + /** + * Return a constant reference to + * the set of finite element + * objects that are used by this + * @p DoFHandler. + */ + const hp::FECollection &get_fe () const; + + /** + * Return a constant reference to the + * triangulation underlying this object. + */ + const Triangulation &get_tria () const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. + * + * This function is made virtual, + * since a dof handler object + * might be accessed through a + * pointers to thisr base class, + * although the actual object + * might be a derived class. 
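A sketch of the ownership queries described above; in a sequential run the locally owned set is simply the complete range [0,n_dofs()):

    #include <deal.II/base/index_set.h>
    #include <iostream>

    const IndexSet &owned_dofs = dof_handler.locally_owned_dofs ();

    std::cout << "This process owns "
              << dof_handler.n_locally_owned_dofs ()
              << " of "
              << dof_handler.n_dofs ()
              << " degrees of freedom." << std::endl;

    // owned_dofs.is_element(i) tells whether global index i is owned here.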
+ */ + virtual std::size_t memory_consumption () const; + + /** + * Exception + */ + DeclException0 (ExcInvalidTriangulation); + /** + * Exception + */ + DeclException0 (ExcNoFESelected); + /** + * Exception + */ + DeclException0 (ExcRenumberingIncomplete); + /** + * Exception + */ + DeclException0 (ExcGridsDoNotMatch); + /** + * Exception + */ + DeclException0 (ExcInvalidBoundaryIndicator); + /** + * Exception + */ + DeclException1 (ExcMatrixHasWrongSize, + int, + << "The matrix has the wrong dimension " << arg1); + /** + * Exception + */ + DeclException0 (ExcFunctionNotUseful); + /** + * Exception + */ + DeclException1 (ExcNewNumbersNotConsecutive, + int, + << "The given list of new dof indices is not consecutive: " + << "the index " << arg1 << " does not exist."); + /** + * Exception + */ + DeclException2 (ExcInvalidFEIndex, + int, int, + << "The mesh contains a cell with an active_fe_index of " + << arg1 << ", but the finite element collection only has " + << arg2 << " elements"); + /** + * Exception + */ + DeclException1 (ExcInvalidLevel, + int, + << "The given level " << arg1 + << " is not in the valid range!"); + /** + * Exception + */ + DeclException0 (ExcFacesHaveNoLevel); + /** + * The triangulation level you + * accessed is empty. + */ + DeclException1 (ExcEmptyLevel, + int, + << "You tried to do something on level " << arg1 + << ", but this level is empty."); + + protected: + + /** + * Address of the triangulation to + * work on. + */ + SmartPointer,DoFHandler > tria; + + /** + * Store a pointer to the finite + * element set given latest for + * the distribution of dofs. In + * order to avoid destruction of + * the object before the lifetime + * of the DoF handler, we + * subscribe to the finite + * element object. To unlock the + * FE before the end of the + * lifetime of this DoF handler, + * use the clear() function + * (this clears all data of this + * object as well, though). + */ + SmartPointer,hp::DoFHandler > finite_elements; + + private: + + /** + * Copy constructor. I can see no reason + * why someone might want to use it, so + * I don't provide it. Since this class + * has pointer members, making it private + * prevents the compiler to provide it's + * own, incorrect one if anyone chose to + * copy such an object. + */ + DoFHandler (const DoFHandler &); + + /** + * Copy operator. I can see no reason + * why someone might want to use it, so + * I don't provide it. Since this class + * has pointer members, making it private + * prevents the compiler to provide it's + * own, incorrect one if anyone chose to + * copy such an object. 
+ */ + DoFHandler &operator = (const DoFHandler &); + ++ class MGVertexDoFs ++ { + public: - typedef typename IteratorSelector::CellAccessor cell_accessor; - typedef typename IteratorSelector::FaceAccessor face_accessor; - - typedef typename IteratorSelector::line_iterator line_iterator; - typedef typename IteratorSelector::active_line_iterator active_line_iterator; - - typedef typename IteratorSelector::quad_iterator quad_iterator; - typedef typename IteratorSelector::active_quad_iterator active_quad_iterator; - - typedef typename IteratorSelector::hex_iterator hex_iterator; - typedef typename IteratorSelector::active_hex_iterator active_hex_iterator; - - typedef typename IteratorSelector::cell_iterator cell_iterator; - typedef typename IteratorSelector::active_cell_iterator active_cell_iterator; - - typedef typename IteratorSelector::face_iterator face_iterator; - typedef typename IteratorSelector::active_face_iterator active_face_iterator; - - /** - * Alias the @p FunctionMap type - * declared elsewhere. - */ - typedef typename FunctionMap::type FunctionMap; - - /** - * Make the dimension available - * in function templates. - */ - static const unsigned int dimension = dim; - - /** - * Make the space dimension available - * in function templates. - */ - static const unsigned int space_dimension = spacedim; - - /** - * When the arrays holding the - * DoF indices are set up, but - * before they are filled with - * actual values, they are set to - * an invalid value, in order to - * monitor possible - * problems. This invalid value - * is the constant defined here. - * - * Please note that you should - * not rely on it having a - * certain value, but rather take - * its symbolic name. - */ - static const unsigned int invalid_dof_index = numbers::invalid_unsigned_int; - - /** - * The default index of the - * finite element to be used on - * a given cell. For the usual, - * non-hp dealii::DoFHandler class - * that only supports the same - * finite element to be used on - * all cells, the index of the - * finite element needs to be - * the same on all cells - * anyway, and by convention we - * pick zero for this - * value. The situation here is - * different, since the hp - * classes support the case - * where different finite - * element indices may be used - * on different cells. The - * default index consequently - * corresponds to an invalid - * value. - */ - static const unsigned int default_fe_index = numbers::invalid_unsigned_int; - - - /** - * Constructor. Take @p tria as the - * triangulation to work on. - */ - DoFHandler (const Triangulation &tria); - - /** - * Destructor. - */ - virtual ~DoFHandler (); - - /** - * Go through the triangulation and - * distribute the degrees of freedoms - * needed for the given finite element - * according to the current distribution - * of active fe indices. - * - * A pointer of the transferred - * finite element is - * stored. Therefore, the - * lifetime of the finite element - * object shall be longer than - * that of this object. If you - * don't want this behaviour, you - * may want to call the @p clear - * member function which also - * releases the lock of this - * object to the finite element. - */ - virtual void distribute_dofs (const hp::FECollection &fe); - - /** - * Go through the triangulation and set - * the active FE indices of all active - * cells to the values given in @p - * active_fe_indices. 
- */ - void set_active_fe_indices (const std::vector& active_fe_indices); - - /** - * Go through the triangulation and - * store the active FE indices of all - * active cells to the vector @p - * active_fe_indices. This vector is - * resized, if necessary. - */ - void get_active_fe_indices (std::vector& active_fe_indices) const; - - /** - * Clear all data of this object and - * especially delete the lock this object - * has to the finite element used the last - * time when @p distribute_dofs was called. - */ - virtual void clear (); - - /** - * Renumber degrees of freedom based on - * a list of new dof numbers for all the - * dofs. - * - * @p new_numbers is an array of integers - * with size equal to the number of dofs - * on the present grid. It stores the new - * indices after renumbering in the - * order of the old indices. - * - * This function is called by - * the functions in - * DoFRenumbering function - * after computing the ordering - * of the degrees of freedom. - * However, you can call this - * function yourself, which is - * necessary if a user wants to - * implement an ordering scheme - * herself, for example - * downwind numbering. - * - * The @p new_number array must - * have a size equal to the - * number of degrees of - * freedom. Each entry must - * state the new global DoF - * number of the degree of - * freedom referenced. - */ - void renumber_dofs (const std::vector &new_numbers); - - /** - * Return the maximum number of - * degrees of freedom a degree of freedom - * in the given triangulation with the - * given finite element may couple with. - * This is the maximum number of entries - * per line in the system matrix; this - * information can therefore be used upon - * construction of the SparsityPattern - * object. - * - * The returned number is not really the - * maximum number but an estimate based - * on the finite element and the maximum - * number of cells meeting at a vertex. - * The number holds for the constrained - * matrix also. - * - * As for - * DoFHandler::max_couplings_between_dofs(), - * the result of this function is often - * not very accurate for 3d and/or high - * polynomial degrees. The consequences - * are discussed in the documentation - * of the module on @ref Sparsity. - */ - unsigned int max_couplings_between_dofs () const; - - /** - * Return the number of degrees of freedom - * located on the boundary another dof on - * the boundary can couple with. - * - * The number is the same as for - * @p max_coupling_between_dofs in one - * dimension less. - */ - unsigned int max_couplings_between_boundary_dofs () const; - - /** - * @name Cell iterator functions - */ - /*@{*/ - /** - * Iterator to the first used - * cell on level @p level. - */ - cell_iterator begin (const unsigned int level = 0) const; - - /** - * Iterator to the first active - * cell on level @p level. - */ - active_cell_iterator begin_active(const unsigned int level = 0) const; - - /** - * Iterator past the end; this - * iterator serves for - * comparisons of iterators with - * past-the-end or - * before-the-beginning states. - */ - cell_iterator end () const; - - /** - * Return an iterator which is - * the first iterator not on - * level. If @p level is the - * last level, then this returns - * end(). - */ - cell_iterator end (const unsigned int level) const; - - /** - * Return an active iterator - * which is the first iterator - * not on level. If @p level is - * the last level, then this - * returns end(). 
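A minimal sketch of how the interface documented here (active FE indices, distribute_dofs(), renumbering, and the cell iterators) is typically driven, assuming an existing Triangulation<dim> named triangulation inside code templated on dim; all object names are illustrative:

    #include <deal.II/fe/fe_q.h>
    #include <deal.II/hp/dof_handler.h>
    #include <deal.II/hp/fe_collection.h>
    #include <deal.II/dofs/dof_renumbering.h>

    // A collection with two elements of different polynomial degree.
    hp::FECollection<dim> fe_collection;
    fe_collection.push_back (FE_Q<dim>(1));
    fe_collection.push_back (FE_Q<dim>(2));

    hp::DoFHandler<dim> dof_handler (triangulation);

    // Pick an element per cell before distributing degrees of freedom.
    for (typename hp::DoFHandler<dim>::active_cell_iterator
           cell = dof_handler.begin_active();
         cell != dof_handler.end(); ++cell)
      cell->set_active_fe_index (cell->at_boundary() ? 1 : 0);

    dof_handler.distribute_dofs (fe_collection);

    // Renumbering normally goes through DoFRenumbering, which in turn
    // calls renumber_dofs() with a complete list of new indices.
    DoFRenumbering::Cuthill_McKee (dof_handler);

    const unsigned int n = dof_handler.n_dofs ();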
- */ - active_cell_iterator end_active (const unsigned int level) const; - - //@} - - /*---------------------------------------*/ - - - /** - * Return the global number of - * degrees of freedom. If the - * current object handles all - * degrees of freedom itself - * (even if you may intend to - * solve your linear system in - * parallel, such as in step-17 - * or step-18), then this number - * equals the number of locally - * owned degrees of freedom since - * this object doesn't know - * anything about what you want - * to do with it and believes - * that it owns every degree of - * freedom it knows about. - * - * On the other hand, if this - * object operates on a - * parallel::distributed::Triangulation - * object, then this function - * returns the global number of - * degrees of freedom, - * accumulated over all - * processors. - * - * In either case, included in - * the returned number are those - * DoFs which are constrained by - * hanging nodes, see @ref constraints. - */ - unsigned int n_dofs () const; - - /** - * Return the number of degrees of freedom - * located on the boundary. - */ - unsigned int n_boundary_dofs () const; - - /** - * Return the number of degrees - * of freedom located on those - * parts of the boundary which - * have a boundary indicator - * listed in the given set. The - * reason that a @p map rather - * than a @p set is used is the - * same as described in the - * section on the - * @p make_boundary_sparsity_pattern - * function. - */ - unsigned int - n_boundary_dofs (const FunctionMap &boundary_indicators) const; - - /** - * Same function, but with - * different data type of the - * argument, which is here simply - * a list of the boundary - * indicators under - * consideration. - */ - unsigned int - n_boundary_dofs (const std::set &boundary_indicators) const; - - /** - * Return the number of - * degrees of freedom that - * belong to this - * process. - * - * If this is a sequential job, - * then the result equals that - * produced by n_dofs(). On the - * other hand, if we are - * operating on a - * parallel::distributed::Triangulation, - * then it includes only the - * degrees of freedom that the - * current processor owns. Note - * that in this case this does - * not include all degrees of - * freedom that have been - * distributed on the current - * processor's image of the mesh: - * in particular, some of the - * degrees of freedom on the - * interface between the cells - * owned by this processor and - * cells owned by other - * processors may be theirs, and - * degrees of freedom on ghost - * cells are also not necessarily - * included. - */ - unsigned int n_locally_owned_dofs() const; - - /** - * Return an IndexSet describing - * the set of locally owned DoFs - * as a subset of - * 0..n_dofs(). The number of - * elements of this set equals - * n_locally_owned_dofs(). - */ - const IndexSet & locally_owned_dofs() const; - - - /** - * Returns a vector that - * stores the locally owned - * DoFs of each processor. If - * you are only interested in - * the number of elements - * each processor owns then - * n_dofs_per_processor() is - * a better choice. - * - * If this is a sequential job, - * then the vector has a single - * element that equals the - * IndexSet representing the - * entire range [0,n_dofs()]. - */ - const std::vector & - locally_owned_dofs_per_processor () const; - - /** - * Return a vector that - * stores the number of - * degrees of freedom each - * processor that - * participates in this - * triangulation owns - * locally. 
The sum of all - * these numbers equals the - * number of degrees of - * freedom that exist - * globally, i.e. what - * n_dofs() returns. - * - * Each element of the vector - * returned by this function - * equals the number of - * elements of the - * corresponding sets - * returned by - * global_dof_indices(). - * - * If this is a sequential job, - * then the vector has a single - * element equal to n_dofs(). - */ - const std::vector & - n_locally_owned_dofs_per_processor () const; - - /** - * Return a constant reference to - * the set of finite element - * objects that are used by this - * @p DoFHandler. - */ - const hp::FECollection & get_fe () const; - - /** - * Return a constant reference to the - * triangulation underlying this object. - */ - const Triangulation & get_tria () const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. - * - * This function is made virtual, - * since a dof handler object - * might be accessed through a - * pointers to thisr base class, - * although the actual object - * might be a derived class. - */ - virtual std::size_t memory_consumption () const; - - /** - * Exception - */ - DeclException0 (ExcInvalidTriangulation); - /** - * Exception - */ - DeclException0 (ExcNoFESelected); - /** - * Exception - */ - DeclException0 (ExcRenumberingIncomplete); - /** - * Exception - */ - DeclException0 (ExcGridsDoNotMatch); - /** - * Exception - */ - DeclException0 (ExcInvalidBoundaryIndicator); - /** - * Exception - */ - DeclException1 (ExcMatrixHasWrongSize, - int, - << "The matrix has the wrong dimension " << arg1); - /** - * Exception - */ - DeclException0 (ExcFunctionNotUseful); - /** - * Exception - */ - DeclException1 (ExcNewNumbersNotConsecutive, - int, - << "The given list of new dof indices is not consecutive: " - << "the index " << arg1 << " does not exist."); - /** - * Exception - */ - DeclException2 (ExcInvalidFEIndex, - int, int, - << "The mesh contains a cell with an active_fe_index of " - << arg1 << ", but the finite element collection only has " - << arg2 << " elements"); - /** - * Exception - */ - DeclException1 (ExcInvalidLevel, - int, - << "The given level " << arg1 - << " is not in the valid range!"); - /** - * Exception - */ - DeclException0 (ExcFacesHaveNoLevel); - /** - * The triangulation level you - * accessed is empty. - */ - DeclException1 (ExcEmptyLevel, - int, - << "You tried to do something on level " << arg1 - << ", but this level is empty."); - - protected: - - /** - * Address of the triangulation to - * work on. - */ - SmartPointer,DoFHandler > tria; - - /** - * Store a pointer to the finite - * element set given latest for - * the distribution of dofs. In - * order to avoid destruction of - * the object before the lifetime - * of the DoF handler, we - * subscribe to the finite - * element object. To unlock the - * FE before the end of the - * lifetime of this DoF handler, - * use the clear() function - * (this clears all data of this - * object as well, though). - */ - SmartPointer,hp::DoFHandler > finite_elements; - - private: - - /** - * Copy constructor. I can see no reason - * why someone might want to use it, so - * I don't provide it. Since this class - * has pointer members, making it private - * prevents the compiler to provide it's - * own, incorrect one if anyone chose to - * copy such an object. - */ - DoFHandler (const DoFHandler &); - - /** - * Copy operator. I can see no reason - * why someone might want to use it, so - * I don't provide it. 
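For the distributed setting described above, the global and the locally owned views can be queried side by side; a sketch assuming dof_handler was built on a parallel::distributed::Triangulation (names illustrative, container element type as documented above):

    const unsigned int n_global = dof_handler.n_dofs ();               // summed over all processors
    const unsigned int n_owned  = dof_handler.n_locally_owned_dofs (); // owned by this process only
    const IndexSet    &owned    = dof_handler.locally_owned_dofs ();   // subset of [0, n_dofs())

    // One entry per participating processor; the entries sum to n_dofs().
    const std::vector<unsigned int> &per_proc
      = dof_handler.n_locally_owned_dofs_per_processor ();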
Since this class - * has pointer members, making it private - * prevents the compiler to provide it's - * own, incorrect one if anyone chose to - * copy such an object. - */ - DoFHandler & operator = (const DoFHandler &); - - class MGVertexDoFs { - public: - MGVertexDoFs (); - ~MGVertexDoFs (); - unsigned int get_index (const unsigned int level, const unsigned int dof_number) const; - void set_index (const unsigned int level, const unsigned int dof_number, const unsigned int index); - }; - - /** - * Free all used memory. - */ - void clear_space (); - - template - unsigned int get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const; - - template - void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index) const; - - /** - * Create default tables for - * the active_fe_indices in - * the - * dealii::internal::hp::DoFLevel. They - * are initialized with the a - * zero indicator, meaning - * that fe[0] is going to be - * used by default. This - * method is called before - * refinement and before - * distribute_dofs is - * called. It ensures each - * cell has a valid - * active_fe_index. - */ - - void create_active_fe_table (); - - /** - * Functions that will be triggered - * through signals whenever the - * triangulation is modified. - * - * Here they are used to - * administrate the the - * active_fe_fields during the - * spatial refinement. - */ - virtual void pre_refinement_action (); - virtual void post_refinement_action (); - - /** - * Compute identities between - * DoFs located on - * vertices. Called from - * distribute_dofs(). - */ - void - compute_vertex_dof_identities (std::vector &new_dof_indices) const; - - /** - * Compute identities between - * DoFs located on - * lines. Called from - * distribute_dofs(). - */ - void - compute_line_dof_identities (std::vector &new_dof_indices) const; - - /** - * Compute identities between - * DoFs located on - * quads. Called from - * distribute_dofs(). - */ - void - compute_quad_dof_identities (std::vector &new_dof_indices) const; - - /** - * Renumber the objects with - * the given and all lower - * structural dimensions, - * i.e. renumber vertices by - * giving a template argument - * of zero to the int2type - * argument, lines and vertices - * with one, etc. - * - * Note that in contrast to the - * public renumber_dofs() - * function, these internal - * functions do not ensure that - * the new DoFs are - * contiguously numbered. The - * function may therefore also - * be used to assign different - * DoFs the same number, for - * example to unify hp DoFs - * corresponding to different - * finite elements but - * co-located on the same - * entity. - */ - void renumber_dofs_internal (const std::vector &new_numbers, - dealii::internal::int2type<0>); - - void renumber_dofs_internal (const std::vector &new_numbers, - dealii::internal::int2type<1>); - - void renumber_dofs_internal (const std::vector &new_numbers, - dealii::internal::int2type<2>); - - void renumber_dofs_internal (const std::vector &new_numbers, - dealii::internal::int2type<3>); - - /** - * Space to store the DoF - * numbers for the different - * levels. Analogous to the - * levels[] tree of - * the Triangulation objects. - */ - std::vector*> levels; - /** - * Space to store the DoF - * numbers for the faces. - * Analogous to the - * faces pointer of - * the Triangulation objects. 
- */ - dealii::internal::hp::DoFFaces * faces; - - /** - * A structure that contains all - * sorts of numbers that - * characterize the degrees of - * freedom this object works on. - * - * For most members of this - * structure, there is an - * accessor function in this - * class that returns its value. - */ - dealii::internal::DoFHandler::NumberCache number_cache; - - /** - * Array to store the indices - * for degrees of freedom - * located at vertices. - * - * The format used here, in the - * form of a linked list, is - * the same as used for the - * arrays used in the - * internal::hp::DoFLevel - * hierarchy. Starting indices - * into this array are provided - * by the vertex_dofs_offsets - * field. - * - * Access to this field is - * generally through the - * DoFAccessor::get_vertex_dof_index() and - * DoFAccessor::set_vertex_dof_index() - * functions, encapsulating the - * actual data format used to - * the present class. - */ - std::vector vertex_dofs; - - /** - * For each vertex in the - * triangulation, store the - * offset within the - * vertex_dofs array where the - * dofs for this vertex start. - * - * As for that array, the - * format is the same as - * described in the - * documentation of - * hp::DoFLevel. - * - * Access to this field is - * generally through the - * Accessor::get_vertex_dof_index() and - * Accessor::set_vertex_dof_index() - * functions, encapsulating the - * actual data format used to - * the present class. - */ - std::vector vertex_dofs_offsets; - - std::vector mg_vertex_dofs; - - /** - * Array to store the - * information, if a cell on - * some level has children or - * not. It is used by the - * refinement listeners as a - * persistent buffer during the - * refinement, i.e. from between - * when pre_refinement_action is - * called and when post_refinement_action - * runs. - */ - std::vector *> has_children; - - /** - * A list of connections with which this object connects - * to the triangulation to get information about when the - * triangulation changes. - */ - std::vector tria_listeners; - - /** - * Make accessor objects friends. - */ - template friend class dealii::DoFAccessor; - template friend class dealii::DoFCellAccessor; - friend struct dealii::internal::DoFAccessor::Implementation; - friend struct dealii::internal::DoFCellAccessor::Implementation; - - /** - * Likewise for DoFLevel - * objects since they need to - * access the vertex dofs in - * the functions that set and - * retrieve vertex dof indices. - */ - template friend class dealii::internal::hp::DoFLevel; - template friend class dealii::internal::hp::DoFObjects; - friend struct dealii::internal::hp::DoFHandler::Implementation; ++ MGVertexDoFs (); ++ ~MGVertexDoFs (); ++ unsigned int get_index (const unsigned int level, const unsigned int dof_number) const; ++ void set_index (const unsigned int level, const unsigned int dof_number, const unsigned int index); ++ }; ++ + /** + * Free all used memory. + */ + void clear_space (); + ++ template ++ unsigned int get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const; ++ ++ template ++ void set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index) const; ++ + /** + * Create default tables for + * the active_fe_indices in + * the + * dealii::internal::hp::DoFLevel. 
They + * are initialized with the a + * zero indicator, meaning + * that fe[0] is going to be + * used by default. This + * method is called before + * refinement and before + * distribute_dofs is + * called. It ensures each + * cell has a valid + * active_fe_index. + */ + + void create_active_fe_table (); + + /** + * Functions that will be triggered + * through signals whenever the + * triangulation is modified. + * + * Here they are used to + * administrate the the + * active_fe_fields during the + * spatial refinement. + */ - void pre_refinement_action (); - void post_refinement_action (); - ++ virtual void pre_refinement_action (); ++ virtual void post_refinement_action (); + + /** + * Compute identities between + * DoFs located on + * vertices. Called from + * distribute_dofs(). + */ + void + compute_vertex_dof_identities (std::vector &new_dof_indices) const; + + /** + * Compute identities between + * DoFs located on + * lines. Called from + * distribute_dofs(). + */ + void + compute_line_dof_identities (std::vector &new_dof_indices) const; + + /** + * Compute identities between + * DoFs located on + * quads. Called from + * distribute_dofs(). + */ + void + compute_quad_dof_identities (std::vector &new_dof_indices) const; + + /** + * Renumber the objects with + * the given and all lower + * structural dimensions, + * i.e. renumber vertices by + * giving a template argument + * of zero to the int2type + * argument, lines and vertices + * with one, etc. + * + * Note that in contrast to the + * public renumber_dofs() + * function, these internal + * functions do not ensure that + * the new DoFs are + * contiguously numbered. The + * function may therefore also + * be used to assign different + * DoFs the same number, for + * example to unify hp DoFs + * corresponding to different + * finite elements but + * co-located on the same + * entity. + */ + void renumber_dofs_internal (const std::vector &new_numbers, + dealii::internal::int2type<0>); + + void renumber_dofs_internal (const std::vector &new_numbers, + dealii::internal::int2type<1>); + + void renumber_dofs_internal (const std::vector &new_numbers, + dealii::internal::int2type<2>); + + void renumber_dofs_internal (const std::vector &new_numbers, + dealii::internal::int2type<3>); + + /** + * Space to store the DoF + * numbers for the different + * levels. Analogous to the + * levels[] tree of + * the Triangulation objects. + */ + std::vector*> levels; + /** + * Space to store the DoF + * numbers for the faces. + * Analogous to the + * faces pointer of + * the Triangulation objects. + */ + dealii::internal::hp::DoFFaces *faces; + + /** + * A structure that contains all + * sorts of numbers that + * characterize the degrees of + * freedom this object works on. + * + * For most members of this + * structure, there is an + * accessor function in this + * class that returns its value. + */ + dealii::internal::DoFHandler::NumberCache number_cache; + + /** + * Array to store the indices + * for degrees of freedom + * located at vertices. + * + * The format used here, in the + * form of a linked list, is + * the same as used for the + * arrays used in the + * internal::hp::DoFLevel + * hierarchy. Starting indices + * into this array are provided + * by the vertex_dofs_offsets + * field. + * + * Access to this field is + * generally through the + * DoFAccessor::get_vertex_dof_index() and + * DoFAccessor::set_vertex_dof_index() + * functions, encapsulating the + * actual data format used to + * the present class. 
+ */ + std::vector vertex_dofs; + + /** + * For each vertex in the + * triangulation, store the + * offset within the + * vertex_dofs array where the + * dofs for this vertex start. + * + * As for that array, the + * format is the same as + * described in the + * documentation of + * hp::DoFLevel. + * + * Access to this field is + * generally through the + * Accessor::get_vertex_dof_index() and + * Accessor::set_vertex_dof_index() + * functions, encapsulating the + * actual data format used to + * the present class. + */ + std::vector vertex_dofs_offsets; + ++ std::vector mg_vertex_dofs; ++ + /** + * Array to store the + * information, if a cell on + * some level has children or + * not. It is used by the + * refinement listeners as a + * persistent buffer during the + * refinement, i.e. from between + * when pre_refinement_action is + * called and when post_refinement_action + * runs. + */ + std::vector *> has_children; + + /** + * A list of connections with which this object connects + * to the triangulation to get information about when the + * triangulation changes. + */ + std::vector tria_listeners; + + /** + * Make accessor objects friends. + */ + template friend class dealii::DoFAccessor; + template friend class dealii::DoFCellAccessor; + friend struct dealii::internal::DoFAccessor::Implementation; + friend struct dealii::internal::DoFCellAccessor::Implementation; + + /** + * Likewise for DoFLevel + * objects since they need to + * access the vertex dofs in + * the functions that set and + * retrieve vertex dof indices. + */ + template friend class dealii::internal::hp::DoFLevel; + template friend class dealii::internal::hp::DoFObjects; + friend struct dealii::internal::hp::DoFHandler::Implementation; }; @@@ -937,37 -921,7 +938,37 @@@ return *tria; } + template + inline + DoFHandler::MGVertexDoFs::MGVertexDoFs() + { + Assert (false, ExcNotImplemented ()); + } + + template + inline + DoFHandler::MGVertexDoFs::~MGVertexDoFs() + { + Assert (false, ExcNotImplemented ()); + } + + template + inline + unsigned int DoFHandler::MGVertexDoFs::get_index (const unsigned int, - const unsigned int) const ++ const unsigned int) const + { + Assert (false, ExcNotImplemented ()); + return invalid_dof_index; + } - + + template + inline + void DoFHandler::MGVertexDoFs::set_index (const unsigned int, - const unsigned int, - const unsigned int) ++ const unsigned int, ++ const unsigned int) + { + Assert (false, ExcNotImplemented ()); + } #endif diff --cc deal.II/include/deal.II/hp/fe_values.h index 66f7178f09,92ce5dac47..7ad98f4cad --- a/deal.II/include/deal.II/hp/fe_values.h +++ b/deal.II/include/deal.II/hp/fe_values.h @@@ -247,275 -247,275 +247,275 @@@ namespace h template class FEValues : public dealii::internal::hp::FEValuesBase > { - public: + public: - static const unsigned int dimension = dim; + static const unsigned int dimension = dim; - static const unsigned int space_dimension = spacedim; + static const unsigned int space_dimension = spacedim; - /** - * Constructor. Initialize this - * object with the given - * parameters. - * - * The finite element - * collection parameter is - * actually ignored, but is in - * the signature of this - * function to make it - * compatible with the - * signature of the respective - * constructor of the usual - * FEValues object, with - * the respective parameter in - * that function also being the - * return value of the - * DoFHandler::get_fe() - * function. 
- */ - FEValues (const dealii::hp::MappingCollection &mapping_collection, - const dealii::hp::FECollection &fe_collection, - const dealii::hp::QCollection &q_collection, - const UpdateFlags update_flags); + /** + * Constructor. Initialize this + * object with the given + * parameters. + * + * The finite element + * collection parameter is + * actually ignored, but is in + * the signature of this + * function to make it + * compatible with the + * signature of the respective + * constructor of the usual + * FEValues object, with + * the respective parameter in + * that function also being the + * return value of the + * DoFHandler::get_fe() + * function. + */ + FEValues (const dealii::hp::MappingCollection &mapping_collection, - const dealii::hp::FECollection &fe_collection, ++ const dealii::hp::FECollection &fe_collection, + const dealii::hp::QCollection &q_collection, + const UpdateFlags update_flags); - /** - * Constructor. Initialize this - * object with the given - * parameters, and choose a - * @p MappingQ1 object for the - * mapping object. - * - * The finite element - * collection parameter is - * actually ignored, but is in - * the signature of this - * function to make it - * compatible with the - * signature of the respective - * constructor of the usual - * FEValues object, with - * the respective parameter in - * that function also being the - * return value of the - * DoFHandler::get_fe() - * function. - */ - FEValues (const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags); + /** + * Constructor. Initialize this + * object with the given + * parameters, and choose a + * @p MappingQ1 object for the + * mapping object. + * + * The finite element + * collection parameter is + * actually ignored, but is in + * the signature of this + * function to make it + * compatible with the + * signature of the respective + * constructor of the usual + * FEValues object, with + * the respective parameter in + * that function also being the + * return value of the + * DoFHandler::get_fe() + * function. + */ + FEValues (const hp::FECollection &fe_collection, + const hp::QCollection &q_collection, + const UpdateFlags update_flags); - /** - * Reinitialize the object for - * the given cell. - * - * After the call, you can get - * an FEValues object using the - * get_present_fe_values() - * function that corresponds to - * the present cell. For this - * FEValues object, we use the - * additional arguments - * described below to determine - * which finite element, - * mapping, and quadrature - * formula to use. They are - * order in such a way that the - * arguments one may want to - * change most frequently come - * first. The rules for these - * arguments are as follows: - * - * If the @p fe_index argument - * to this function is left at - * its default value, then we - * use that finite element - * within the hp::FECollection - * passed to the constructor of - * this class with index given - * by - * cell-@>active_fe_index(). Consequently, - * the hp::FECollection - * argument given to this - * object should really be the - * same as that used in the - * construction of the - * hp::DofHandler associated - * with the present cell. On - * the other hand, if a value - * is given for this argument, - * it overrides the choice of - * cell-@>active_fe_index(). 
- * - * If the @p q_index argument - * is left at its default - * value, then we use that - * quadrature formula within - * the hp::QCollection passed - * to the constructor of this - * class with index given by - * cell-@>active_fe_index(), - * i.e. the same index as that - * of the finite element. In - * this case, there should be a - * corresponding quadrature - * formula for each finite - * element in the - * hp::FECollection. As a - * special case, if the - * quadrature collection - * contains only a single - * element (a frequent case if - * one wants to use the same - * quadrature object for all - * finite elements in an hp - * discretization, even if that - * may not be the most - * efficient), then this single - * quadrature is used unless a - * different value for this - * argument is specified. On - * the other hand, if a value - * is given for this argument, - * it overrides the choice of - * cell-@>active_fe_index() - * or the choice for the single - * quadrature. - * - * If the @p mapping_index - * argument is left at its - * default value, then we use - * that mapping object within - * the hp::MappingCollection - * passed to the constructor of - * this class with index given - * by - * cell-@>active_fe_index(), - * i.e. the same index as that - * of the finite - * element. As above, if the - * mapping collection contains - * only a single element (a - * frequent case if one wants - * to use a MappingQ1 object - * for all finite elements in - * an hp discretization), then - * this single mapping is used - * unless a different value for - * this argument is specified. - */ - void - reinit (const typename hp::DoFHandler::cell_iterator &cell, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Reinitialize the object for + * the given cell. + * + * After the call, you can get + * an FEValues object using the + * get_present_fe_values() + * function that corresponds to + * the present cell. For this + * FEValues object, we use the + * additional arguments + * described below to determine + * which finite element, + * mapping, and quadrature + * formula to use. They are + * order in such a way that the + * arguments one may want to + * change most frequently come + * first. The rules for these + * arguments are as follows: + * + * If the @p fe_index argument + * to this function is left at + * its default value, then we + * use that finite element + * within the hp::FECollection + * passed to the constructor of + * this class with index given + * by + * cell-@>active_fe_index(). Consequently, + * the hp::FECollection + * argument given to this + * object should really be the + * same as that used in the + * construction of the + * hp::DofHandler associated + * with the present cell. On + * the other hand, if a value + * is given for this argument, + * it overrides the choice of + * cell-@>active_fe_index(). + * + * If the @p q_index argument + * is left at its default + * value, then we use that + * quadrature formula within + * the hp::QCollection passed + * to the constructor of this + * class with index given by + * cell-@>active_fe_index(), + * i.e. the same index as that + * of the finite element. In + * this case, there should be a + * corresponding quadrature + * formula for each finite + * element in the + * hp::FECollection. 
As a + * special case, if the + * quadrature collection + * contains only a single + * element (a frequent case if + * one wants to use the same + * quadrature object for all + * finite elements in an hp + * discretization, even if that + * may not be the most + * efficient), then this single + * quadrature is used unless a + * different value for this + * argument is specified. On + * the other hand, if a value + * is given for this argument, + * it overrides the choice of + * cell-@>active_fe_index() + * or the choice for the single + * quadrature. + * + * If the @p mapping_index + * argument is left at its + * default value, then we use + * that mapping object within + * the hp::MappingCollection + * passed to the constructor of + * this class with index given + * by + * cell-@>active_fe_index(), + * i.e. the same index as that + * of the finite + * element. As above, if the + * mapping collection contains + * only a single element (a + * frequent case if one wants + * to use a MappingQ1 object + * for all finite elements in + * an hp discretization), then + * this single mapping is used + * unless a different value for + * this argument is specified. + */ + void + reinit (const typename hp::DoFHandler::cell_iterator &cell, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * always returns zero for - * non-hp iterators, this - * function chooses the zero-th - * finite element, mapping, and - * quadrature object from the - * relevant constructions - * passed to the constructor of - * this object. The only - * exception is if you specify - * a value different from the - * default value for any of - * these last three arguments. - */ - void - reinit (const typename dealii::DoFHandler::cell_iterator &cell, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * always returns zero for + * non-hp iterators, this + * function chooses the zero-th + * finite element, mapping, and + * quadrature object from the + * relevant constructions + * passed to the constructor of + * this object. The only + * exception is if you specify + * a value different from the + * default value for any of + * these last three arguments. 
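A minimal sketch of the index rules described above in a typical assembly loop, assuming the fe_collection and dof_handler from the earlier sketch plus a matching quadrature collection (all names illustrative):

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/hp/fe_values.h>

    hp::QCollection<dim> quadrature_collection;
    quadrature_collection.push_back (QGauss<dim>(2));
    quadrature_collection.push_back (QGauss<dim>(3));

    hp::FEValues<dim> hp_fe_values (fe_collection,
                                    quadrature_collection,
                                    update_values | update_gradients |
                                    update_JxW_values);

    for (typename hp::DoFHandler<dim>::active_cell_iterator
           cell = dof_handler.begin_active();
         cell != dof_handler.end(); ++cell)
      {
        // All index arguments left at their defaults: the finite element,
        // mapping, and quadrature with index cell->active_fe_index() are used
        // (or the single entry, if a collection has only one element).
        hp_fe_values.reinit (cell);

        const FEValues<dim> &fe_values = hp_fe_values.get_present_fe_values ();
        // ... assemble local contributions with fe_values ...
      }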
+ */ + void + reinit (const typename dealii::DoFHandler::cell_iterator &cell, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * always returns zero for - * non-hp iterators, this - * function chooses the zero-th - * finite element, mapping, and - * quadrature object from the - * relevant constructions - * passed to the constructor of - * this object. The only - * exception is if you specify - * a value different from the - * default value for any of - * these last three arguments. - */ - void - reinit (const typename MGDoFHandler::cell_iterator &cell, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * always returns zero for + * non-hp iterators, this + * function chooses the zero-th + * finite element, mapping, and + * quadrature object from the + * relevant constructions + * passed to the constructor of + * this object. The only + * exception is if you specify + * a value different from the + * default value for any of + * these last three arguments. + */ + void + reinit (const typename MGDoFHandler::cell_iterator &cell, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * doesn't make sense for - * triangulation iterators, - * this function chooses the - * zero-th finite element, - * mapping, and quadrature - * object from the relevant - * constructions passed to the - * constructor of this - * object. The only exception - * is if you specify a value - * different from the default - * value for any of these last - * three arguments. - */ - void - reinit (const typename Triangulation::cell_iterator &cell, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. 
The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * doesn't make sense for + * triangulation iterators, + * this function chooses the + * zero-th finite element, + * mapping, and quadrature + * object from the relevant + * constructions passed to the + * constructor of this + * object. The only exception + * is if you specify a value + * different from the default + * value for any of these last + * three arguments. + */ + void + reinit (const typename Triangulation::cell_iterator &cell, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); }; @@@ -548,273 -548,273 +548,273 @@@ template class FEFaceValues : public dealii::internal::hp::FEValuesBase > { - public: - /** - * Constructor. Initialize this - * object with the given - * parameters. - * - * The finite element - * collection parameter is - * actually ignored, but is in - * the signature of this - * function to make it - * compatible with the - * signature of the respective - * constructor of the usual - * FEValues object, with - * the respective parameter in - * that function also being the - * return value of the - * DoFHandler::get_fe() - * function. - */ - FEFaceValues (const hp::MappingCollection &mapping_collection, - const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags); + public: + /** + * Constructor. Initialize this + * object with the given + * parameters. + * + * The finite element + * collection parameter is + * actually ignored, but is in + * the signature of this + * function to make it + * compatible with the + * signature of the respective + * constructor of the usual + * FEValues object, with + * the respective parameter in + * that function also being the + * return value of the + * DoFHandler::get_fe() + * function. + */ + FEFaceValues (const hp::MappingCollection &mapping_collection, - const hp::FECollection &fe_collection, ++ const hp::FECollection &fe_collection, + const hp::QCollection &q_collection, + const UpdateFlags update_flags); - /** - * Constructor. Initialize this - * object with the given - * parameters, and choose a - * @p MappingQ1 object for the - * mapping object. - * - * The finite element - * collection parameter is - * actually ignored, but is in - * the signature of this - * function to make it - * compatible with the - * signature of the respective - * constructor of the usual - * FEValues object, with - * the respective parameter in - * that function also being the - * return value of the - * DoFHandler::get_fe() - * function. - */ - FEFaceValues (const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags); + /** + * Constructor. Initialize this + * object with the given + * parameters, and choose a + * @p MappingQ1 object for the + * mapping object. 
+ * + * The finite element + * collection parameter is + * actually ignored, but is in + * the signature of this + * function to make it + * compatible with the + * signature of the respective + * constructor of the usual + * FEValues object, with + * the respective parameter in + * that function also being the + * return value of the + * DoFHandler::get_fe() + * function. + */ - FEFaceValues (const hp::FECollection &fe_collection, ++ FEFaceValues (const hp::FECollection &fe_collection, + const hp::QCollection &q_collection, + const UpdateFlags update_flags); - /** - * Reinitialize the object for - * the given cell and face. - * - * After the call, you can get - * an FEFaceValues object using the - * get_present_fe_values() - * function that corresponds to - * the present cell. For this - * FEFaceValues object, we use the - * additional arguments - * described below to determine - * which finite element, - * mapping, and quadrature - * formula to use. They are - * order in such a way that the - * arguments one may want to - * change most frequently come - * first. The rules for these - * arguments are as follows: - * - * If the @p fe_index argument - * to this function is left at - * its default value, then we - * use that finite element - * within the hp::FECollection - * passed to the constructor of - * this class with index given - * by - * cell-@>active_fe_index(). Consequently, - * the hp::FECollection - * argument given to this - * object should really be the - * same as that used in the - * construction of the - * hp::DofHandler associated - * with the present cell. On - * the other hand, if a value - * is given for this argument, - * it overrides the choice of - * cell-@>active_fe_index(). - * - * If the @p q_index argument - * is left at its default - * value, then we use that - * quadrature formula within - * the hp::QCollection passed - * to the constructor of this - * class with index given by - * cell-@>active_fe_index(), - * i.e. the same index as that - * of the finite element. In - * this case, there should be a - * corresponding quadrature - * formula for each finite - * element in the - * hp::FECollection. As a - * special case, if the - * quadrature collection - * contains only a single - * element (a frequent case if - * one wants to use the same - * quadrature object for all - * finite elements in an hp - * discretization, even if that - * may not be the most - * efficient), then this single - * quadrature is used unless a - * different value for this - * argument is specified. On - * the other hand, if a value - * is given for this argument, - * it overrides the choice of - * cell-@>active_fe_index() - * or the choice for the single - * quadrature. - * - * If the @p mapping_index - * argument is left at its - * default value, then we use - * that mapping object within - * the hp::MappingCollection - * passed to the constructor of - * this class with index given - * by - * cell-@>active_fe_index(), - * i.e. the same index as that - * of the finite - * element. As above, if the - * mapping collection contains - * only a single element (a - * frequent case if one wants - * to use a MappingQ1 object - * for all finite elements in - * an hp discretization), then - * this single mapping is used - * unless a different value for - * this argument is specified. 
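The face variant follows the same pattern; a sketch of a loop over boundary faces, reusing the collections from the previous sketches together with a face quadrature collection (names illustrative):

    hp::QCollection<dim-1> face_quadrature_collection;
    face_quadrature_collection.push_back (QGauss<dim-1>(2));
    face_quadrature_collection.push_back (QGauss<dim-1>(3));

    hp::FEFaceValues<dim> hp_fe_face_values (fe_collection,
                                             face_quadrature_collection,
                                             update_values | update_JxW_values);

    for (typename hp::DoFHandler<dim>::active_cell_iterator
           cell = dof_handler.begin_active();
         cell != dof_handler.end(); ++cell)
      for (unsigned int face=0; face<GeometryInfo<dim>::faces_per_cell; ++face)
        if (cell->face(face)->at_boundary())
          {
            // Default indices: everything is selected via cell->active_fe_index().
            hp_fe_face_values.reinit (cell, face);

            const FEFaceValues<dim> &fe_face_values
              = hp_fe_face_values.get_present_fe_values ();
            // ... evaluate boundary terms with fe_face_values ...
          }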
- */ - void - reinit (const typename hp::DoFHandler::cell_iterator &cell, - const unsigned int face_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Reinitialize the object for + * the given cell and face. + * + * After the call, you can get + * an FEFaceValues object using the + * get_present_fe_values() + * function that corresponds to + * the present cell. For this + * FEFaceValues object, we use the + * additional arguments + * described below to determine + * which finite element, + * mapping, and quadrature + * formula to use. They are + * order in such a way that the + * arguments one may want to + * change most frequently come + * first. The rules for these + * arguments are as follows: + * + * If the @p fe_index argument + * to this function is left at + * its default value, then we + * use that finite element + * within the hp::FECollection + * passed to the constructor of + * this class with index given + * by + * cell-@>active_fe_index(). Consequently, + * the hp::FECollection + * argument given to this + * object should really be the + * same as that used in the + * construction of the + * hp::DofHandler associated + * with the present cell. On + * the other hand, if a value + * is given for this argument, + * it overrides the choice of + * cell-@>active_fe_index(). + * + * If the @p q_index argument + * is left at its default + * value, then we use that + * quadrature formula within + * the hp::QCollection passed + * to the constructor of this + * class with index given by + * cell-@>active_fe_index(), + * i.e. the same index as that + * of the finite element. In + * this case, there should be a + * corresponding quadrature + * formula for each finite + * element in the + * hp::FECollection. As a + * special case, if the + * quadrature collection + * contains only a single + * element (a frequent case if + * one wants to use the same + * quadrature object for all + * finite elements in an hp + * discretization, even if that + * may not be the most + * efficient), then this single + * quadrature is used unless a + * different value for this + * argument is specified. On + * the other hand, if a value + * is given for this argument, + * it overrides the choice of + * cell-@>active_fe_index() + * or the choice for the single + * quadrature. + * + * If the @p mapping_index + * argument is left at its + * default value, then we use + * that mapping object within + * the hp::MappingCollection + * passed to the constructor of + * this class with index given + * by + * cell-@>active_fe_index(), + * i.e. the same index as that + * of the finite + * element. As above, if the + * mapping collection contains + * only a single element (a + * frequent case if one wants + * to use a MappingQ1 object + * for all finite elements in + * an hp discretization), then + * this single mapping is used + * unless a different value for + * this argument is specified. + */ + void + reinit (const typename hp::DoFHandler::cell_iterator &cell, + const unsigned int face_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. 
The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * always returns zero for - * non-hp iterators, this - * function chooses the zero-th - * finite element, mapping, and - * quadrature object from the - * relevant constructions - * passed to the constructor of - * this object. The only - * exception is if you specify - * a value different from the - * default value for any of - * these last three arguments. - */ - void - reinit (const typename dealii::DoFHandler::cell_iterator &cell, - const unsigned int face_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * always returns zero for + * non-hp iterators, this + * function chooses the zero-th + * finite element, mapping, and + * quadrature object from the + * relevant constructions + * passed to the constructor of + * this object. The only + * exception is if you specify + * a value different from the + * default value for any of + * these last three arguments. + */ + void + reinit (const typename dealii::DoFHandler::cell_iterator &cell, + const unsigned int face_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * always returns zero for - * non-hp iterators, this - * function chooses the zero-th - * finite element, mapping, and - * quadrature object from the - * relevant constructions - * passed to the constructor of - * this object. The only - * exception is if you specify - * a value different from the - * default value for any of - * these last three arguments. - */ - void - reinit (const typename MGDoFHandler::cell_iterator &cell, - const unsigned int face_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. 
+ * + * Since + * cell-@>active_fe_index() + * always returns zero for + * non-hp iterators, this + * function chooses the zero-th + * finite element, mapping, and + * quadrature object from the + * relevant constructions + * passed to the constructor of + * this object. The only + * exception is if you specify + * a value different from the + * default value for any of + * these last three arguments. + */ + void + reinit (const typename MGDoFHandler::cell_iterator &cell, + const unsigned int face_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * doesn't make sense for - * triangulation iterators, - * this function chooses the - * zero-th finite element, - * mapping, and quadrature - * object from the relevant - * constructions passed to the - * constructor of this - * object. The only exception - * is if you specify a value - * different from the default - * value for any of these last - * three arguments. - */ - void - reinit (const typename Triangulation::cell_iterator &cell, - const unsigned int face_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * doesn't make sense for + * triangulation iterators, + * this function chooses the + * zero-th finite element, + * mapping, and quadrature + * object from the relevant + * constructions passed to the + * constructor of this + * object. The only exception + * is if you specify a value + * different from the default + * value for any of these last + * three arguments. + */ + void + reinit (const typename Triangulation::cell_iterator &cell, + const unsigned int face_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); }; @@@ -829,256 -829,256 +829,256 @@@ template class FESubfaceValues : public dealii::internal::hp::FEValuesBase > { - public: - /** - * Constructor. Initialize this - * object with the given - * parameters. - * - * The finite element - * collection parameter is - * actually ignored, but is in - * the signature of this - * function to make it - * compatible with the - * signature of the respective - * constructor of the usual - * FEValues object, with - * the respective parameter in - * that function also being the - * return value of the - * DoFHandler::get_fe() - * function. 
- */ - FESubfaceValues (const hp::MappingCollection &mapping_collection, - const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags); + public: + /** + * Constructor. Initialize this + * object with the given + * parameters. + * + * The finite element + * collection parameter is + * actually ignored, but is in + * the signature of this + * function to make it + * compatible with the + * signature of the respective + * constructor of the usual + * FEValues object, with + * the respective parameter in + * that function also being the + * return value of the + * DoFHandler::get_fe() + * function. + */ + FESubfaceValues (const hp::MappingCollection &mapping_collection, - const hp::FECollection &fe_collection, ++ const hp::FECollection &fe_collection, + const hp::QCollection &q_collection, + const UpdateFlags update_flags); - /** - * Constructor. Initialize this - * object with the given - * parameters, and choose a - * @p MappingQ1 object for the - * mapping object. - * - * The finite element - * collection parameter is - * actually ignored, but is in - * the signature of this - * function to make it - * compatible with the - * signature of the respective - * constructor of the usual - * FEValues object, with - * the respective parameter in - * that function also being the - * return value of the - * DoFHandler::get_fe() - * function. - */ - FESubfaceValues (const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags); + /** + * Constructor. Initialize this + * object with the given + * parameters, and choose a + * @p MappingQ1 object for the + * mapping object. + * + * The finite element + * collection parameter is + * actually ignored, but is in + * the signature of this + * function to make it + * compatible with the + * signature of the respective + * constructor of the usual + * FEValues object, with + * the respective parameter in + * that function also being the + * return value of the + * DoFHandler::get_fe() + * function. + */ + FESubfaceValues (const hp::FECollection &fe_collection, + const hp::QCollection &q_collection, + const UpdateFlags update_flags); - /** - * Reinitialize the object for - * the given cell, face, and subface. - * - * After the call, you can get - * an FESubfaceValues object using the - * get_present_fe_values() - * function that corresponds to - * the present cell. For this - * FESubfaceValues object, we use the - * additional arguments - * described below to determine - * which finite element, - * mapping, and quadrature - * formula to use. They are - * order in such a way that the - * arguments one may want to - * change most frequently come - * first. The rules for these - * arguments are as follows: - * - * If the @p q_index argument - * is left at its default - * value, then we use that - * quadrature formula within - * the hp::QCollection passed - * to the constructor of this - * class with index given by - * cell-@>active_fe_index(), - * i.e. the same index as that - * of the finite element. In - * this case, there should be a - * corresponding quadrature - * formula for each finite - * element in the - * hp::FECollection. 
As a - * special case, if the - * quadrature collection - * contains only a single - * element (a frequent case if - * one wants to use the same - * quadrature object for all - * finite elements in an hp - * discretization, even if that - * may not be the most - * efficient), then this single - * quadrature is used unless a - * different value for this - * argument is specified. On - * the other hand, if a value - * is given for this argument, - * it overrides the choice of - * cell-@>active_fe_index() - * or the choice for the single - * quadrature. - * - * If the @p mapping_index - * argument is left at its - * default value, then we use - * that mapping object within - * the hp::MappingCollection - * passed to the constructor of - * this class with index given - * by - * cell-@>active_fe_index(), - * i.e. the same index as that - * of the finite - * element. As above, if the - * mapping collection contains - * only a single element (a - * frequent case if one wants - * to use a MappingQ1 object - * for all finite elements in - * an hp discretization), then - * this single mapping is used - * unless a different value for - * this argument is specified. - */ - void - reinit (const typename hp::DoFHandler::cell_iterator &cell, - const unsigned int face_no, - const unsigned int subface_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Reinitialize the object for + * the given cell, face, and subface. + * + * After the call, you can get + * an FESubfaceValues object using the + * get_present_fe_values() + * function that corresponds to + * the present cell. For this + * FESubfaceValues object, we use the + * additional arguments + * described below to determine + * which finite element, + * mapping, and quadrature + * formula to use. They are + * order in such a way that the + * arguments one may want to + * change most frequently come + * first. The rules for these + * arguments are as follows: + * + * If the @p q_index argument + * is left at its default + * value, then we use that + * quadrature formula within + * the hp::QCollection passed + * to the constructor of this + * class with index given by + * cell-@>active_fe_index(), + * i.e. the same index as that + * of the finite element. In + * this case, there should be a + * corresponding quadrature + * formula for each finite + * element in the + * hp::FECollection. As a + * special case, if the + * quadrature collection + * contains only a single + * element (a frequent case if + * one wants to use the same + * quadrature object for all + * finite elements in an hp + * discretization, even if that + * may not be the most + * efficient), then this single + * quadrature is used unless a + * different value for this + * argument is specified. On + * the other hand, if a value + * is given for this argument, + * it overrides the choice of + * cell-@>active_fe_index() + * or the choice for the single + * quadrature. + * + * If the @p mapping_index + * argument is left at its + * default value, then we use + * that mapping object within + * the hp::MappingCollection + * passed to the constructor of + * this class with index given + * by + * cell-@>active_fe_index(), + * i.e. the same index as that + * of the finite + * element. 
As above, if the + * mapping collection contains + * only a single element (a + * frequent case if one wants + * to use a MappingQ1 object + * for all finite elements in + * an hp discretization), then + * this single mapping is used + * unless a different value for + * this argument is specified. + */ + void + reinit (const typename hp::DoFHandler::cell_iterator &cell, + const unsigned int face_no, + const unsigned int subface_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * always returns zero for - * non-hp iterators, this - * function chooses the zero-th - * finite element, mapping, and - * quadrature object from the - * relevant constructions - * passed to the constructor of - * this object. The only - * exception is if you specify - * a value different from the - * default value for any of - * these last three arguments. - */ - void - reinit (const typename dealii::DoFHandler::cell_iterator &cell, - const unsigned int face_no, - const unsigned int subface_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * always returns zero for + * non-hp iterators, this + * function chooses the zero-th + * finite element, mapping, and + * quadrature object from the + * relevant constructions + * passed to the constructor of + * this object. The only + * exception is if you specify + * a value different from the + * default value for any of + * these last three arguments. + */ + void + reinit (const typename dealii::DoFHandler::cell_iterator &cell, + const unsigned int face_no, + const unsigned int subface_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * always returns zero for - * non-hp iterators, this - * function chooses the zero-th - * finite element, mapping, and - * quadrature object from the - * relevant constructions - * passed to the constructor of - * this object. 
The only - * exception is if you specify - * a value different from the - * default value for any of - * these last three arguments. - */ - void - reinit (const typename MGDoFHandler::cell_iterator &cell, - const unsigned int face_no, - const unsigned int subface_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * always returns zero for + * non-hp iterators, this + * function chooses the zero-th + * finite element, mapping, and + * quadrature object from the + * relevant constructions + * passed to the constructor of + * this object. The only + * exception is if you specify + * a value different from the + * default value for any of + * these last three arguments. + */ + void + reinit (const typename MGDoFHandler::cell_iterator &cell, + const unsigned int face_no, + const unsigned int subface_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); - /** - * Like the previous function, - * but for non-hp - * iterators. The reason this - * (and the other non-hp - * iterator) function exists is - * so that one can use - * hp::FEValues not only for - * hp::DoFhandler objects, but - * for all sorts of DoFHandler - * objects, and triangulations - * not associated with - * DoFHandlers in general. - * - * Since - * cell-@>active_fe_index() - * doesn't make sense for - * triangulation iterators, - * this function chooses the - * zero-th finite element, - * mapping, and quadrature - * object from the relevant - * constructions passed to the - * constructor of this - * object. The only exception - * is if you specify a value - * different from the default - * value for any of these last - * three arguments. - */ - void - reinit (const typename Triangulation::cell_iterator &cell, - const unsigned int face_no, - const unsigned int subface_no, - const unsigned int q_index = numbers::invalid_unsigned_int, - const unsigned int mapping_index = numbers::invalid_unsigned_int, - const unsigned int fe_index = numbers::invalid_unsigned_int); + /** + * Like the previous function, + * but for non-hp + * iterators. The reason this + * (and the other non-hp + * iterator) function exists is + * so that one can use + * hp::FEValues not only for + * hp::DoFhandler objects, but + * for all sorts of DoFHandler + * objects, and triangulations + * not associated with + * DoFHandlers in general. + * + * Since + * cell-@>active_fe_index() + * doesn't make sense for + * triangulation iterators, + * this function chooses the + * zero-th finite element, + * mapping, and quadrature + * object from the relevant + * constructions passed to the + * constructor of this + * object. The only exception + * is if you specify a value + * different from the default + * value for any of these last + * three arguments. 
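// [Editorial sketch -- not part of this commit or of the headers being diffed.
//  A minimal illustration of the reinit()/get_present_fe_values() interface
//  documented in the surrounding hp::FEFaceValues / hp::FESubfaceValues
//  comments.  The function name and all parameters are hypothetical; the
//  required headers would be <deal.II/hp/dof_handler.h> and
//  <deal.II/hp/fe_values.h>.]
template <int dim>
double boundary_area (const hp::DoFHandler<dim>    &dof_handler,
                      const hp::FECollection<dim>  &fe_collection,
                      const hp::QCollection<dim-1> &face_quadrature_collection)
{
  // Without a mapping collection, a MappingQ1 object is used, as described
  // in the constructor documentation above.
  hp::FEFaceValues<dim> hp_fe_face_values (fe_collection,
                                           face_quadrature_collection,
                                           update_JxW_values);
  double area = 0;

  typename hp::DoFHandler<dim>::active_cell_iterator
    cell = dof_handler.begin_active(),
    endc = dof_handler.end();
  for (; cell != endc; ++cell)
    for (unsigned int face = 0; face < GeometryInfo<dim>::faces_per_cell; ++face)
      if (cell->face(face)->at_boundary())
        {
          // With q_index, mapping_index and fe_index left at their default
          // values, the finite element, mapping and quadrature object with
          // index cell->active_fe_index() are chosen, as described above.
          hp_fe_face_values.reinit (cell, face);

          const FEFaceValues<dim> &fe_face_values
            = hp_fe_face_values.get_present_fe_values ();
          for (unsigned int q = 0; q < fe_face_values.n_quadrature_points; ++q)
            area += fe_face_values.JxW (q);
        }
  return area;
}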
+ */ + void + reinit (const typename Triangulation::cell_iterator &cell, + const unsigned int face_no, + const unsigned int subface_no, + const unsigned int q_index = numbers::invalid_unsigned_int, + const unsigned int mapping_index = numbers::invalid_unsigned_int, + const unsigned int fe_index = numbers::invalid_unsigned_int); }; } diff --cc deal.II/include/deal.II/lac/block_matrix_base.h index 2e3cecf83c,d7de0a7eb2..8cd475e641 --- a/deal.II/include/deal.II/lac/block_matrix_base.h +++ b/deal.II/include/deal.II/lac/block_matrix_base.h @@@ -356,892 -356,892 +356,892 @@@ namespace BlockMatrixIterator template class BlockMatrixBase : public Subscriptor { - public: - /** - * Typedef the type of the underlying - * matrix. - */ - typedef MatrixType BlockType; - - /** - * Type of matrix entries. In analogy to - * the STL container classes. - */ - typedef typename BlockType::value_type value_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef std::size_t size_type; - - typedef - MatrixIterator > - iterator; - - typedef - MatrixIterator > - const_iterator; - - - /** - * Default constructor. - */ - BlockMatrixBase (); - - /** - * Copy the given matrix to this - * one. The operation throws an - * error if the sparsity patterns - * of the two involved matrices - * do not point to the same - * object, since in this case the - * copy operation is - * cheaper. Since this operation - * is notheless not for free, we - * do not make it available - * through operator=(), since - * this may lead to unwanted - * usage, e.g. in copy arguments - * to functions, which should - * really be arguments by - * reference. - * - * The source matrix may be a - * matrix of arbitrary type, as - * long as its data type is - * convertible to the data type - * of this matrix. - * - * The function returns a - * reference to this. - */ - template - BlockMatrixBase & - copy_from (const BlockMatrixType &source); - - /** - * Access the block with the - * given coordinates. - */ - BlockType & - block (const unsigned int row, - const unsigned int column); - - - /** - * Access the block with the - * given coordinates. Version for - * constant objects. - */ - const BlockType & - block (const unsigned int row, - const unsigned int column) const; - - /** - * Return the dimension of the - * image space. To remember: the - * matrix is of dimension - * $m \times n$. - */ - unsigned int m () const; - - /** - * Return the dimension of the - * range space. To remember: the - * matrix is of dimension - * $m \times n$. - */ - unsigned int n () const; - - - /** - * Return the number of blocks in - * a column. Returns zero if no - * sparsity pattern is presently - * associated to this matrix. - */ - unsigned int n_block_rows () const; - - /** - * Return the number of blocks in - * a row. Returns zero if no - * sparsity pattern is presently - * associated to this matrix. - */ - unsigned int n_block_cols () const; - - /** - * Set the element (i,j) - * to value. Throws an - * error if the entry does not - * exist or if value is - * not a finite number. Still, it - * is allowed to store zero - * values in non-existent fields. - */ - void set (const unsigned int i, - const unsigned int j, - const value_type value); - - /** - * Set all elements given in a - * FullMatrix into the sparse matrix - * locations given by - * indices. 
In other words, - * this function writes the elements - * in full_matrix into the - * calling matrix, using the - * local-to-global indexing specified - * by indices for both the - * rows and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be set anyway or - * they should be filtered away (and - * not change the previous content in - * the respective element if it - * exists). The default value is - * false, i.e., even zero - * values are treated. - */ - template - void set (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - template - void set (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be set anyway or - * they should be filtered away (and - * not change the previous content in - * the respective element if it - * exists). The default value is - * false, i.e., even zero - * values are treated. - */ - template - void set (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = false); - - /** - * Set several elements to values - * given by values in a - * given row in columns given by - * col_indices into the sparse - * matrix. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. - */ - template - void set (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const number *values, - const bool elide_zero_values = false); - - /** - * Add value to the - * element (i,j). Throws - * an error if the entry does not - * exist or if value is - * not a finite number. Still, it - * is allowed to store zero - * values in non-existent fields. - */ - void add (const unsigned int i, - const unsigned int j, - const value_type value); - - /** - * Add all elements given in a - * FullMatrix into sparse - * matrix locations given by - * indices. In other words, - * this function adds the elements in - * full_matrix to the - * respective entries in calling - * matrix, using the local-to-global - * indexing specified by - * indices for both the rows - * and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. 
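// [Editorial sketch -- not part of this commit.  Typical use of the add()
//  and set() variants documented here during finite element assembly.  The
//  helper function is hypothetical; `local_dof_indices' is assumed to be
//  sized to dofs_per_cell and `system_matrix' to have a sparsity pattern
//  containing all coupled entries.]
template <int dim>
void distribute_local_to_global
  (const typename DoFHandler<dim>::active_cell_iterator &cell,
   const FullMatrix<double>                             &cell_matrix,
   std::vector<unsigned int>                            &local_dof_indices,
   BlockSparseMatrix<double>                            &system_matrix)
{
  cell->get_dof_indices (local_dof_indices);

  // Scatter the cell matrix into the global block matrix, using the same
  // local-to-global map for rows and columns.  With the default
  // elide_zero_values == true, exact zeros in cell_matrix are not added.
  system_matrix.add (local_dof_indices, cell_matrix);

  // The set() counterpart overwrites instead of accumulating and, by
  // default (elide_zero_values == false), also writes the zeros:
  //   system_matrix.set (local_dof_indices, cell_matrix);
}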
- */ - template - void add (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - template - void add (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - template - void add (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = true); - - /** - * Add an array of values given by - * values in the given - * global matrix row at columns - * specified by col_indices in the - * sparse matrix. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - template - void add (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const number *values, - const bool elide_zero_values = true, - const bool col_indices_are_sorted = false); - - /** - * Return the value of the entry - * (i,j). This may be an - * expensive operation and you - * should always take care where - * to call this function. In - * order to avoid abuse, this - * function throws an exception - * if the wanted element does not - * exist in the matrix. - */ - value_type operator () (const unsigned int i, - const unsigned int j) const; - - /** - * This function is mostly like - * operator()() in that it - * returns the value of the - * matrix entry (i,j). The only - * difference is that if this - * entry does not exist in the - * sparsity pattern, then instead - * of raising an exception, zero - * is returned. While this may be - * convenient in some cases, note - * that it is simple to write - * algorithms that are slow - * compared to an optimal - * solution, since the sparsity - * of the matrix is not used. - */ - value_type el (const unsigned int i, - const unsigned int j) const; - - /** - * Return the main diagonal element in - * the ith row. This function - * throws an error if the matrix is not - * quadratic and also if the diagonal - * blocks of the matrix are not - * quadratic. - * - * This function is considerably - * faster than the operator()(), - * since for quadratic matrices, the - * diagonal entry may be the - * first to be stored in each row - * and access therefore does not - * involve searching for the - * right column number. - */ - value_type diag_element (const unsigned int i) const; - - /** - * Call the compress() function on all - * the subblocks of the matrix. - * - * - * See @ref GlossCompress "Compressing - * distributed objects" for more - * information. 
- */ - void compress (::dealii::VectorOperation::values operation - =::dealii::VectorOperation::unknown); - - /** - * Multiply the entire matrix by a - * fixed factor. - */ - BlockMatrixBase & operator *= (const value_type factor); - - /** - * Divide the entire matrix by a - * fixed factor. - */ - BlockMatrixBase & operator /= (const value_type factor); - - /** - * Add matrix scaled by - * factor to this matrix, - * i.e. the matrix factor*matrix - * is added to this. This - * function throws an error if the - * sparsity patterns of the two involved - * matrices do not point to the same - * object, since in this case the - * operation is cheaper. - * - * The source matrix may be a sparse - * matrix over an arbitrary underlying - * scalar type, as long as its data type - * is convertible to the data type of - * this matrix. - */ - template - void add (const value_type factor, - const BlockMatrixType &matrix); - - - /** - * Adding Matrix-vector - * multiplication. Add $M*src$ on - * $dst$ with $M$ being this - * matrix. - */ - template - void vmult_add (BlockVectorType &dst, - const BlockVectorType &src) const; - - /** - * Adding Matrix-vector - * multiplication. Add - * MTsrc to - * dst with M being - * this matrix. This function - * does the same as vmult_add() - * but takes the transposed - * matrix. - */ - template - void Tvmult_add (BlockVectorType &dst, - const BlockVectorType &src) const; - - /** - * Return the norm of the vector - * v with respect to the - * norm induced by this matrix, - * i.e. vTMv). This - * is useful, e.g. in the finite - * element context, where the - * LT-norm of a - * function equals the matrix - * norm with respect to the mass - * matrix of the vector - * representing the nodal values - * of the finite element - * function. Note that even - * though the function's name - * might suggest something - * different, for historic - * reasons not the norm but its - * square is returned, as defined - * above by the scalar product. - * - * Obviously, the matrix needs to - * be square for this operation. - */ - template - value_type - matrix_norm_square (const BlockVectorType &v) const; - - /** - * Compute the matrix scalar - * product $\left(u,Mv\right)$. - */ - template - value_type - matrix_scalar_product (const BlockVectorType &u, - const BlockVectorType &v) const; - - /** - * Compute the residual - * r=b-Ax. Write the - * residual into dst. - */ - template - value_type residual (BlockVectorType &dst, - const BlockVectorType &x, - const BlockVectorType &b) const; - - /** - * STL-like iterator with the - * first entry. - */ - iterator begin (); - - /** - * Final iterator. - */ - iterator end (); - - /** - * STL-like iterator with the - * first entry of row r. - */ - iterator begin (const unsigned int r); - - /** - * Final iterator of row r. - */ - iterator end (const unsigned int r); - /** - * STL-like iterator with the - * first entry. - */ - const_iterator begin () const; - - /** - * Final iterator. - */ - const_iterator end () const; - - /** - * STL-like iterator with the - * first entry of row r. - */ - const_iterator begin (const unsigned int r) const; - - /** - * Final iterator of row r. - */ - const_iterator end (const unsigned int r) const; - - /** - * Return a reference to the underlying - * BlockIndices data of the rows. - */ - const BlockIndices & get_row_indices () const; - - /** - * Return a reference to the underlying - * BlockIndices data of the rows. 
- */ - const BlockIndices & get_column_indices () const; - - /** - * Determine an estimate for the memory - * consumption (in bytes) of this - * object. Note that only the memory - * reserved on the current processor is - * returned in case this is called in - * an MPI-based program. - */ - std::size_t memory_consumption () const; - - /** @addtogroup Exceptions - * @{ */ - - /** - * Exception - */ - DeclException4 (ExcIncompatibleRowNumbers, - int, int, int, int, - << "The blocks [" << arg1 << ',' << arg2 << "] and [" - << arg3 << ',' << arg4 << "] have differing row numbers."); - /** - * Exception - */ - DeclException4 (ExcIncompatibleColNumbers, - int, int, int, int, - << "The blocks [" << arg1 << ',' << arg2 << "] and [" - << arg3 << ',' << arg4 << "] have differing column numbers."); - //@} - protected: - /** - * Release all memory and return - * to a state just like after - * having called the default - * constructor. It also forgets - * the sparsity pattern it was - * previously tied to. - * - * This calls clear for all - * sub-matrices and then resets this - * object to have no blocks at all. - * - * This function is protected - * since it may be necessary to - * release additional structures. - * A derived class can make it - * public again, if it is - * sufficient. - */ - void clear (); - - /** - * Index arrays for rows and columns. - */ - BlockIndices row_block_indices; - BlockIndices column_block_indices; - - /** - * Array of sub-matrices. - */ - Table<2,SmartPointer > > sub_objects; - - /** - * This function collects the - * sizes of the sub-objects and - * stores them in internal - * arrays, in order to be able to - * relay global indices into the - * matrix to indices into the - * subobjects. You *must* call - * this function each time after - * you have changed the size of - * the sub-objects. - * - * Derived classes should call this - * function whenever the size of the - * sub-objects has changed and the @p - * X_block_indices arrays need to be - * updated. - * - * Note that this function is not public - * since not all derived classes need to - * export its interface. For example, for - * the usual deal.II SparseMatrix class, - * the sizes are implicitly determined - * whenever reinit() is called, and - * individual blocks cannot be - * resized. For that class, this function - * therefore does not have to be - * public. On the other hand, for the - * PETSc classes, there is no associated - * sparsity pattern object that - * determines the block sizes, and for - * these the function needs to be - * publicly available. These classes - * therefore export this function. - */ - void collect_sizes (); - - /** - * Matrix-vector multiplication: - * let $dst = M*src$ with $M$ - * being this matrix. - * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. - */ - template - void vmult_block_block (BlockVectorType &dst, - const BlockVectorType &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. 
- * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. - */ - template - void vmult_block_nonblock (BlockVectorType &dst, - const VectorType &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. - */ - template - void vmult_nonblock_block (VectorType &dst, - const BlockVectorType &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. - */ - template - void vmult_nonblock_nonblock (VectorType &dst, - const VectorType &src) const; - - /** - * Matrix-vector multiplication: - * let $dst = M^T*src$ with $M$ - * being this matrix. This - * function does the same as - * vmult() but takes the - * transposed matrix. - * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. - */ - template - void Tvmult_block_block (BlockVectorType &dst, + public: + /** + * Typedef the type of the underlying + * matrix. + */ + typedef MatrixType BlockType; + + /** + * Type of matrix entries. In analogy to + * the STL container classes. + */ + typedef typename BlockType::value_type value_type; + typedef value_type *pointer; + typedef const value_type *const_pointer; + typedef value_type &reference; + typedef const value_type &const_reference; + typedef std::size_t size_type; + + typedef + MatrixIterator > + iterator; + + typedef + MatrixIterator > + const_iterator; + + + /** + * Default constructor. + */ + BlockMatrixBase (); + + /** + * Copy the given matrix to this + * one. The operation throws an + * error if the sparsity patterns + * of the two involved matrices + * do not point to the same + * object, since in this case the + * copy operation is + * cheaper. Since this operation + * is notheless not for free, we + * do not make it available + * through operator=(), since + * this may lead to unwanted + * usage, e.g. in copy arguments + * to functions, which should + * really be arguments by + * reference. 
+ * + * The source matrix may be a + * matrix of arbitrary type, as + * long as its data type is + * convertible to the data type + * of this matrix. + * + * The function returns a + * reference to this. + */ + template + BlockMatrixBase & + copy_from (const BlockMatrixType &source); + + /** + * Access the block with the + * given coordinates. + */ + BlockType & + block (const unsigned int row, + const unsigned int column); + + + /** + * Access the block with the + * given coordinates. Version for + * constant objects. + */ + const BlockType & + block (const unsigned int row, + const unsigned int column) const; + + /** + * Return the dimension of the + * image space. To remember: the + * matrix is of dimension + * $m \times n$. + */ + unsigned int m () const; + + /** + * Return the dimension of the + * range space. To remember: the + * matrix is of dimension + * $m \times n$. + */ + unsigned int n () const; + + + /** + * Return the number of blocks in + * a column. Returns zero if no + * sparsity pattern is presently + * associated to this matrix. + */ + unsigned int n_block_rows () const; + + /** + * Return the number of blocks in + * a row. Returns zero if no + * sparsity pattern is presently + * associated to this matrix. + */ + unsigned int n_block_cols () const; + + /** + * Set the element (i,j) + * to value. Throws an + * error if the entry does not + * exist or if value is + * not a finite number. Still, it + * is allowed to store zero + * values in non-existent fields. + */ + void set (const unsigned int i, + const unsigned int j, + const value_type value); + + /** + * Set all elements given in a + * FullMatrix into the sparse matrix + * locations given by + * indices. In other words, + * this function writes the elements + * in full_matrix into the + * calling matrix, using the + * local-to-global indexing specified + * by indices for both the + * rows and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be set anyway or + * they should be filtered away (and + * not change the previous content in + * the respective element if it + * exists). The default value is + * false, i.e., even zero + * values are treated. + */ + template + void set (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. + */ + template + void set (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be set anyway or + * they should be filtered away (and + * not change the previous content in + * the respective element if it + * exists). The default value is + * false, i.e., even zero + * values are treated. 
+ */ + template + void set (const unsigned int row, + const std::vector &col_indices, + const std::vector &values, + const bool elide_zero_values = false); + + /** + * Set several elements to values + * given by values in a + * given row in columns given by + * col_indices into the sparse + * matrix. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ + template + void set (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, + const number *values, + const bool elide_zero_values = false); + + /** + * Add value to the + * element (i,j). Throws + * an error if the entry does not + * exist or if value is + * not a finite number. Still, it + * is allowed to store zero + * values in non-existent fields. + */ + void add (const unsigned int i, + const unsigned int j, + const value_type value); + + /** + * Add all elements given in a + * FullMatrix into sparse + * matrix locations given by + * indices. In other words, + * this function adds the elements in + * full_matrix to the + * respective entries in calling + * matrix, using the local-to-global + * indexing specified by + * indices for both the rows + * and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + template + void add (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. + */ + template + void add (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + template + void add (const unsigned int row, + const std::vector &col_indices, + const std::vector &values, + const bool elide_zero_values = true); + + /** + * Add an array of values given by + * values in the given + * global matrix row at columns + * specified by col_indices in the + * sparse matrix. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. 
+ */ + template + void add (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, + const number *values, + const bool elide_zero_values = true, + const bool col_indices_are_sorted = false); + + /** + * Return the value of the entry + * (i,j). This may be an + * expensive operation and you + * should always take care where + * to call this function. In + * order to avoid abuse, this + * function throws an exception + * if the wanted element does not + * exist in the matrix. + */ + value_type operator () (const unsigned int i, + const unsigned int j) const; + + /** + * This function is mostly like + * operator()() in that it + * returns the value of the + * matrix entry (i,j). The only + * difference is that if this + * entry does not exist in the + * sparsity pattern, then instead + * of raising an exception, zero + * is returned. While this may be + * convenient in some cases, note + * that it is simple to write + * algorithms that are slow + * compared to an optimal + * solution, since the sparsity + * of the matrix is not used. + */ + value_type el (const unsigned int i, + const unsigned int j) const; + + /** + * Return the main diagonal element in + * the ith row. This function + * throws an error if the matrix is not + * quadratic and also if the diagonal + * blocks of the matrix are not + * quadratic. + * + * This function is considerably + * faster than the operator()(), + * since for quadratic matrices, the + * diagonal entry may be the + * first to be stored in each row + * and access therefore does not + * involve searching for the + * right column number. + */ + value_type diag_element (const unsigned int i) const; + + /** + * Call the compress() function on all + * the subblocks of the matrix. + * + * + * See @ref GlossCompress "Compressing + * distributed objects" for more + * information. + */ + void compress (::dealii::VectorOperation::values operation + =::dealii::VectorOperation::unknown); + + /** + * Multiply the entire matrix by a + * fixed factor. + */ + BlockMatrixBase &operator *= (const value_type factor); + + /** + * Divide the entire matrix by a + * fixed factor. + */ + BlockMatrixBase &operator /= (const value_type factor); + + /** + * Add matrix scaled by + * factor to this matrix, + * i.e. the matrix factor*matrix + * is added to this. This + * function throws an error if the + * sparsity patterns of the two involved + * matrices do not point to the same + * object, since in this case the + * operation is cheaper. + * + * The source matrix may be a sparse + * matrix over an arbitrary underlying + * scalar type, as long as its data type + * is convertible to the data type of + * this matrix. + */ + template + void add (const value_type factor, + const BlockMatrixType &matrix); + + + /** + * Adding Matrix-vector + * multiplication. Add $M*src$ on + * $dst$ with $M$ being this + * matrix. + */ + template + void vmult_add (BlockVectorType &dst, + const BlockVectorType &src) const; + + /** + * Adding Matrix-vector + * multiplication. Add + * MTsrc to + * dst with M being + * this matrix. This function + * does the same as vmult_add() + * but takes the transposed + * matrix. + */ + template + void Tvmult_add (BlockVectorType &dst, + const BlockVectorType &src) const; + + /** + * Return the norm of the vector + * v with respect to the + * norm induced by this matrix, + * i.e. vTMv). This + * is useful, e.g. 
in the finite + * element context, where the + * LT-norm of a + * function equals the matrix + * norm with respect to the mass + * matrix of the vector + * representing the nodal values + * of the finite element + * function. Note that even + * though the function's name + * might suggest something + * different, for historic + * reasons not the norm but its + * square is returned, as defined + * above by the scalar product. + * + * Obviously, the matrix needs to + * be square for this operation. + */ + template + value_type + matrix_norm_square (const BlockVectorType &v) const; + + /** + * Compute the matrix scalar + * product $\left(u,Mv\right)$. + */ + template + value_type + matrix_scalar_product (const BlockVectorType &u, + const BlockVectorType &v) const; + + /** + * Compute the residual + * r=b-Ax. Write the + * residual into dst. + */ + template + value_type residual (BlockVectorType &dst, + const BlockVectorType &x, + const BlockVectorType &b) const; + + /** + * STL-like iterator with the + * first entry. + */ + iterator begin (); + + /** + * Final iterator. + */ + iterator end (); + + /** + * STL-like iterator with the + * first entry of row r. + */ + iterator begin (const unsigned int r); + + /** + * Final iterator of row r. + */ + iterator end (const unsigned int r); + /** + * STL-like iterator with the + * first entry. + */ + const_iterator begin () const; + + /** + * Final iterator. + */ + const_iterator end () const; + + /** + * STL-like iterator with the + * first entry of row r. + */ + const_iterator begin (const unsigned int r) const; + + /** + * Final iterator of row r. + */ + const_iterator end (const unsigned int r) const; + + /** + * Return a reference to the underlying + * BlockIndices data of the rows. + */ + const BlockIndices &get_row_indices () const; + + /** + * Return a reference to the underlying + * BlockIndices data of the rows. + */ + const BlockIndices &get_column_indices () const; + + /** + * Determine an estimate for the memory + * consumption (in bytes) of this + * object. Note that only the memory + * reserved on the current processor is + * returned in case this is called in + * an MPI-based program. + */ + std::size_t memory_consumption () const; + + /** @addtogroup Exceptions + * @{ */ + + /** + * Exception + */ + DeclException4 (ExcIncompatibleRowNumbers, + int, int, int, int, + << "The blocks [" << arg1 << ',' << arg2 << "] and [" + << arg3 << ',' << arg4 << "] have differing row numbers."); + /** + * Exception + */ + DeclException4 (ExcIncompatibleColNumbers, + int, int, int, int, + << "The blocks [" << arg1 << ',' << arg2 << "] and [" + << arg3 << ',' << arg4 << "] have differing column numbers."); + //@} + protected: + /** + * Release all memory and return + * to a state just like after + * having called the default + * constructor. It also forgets + * the sparsity pattern it was + * previously tied to. + * + * This calls clear for all + * sub-matrices and then resets this + * object to have no blocks at all. + * + * This function is protected + * since it may be necessary to + * release additional structures. + * A derived class can make it + * public again, if it is + * sufficient. + */ + void clear (); + + /** + * Index arrays for rows and columns. + */ + BlockIndices row_block_indices; + BlockIndices column_block_indices; + + /** + * Array of sub-matrices. 
+ */ + Table<2,SmartPointer > > sub_objects; + + /** + * This function collects the + * sizes of the sub-objects and + * stores them in internal + * arrays, in order to be able to + * relay global indices into the + * matrix to indices into the + * subobjects. You *must* call + * this function each time after + * you have changed the size of + * the sub-objects. + * + * Derived classes should call this + * function whenever the size of the + * sub-objects has changed and the @p + * X_block_indices arrays need to be + * updated. + * + * Note that this function is not public + * since not all derived classes need to + * export its interface. For example, for + * the usual deal.II SparseMatrix class, + * the sizes are implicitly determined + * whenever reinit() is called, and + * individual blocks cannot be + * resized. For that class, this function + * therefore does not have to be + * public. On the other hand, for the + * PETSc classes, there is no associated + * sparsity pattern object that + * determines the block sizes, and for + * these the function needs to be + * publicly available. These classes + * therefore export this function. + */ + void collect_sizes (); + + /** + * Matrix-vector multiplication: + * let $dst = M*src$ with $M$ + * being this matrix. + * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template + void vmult_block_block (BlockVectorType &dst, + const BlockVectorType &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template + void vmult_block_nonblock (BlockVectorType &dst, + const VectorType &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template + void vmult_nonblock_block (VectorType &dst, const BlockVectorType &src) const; - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. 
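// [Editorial sketch -- not part of this commit.  The protected
//  vmult_*_* / Tvmult_*_* helpers documented above back the public
//  vmult()/Tvmult() of derived classes such as BlockSparseMatrix.  A
//  caller-side sketch with a hypothetical function name, assuming a square
//  block matrix and block-compatible vectors:]
void apply_operator (const BlockSparseMatrix<double> &system_matrix,
                     const BlockVector<double>       &src,
                     BlockVector<double>             &dst)
{
  system_matrix.vmult      (dst, src);   // dst  = M src
  system_matrix.vmult_add  (dst, src);   // dst += M src
  system_matrix.Tvmult_add (dst, src);   // dst += M^T src (valid since M is square)
}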
- */ - template - void Tvmult_block_nonblock (BlockVectorType &dst, + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template + void vmult_nonblock_nonblock (VectorType &dst, const VectorType &src) const; - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. - */ - template - void Tvmult_nonblock_block (VectorType &dst, - const BlockVectorType &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - * - * Due to problems with deriving template - * arguments between the block and - * non-block versions of the vmult/Tvmult - * functions, the actual functions are - * implemented in derived classes, with - * implementations forwarding the calls - * to the implementations provided here - * under a unique name for which template - * arguments can be derived by the - * compiler. - */ - template - void Tvmult_nonblock_nonblock (VectorType &dst, - const VectorType &src) const; - - - protected: - - /** - * Some matrix types, in particular PETSc, - * need to synchronize set and add - * operations. This has to be done for all - * matrices in the BlockMatrix. - * This routine prepares adding of elements - * by notifying all blocks. Called by all - * internal routines before adding - * elements. - */ - void prepare_add_operation(); - - /** - * Notifies all blocks to let them prepare - * for setting elements, see - * prepare_add_operation(). - */ - void prepare_set_operation(); - - - private: - /** - * Temporary vector for counting the - * elements written into the - * individual blocks when doing a - * collective add or set. - */ - std::vector counter_within_block; - - /** - * Temporary vector for column - * indices on each block when writing - * local to global data on each - * sparse matrix. - */ - std::vector > column_indices; - - /** - * Temporary vector for storing the - * local values (they need to be - * reordered when writing local to - * global). - */ - std::vector > column_values; - - - /** - * Make the iterator class a - * friend. We have to work around - * a compiler bug here again. - */ - template - friend class BlockMatrixIterators::Accessor; - - template - friend class MatrixIterator; + /** + * Matrix-vector multiplication: + * let $dst = M^T*src$ with $M$ + * being this matrix. This + * function does the same as + * vmult() but takes the + * transposed matrix. 
+ * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template + void Tvmult_block_block (BlockVectorType &dst, + const BlockVectorType &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template - void Tvmult_block_nonblock (BlockVectorType &dst, ++ void Tvmult_block_nonblock (BlockVectorType &dst, + const VectorType &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template + void Tvmult_nonblock_block (VectorType &dst, + const BlockVectorType &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + * + * Due to problems with deriving template + * arguments between the block and + * non-block versions of the vmult/Tvmult + * functions, the actual functions are + * implemented in derived classes, with + * implementations forwarding the calls + * to the implementations provided here + * under a unique name for which template + * arguments can be derived by the + * compiler. + */ + template + void Tvmult_nonblock_nonblock (VectorType &dst, + const VectorType &src) const; + + + protected: + + /** + * Some matrix types, in particular PETSc, + * need to synchronize set and add + * operations. This has to be done for all + * matrices in the BlockMatrix. + * This routine prepares adding of elements + * by notifying all blocks. Called by all + * internal routines before adding + * elements. + */ + void prepare_add_operation(); + + /** + * Notifies all blocks to let them prepare + * for setting elements, see + * prepare_add_operation(). + */ + void prepare_set_operation(); + + + private: + /** + * Temporary vector for counting the + * elements written into the + * individual blocks when doing a + * collective add or set. + */ + std::vector counter_within_block; + + /** + * Temporary vector for column + * indices on each block when writing + * local to global data on each + * sparse matrix. + */ + std::vector > column_indices; + + /** + * Temporary vector for storing the + * local values (they need to be + * reordered when writing local to + * global). + */ + std::vector > column_values; + + + /** + * Make the iterator class a + * friend. 
We have to work around + * a compiler bug here again. + */ + template + friend class BlockMatrixIterators::Accessor; + + template + friend class MatrixIterator; }; @@@ -1289,12 -1289,12 +1289,12 @@@ namespace BlockMatrixIterator template inline Accessor::Accessor ( - const BlockMatrix *matrix, + const BlockMatrix *matrix, const unsigned int row, const unsigned int col) - : - matrix(matrix), - base_iterator(matrix->block(0,0).begin()) + : + matrix(matrix), + base_iterator(matrix->block(0,0).begin()) { Assert(col==0, ExcNotImplemented()); @@@ -1498,16 -1498,16 +1498,16 @@@ template inline Accessor::Accessor ( - BlockMatrix *matrix, + BlockMatrix *matrix, const unsigned int row, const unsigned int col) - : - matrix(matrix), - base_iterator(matrix->block(0,0).begin()) + : + matrix(matrix), + base_iterator(matrix->block(0,0).begin()) { Assert(col==0, ExcNotImplemented()); - // check if this is a regular row or - // the end of the matrix + // check if this is a regular row or + // the end of the matrix if (row < matrix->m()) { const std::pair indices @@@ -2424,10 -2424,10 +2424,10 @@@ vmult_nonblock_block (VectorType &ds template template + class VectorType> void BlockMatrixBase:: -vmult_block_nonblock (BlockVectorType &dst, +vmult_block_nonblock (BlockVectorType &dst, const VectorType &src) const { Assert (dst.n_blocks() == n_block_rows(), @@@ -2509,10 -2509,10 +2509,10 @@@ Tvmult_block_block (BlockVectorTyp template template + class VectorType> void BlockMatrixBase:: -Tvmult_block_nonblock (BlockVectorType &dst, +Tvmult_block_nonblock (BlockVectorType &dst, const VectorType &src) const { Assert (dst.n_blocks() == n_block_cols(), diff --cc deal.II/include/deal.II/lac/block_sparse_matrix.h index 1e423bb5a6,32abdf9674..5bc235ae13 --- a/deal.II/include/deal.II/lac/block_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/block_sparse_matrix.h @@@ -44,416 -44,416 +44,416 @@@ DEAL_II_NAMESPACE_OPE template class BlockSparseMatrix : public BlockMatrixBase > { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockMatrixBase > BaseClass; - - /** - * Typedef the type of the underlying - * matrix. - */ - typedef typename BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. - */ - typedef typename BaseClass::value_type value_type; - typedef typename BaseClass::pointer pointer; - typedef typename BaseClass::const_pointer const_pointer; - typedef typename BaseClass::reference reference; - typedef typename BaseClass::const_reference const_reference; - typedef typename BaseClass::size_type size_type; - typedef typename BaseClass::iterator iterator; - typedef typename BaseClass::const_iterator const_iterator; - - /** - * @name Constructors and initalization - */ + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ + typedef BlockMatrixBase > BaseClass; + + /** + * Typedef the type of the underlying + * matrix. + */ + typedef typename BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. 
+ */ + typedef typename BaseClass::value_type value_type; + typedef typename BaseClass::pointer pointer; + typedef typename BaseClass::const_pointer const_pointer; + typedef typename BaseClass::reference reference; + typedef typename BaseClass::const_reference const_reference; + typedef typename BaseClass::size_type size_type; + typedef typename BaseClass::iterator iterator; + typedef typename BaseClass::const_iterator const_iterator; + + /** + * @name Constructors and initalization + */ //@{ - /** - * Constructor; initializes the - * matrix to be empty, without - * any structure, i.e. the - * matrix is not usable at - * all. This constructor is - * therefore only useful for - * matrices which are members of - * a class. All other matrices - * should be created at a point - * in the data flow where all - * necessary information is - * available. - * - * You have to initialize the - * matrix before usage with - * reinit(BlockSparsityPattern). The - * number of blocks per row and - * column are then determined by - * that function. - */ - BlockSparseMatrix (); - - /** - * Constructor. Takes the given - * matrix sparsity structure to - * represent the sparsity pattern - * of this matrix. You can change - * the sparsity pattern later on - * by calling the reinit() - * function. - * - * This constructor initializes - * all sub-matrices with the - * sub-sparsity pattern within - * the argument. - * - * You have to make sure that the - * lifetime of the sparsity - * structure is at least as long - * as that of this matrix or as - * long as reinit() is not called - * with a new sparsity structure. - */ - BlockSparseMatrix (const BlockSparsityPattern &sparsity); - - /** - * Destructor. - */ - virtual ~BlockSparseMatrix (); - - - - /** - * Pseudo copy operator only copying - * empty objects. The sizes of the block - * matrices need to be the same. - */ - BlockSparseMatrix & - operator = (const BlockSparseMatrix &); - - /** - * This operator assigns a scalar to a - * matrix. Since this does usually not - * make much sense (should we set all - * matrix entries to this value? Only - * the nonzero entries of the sparsity - * pattern?), this operation is only - * allowed if the actual value to be - * assigned is zero. This operator only - * exists to allow for the obvious - * notation matrix=0, which - * sets all elements of the matrix to - * zero, but keep the sparsity pattern - * previously used. - */ - BlockSparseMatrix & - operator = (const double d); - - /** - * Release all memory and return - * to a state just like after - * having called the default - * constructor. It also forgets - * the sparsity pattern it was - * previously tied to. - * - * This calls SparseMatrix::clear on all - * sub-matrices and then resets this - * object to have no blocks at all. - */ - void clear (); - - /** - * Reinitialize the sparse matrix - * with the given sparsity - * pattern. The latter tells the - * matrix how many nonzero - * elements there need to be - * reserved. - * - * Basically, this function only - * calls SparseMatrix::reinit() of the - * sub-matrices with the block - * sparsity patterns of the - * parameter. - * - * The elements of the matrix are - * set to zero by this function. - */ - virtual void reinit (const BlockSparsityPattern &sparsity); + /** + * Constructor; initializes the + * matrix to be empty, without + * any structure, i.e. the + * matrix is not usable at + * all. This constructor is + * therefore only useful for + * matrices which are members of + * a class. 
All other matrices + * should be created at a point + * in the data flow where all + * necessary information is + * available. + * + * You have to initialize the + * matrix before usage with + * reinit(BlockSparsityPattern). The + * number of blocks per row and + * column are then determined by + * that function. + */ + BlockSparseMatrix (); + + /** + * Constructor. Takes the given + * matrix sparsity structure to + * represent the sparsity pattern + * of this matrix. You can change + * the sparsity pattern later on + * by calling the reinit() + * function. + * + * This constructor initializes + * all sub-matrices with the + * sub-sparsity pattern within + * the argument. + * + * You have to make sure that the + * lifetime of the sparsity + * structure is at least as long + * as that of this matrix or as + * long as reinit() is not called + * with a new sparsity structure. + */ + BlockSparseMatrix (const BlockSparsityPattern &sparsity); + + /** + * Destructor. + */ + virtual ~BlockSparseMatrix (); + + + + /** + * Pseudo copy operator only copying + * empty objects. The sizes of the block + * matrices need to be the same. + */ + BlockSparseMatrix & + operator = (const BlockSparseMatrix &); + + /** + * This operator assigns a scalar to a + * matrix. Since this does usually not + * make much sense (should we set all + * matrix entries to this value? Only + * the nonzero entries of the sparsity + * pattern?), this operation is only + * allowed if the actual value to be + * assigned is zero. This operator only + * exists to allow for the obvious + * notation matrix=0, which + * sets all elements of the matrix to + * zero, but keep the sparsity pattern + * previously used. + */ + BlockSparseMatrix & + operator = (const double d); + + /** + * Release all memory and return + * to a state just like after + * having called the default + * constructor. It also forgets + * the sparsity pattern it was + * previously tied to. + * + * This calls SparseMatrix::clear on all + * sub-matrices and then resets this + * object to have no blocks at all. + */ + void clear (); + + /** + * Reinitialize the sparse matrix + * with the given sparsity + * pattern. The latter tells the + * matrix how many nonzero + * elements there need to be + * reserved. + * + * Basically, this function only + * calls SparseMatrix::reinit() of the + * sub-matrices with the block + * sparsity patterns of the + * parameter. + * + * The elements of the matrix are + * set to zero by this function. + */ + virtual void reinit (const BlockSparsityPattern &sparsity); //@} - /** - * @name Information on the matrix - */ + /** + * @name Information on the matrix + */ //@{ - /** - * Return whether the object is - * empty. It is empty if either - * both dimensions are zero or no - * BlockSparsityPattern is - * associated. - */ - bool empty () const; - - /** - * Return the number of entries - * in a specific row. - */ - unsigned int get_row_length (const unsigned int row) const; - - /** - * Return the number of nonzero - * elements of this - * matrix. Actually, it returns - * the number of entries in the - * sparsity pattern; if any of - * the entries should happen to - * be zero, it is counted anyway. - */ - unsigned int n_nonzero_elements () const; - - /** - * Return the number of actually - * nonzero elements. Just counts the - * number of actually nonzero elements - * (with absolute value larger than - * threshold) of all the blocks. 
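
As a usage recap of the constructor and reinit() semantics spelled out above, here is a minimal, self-contained sketch; the 2x2 block layout, the block sizes and the entries-per-row guess are invented for illustration.

#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/block_sparse_matrix.h>

// Not part of the patch: a minimal setup sketch with invented sizes.
void block_matrix_setup ()
{
  using namespace dealii;

  const unsigned int n_u = 8, n_p = 4;          // invented block sizes

  BlockSparsityPattern sparsity (2, 2);         // 2x2 block layout
  sparsity.block(0,0).reinit (n_u, n_u, 5);
  sparsity.block(0,1).reinit (n_u, n_p, 5);
  sparsity.block(1,0).reinit (n_p, n_u, 5);
  sparsity.block(1,1).reinit (n_p, n_p, 5);
  sparsity.collect_sizes ();
  // ... add the entries of each block here ...
  sparsity.compress ();

  BlockSparseMatrix<double> matrix;
  matrix.reinit (sparsity);    // the sparsity object must outlive the matrix
  matrix = 0;                  // allowed: zero the entries, keep the pattern

  const unsigned int nnz = matrix.n_nonzero_elements ();
  (void) nnz;
}
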
- */ - unsigned int n_actually_nonzero_elements (const double threshold = 0.0) const; - - /** - * Return a (constant) reference - * to the underlying sparsity - * pattern of this matrix. - * - * Though the return value is - * declared const, you - * should be aware that it may - * change if you call any - * nonconstant function of - * objects which operate on it. - */ - const BlockSparsityPattern & - get_sparsity_pattern () const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. - */ - std::size_t memory_consumption () const; + /** + * Return whether the object is + * empty. It is empty if either + * both dimensions are zero or no + * BlockSparsityPattern is + * associated. + */ + bool empty () const; + + /** + * Return the number of entries + * in a specific row. + */ + unsigned int get_row_length (const unsigned int row) const; + + /** + * Return the number of nonzero + * elements of this + * matrix. Actually, it returns + * the number of entries in the + * sparsity pattern; if any of + * the entries should happen to + * be zero, it is counted anyway. + */ + unsigned int n_nonzero_elements () const; + + /** + * Return the number of actually + * nonzero elements. Just counts the + * number of actually nonzero elements + * (with absolute value larger than + * threshold) of all the blocks. + */ + unsigned int n_actually_nonzero_elements (const double threshold = 0.0) const; + + /** + * Return a (constant) reference + * to the underlying sparsity + * pattern of this matrix. + * + * Though the return value is + * declared const, you + * should be aware that it may + * change if you call any + * nonconstant function of + * objects which operate on it. + */ + const BlockSparsityPattern & + get_sparsity_pattern () const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. + */ + std::size_t memory_consumption () const; //@} - /** - * @name Multiplications - */ + /** + * @name Multiplications + */ //@{ - /** - * Matrix-vector multiplication: - * let $dst = M*src$ with $M$ - * being this matrix. - */ - template - void vmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - */ - template - void vmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - template - void vmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - */ - template - void vmult (Vector &dst, - const Vector &src) const; - - /** - * Matrix-vector multiplication: - * let $dst = M^T*src$ with $M$ - * being this matrix. This - * function does the same as - * vmult() but takes the - * transposed matrix. - */ - template - void Tvmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - template - void Tvmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. 
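
A short sketch of the multiplication interface listed above; 'matrix' is assumed to be an already initialized BlockSparseMatrix<double> whose block structure matches the invented vector layout (two square blocks of size 8).

#include <deal.II/lac/block_sparse_matrix.h>
#include <deal.II/lac/block_vector.h>

// 'matrix' is assumed to be initialized with two square blocks of size 8.
void multiply (const dealii::BlockSparseMatrix<double> &matrix)
{
  dealii::BlockVector<double> x (2, 8), y (2, 8);
  for (unsigned int i = 0; i < x.size(); ++i)
    x(i) = 1.0;

  matrix.vmult  (y, x);        // y = M   * x
  matrix.Tvmult (y, x);        // y = M^T * x
}
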
- */ - template - void Tvmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - */ - template - void Tvmult (Vector &dst, - const Vector &src) const; + /** + * Matrix-vector multiplication: + * let $dst = M*src$ with $M$ + * being this matrix. + */ + template + void vmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ + template + void vmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ + template + void vmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + template + void vmult (Vector &dst, + const Vector &src) const; + + /** + * Matrix-vector multiplication: + * let $dst = M^T*src$ with $M$ + * being this matrix. This + * function does the same as + * vmult() but takes the + * transposed matrix. + */ + template + void Tvmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ + template - void Tvmult (BlockVector &dst, ++ void Tvmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ + template + void Tvmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + template + void Tvmult (Vector &dst, + const Vector &src) const; //@} - /** - * @name Preconditioning methods - */ + /** + * @name Preconditioning methods + */ //@{ - /** - * Apply the Jacobi - * preconditioner, which - * multiplies every element of - * the src vector by the - * inverse of the respective - * diagonal element and - * multiplies the result with the - * relaxation parameter - * omega. - * - * All diagonal blocks must be - * square matrices for this - * operation. - */ - template - void precondition_Jacobi (BlockVectorType &dst, - const BlockVectorType &src, - const number omega = 1.) const; - - /** - * Apply the Jacobi - * preconditioner to a simple vector. - * - * The matrix must be a single - * square block for this. - */ - template - void precondition_Jacobi (Vector &dst, - const Vector &src, - const number omega = 1.) const; + /** + * Apply the Jacobi + * preconditioner, which + * multiplies every element of + * the src vector by the + * inverse of the respective + * diagonal element and + * multiplies the result with the + * relaxation parameter + * omega. + * + * All diagonal blocks must be + * square matrices for this + * operation. + */ + template + void precondition_Jacobi (BlockVectorType &dst, + const BlockVectorType &src, + const number omega = 1.) const; + + /** + * Apply the Jacobi + * preconditioner to a simple vector. + * + * The matrix must be a single + * square block for this. 
+ */ + template + void precondition_Jacobi (Vector &dst, + const Vector &src, + const number omega = 1.) const; //@} - /** - * @name Input/Output - */ + /** + * @name Input/Output + */ //@{ - /** - * Print the matrix in the usual - * format, i.e. as a matrix and - * not as a list of nonzero - * elements. For better - * readability, elements not in - * the matrix are displayed as - * empty space, while matrix - * elements which are explicitly - * set to zero are displayed as - * such. - * - * The parameters allow for a - * flexible setting of the output - * format: precision and - * scientific are used - * to determine the number - * format, where scientific = - * false means fixed point - * notation. A zero entry for - * width makes the - * function compute a width, but - * it may be changed to a - * positive value, if output is - * crude. - * - * Additionally, a character for - * an empty value may be - * specified. - * - * Finally, the whole matrix can - * be multiplied with a common - * denominator to produce more - * readable output, even - * integers. - * - * @attention This function may - * produce large amounts - * of output if applied to a - * large matrix! - */ - void print_formatted (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const unsigned int width = 0, - const char *zero_string = " ", - const double denominator = 1.) const; + /** + * Print the matrix in the usual + * format, i.e. as a matrix and + * not as a list of nonzero + * elements. For better + * readability, elements not in + * the matrix are displayed as + * empty space, while matrix + * elements which are explicitly + * set to zero are displayed as + * such. + * + * The parameters allow for a + * flexible setting of the output + * format: precision and + * scientific are used + * to determine the number + * format, where scientific = + * false means fixed point + * notation. A zero entry for + * width makes the + * function compute a width, but + * it may be changed to a + * positive value, if output is + * crude. + * + * Additionally, a character for + * an empty value may be + * specified. + * + * Finally, the whole matrix can + * be multiplied with a common + * denominator to produce more + * readable output, even + * integers. + * + * @attention This function may + * produce large amounts + * of output if applied to a + * large matrix! + */ + void print_formatted (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const unsigned int width = 0, + const char *zero_string = " ", + const double denominator = 1.) const; //@} - /** @addtogroup Exceptions - * @{ */ - - /** - * Exception - */ - DeclException0 (ExcBlockDimensionMismatch); - //@} - - private: - /** - * Pointer to the block sparsity - * pattern used for this - * matrix. In order to guarantee - * that it is not deleted while - * still in use, we subscribe to - * it using the SmartPointer - * class. - */ - SmartPointer > sparsity_pattern; + /** @addtogroup Exceptions + * @{ */ + + /** + * Exception + */ + DeclException0 (ExcBlockDimensionMismatch); + //@} + + private: + /** + * Pointer to the block sparsity + * pattern used for this + * matrix. In order to guarantee + * that it is not deleted while + * still in use, we subscribe to + * it using the SmartPointer + * class. 
+ */ + SmartPointer > sparsity_pattern; }; diff --cc deal.II/include/deal.II/lac/chunk_sparse_matrix.h index 15f2ef006a,4784340b14..1ca267a262 --- a/deal.II/include/deal.II/lac/chunk_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/chunk_sparse_matrix.h @@@ -48,1148 -48,1148 +48,1148 @@@ template class FullMat template class ChunkSparseMatrix : public virtual Subscriptor { - public: - /** - * Type of matrix entries. In analogy to - * the STL container classes. - */ - typedef number value_type; - - /** - * Declare a type that has holds - * real-valued numbers with the - * same precision as the template - * argument to this class. If the - * template argument of this - * class is a real data type, - * then real_type equals the - * template argument. If the - * template argument is a - * std::complex type then - * real_type equals the type - * underlying the complex - * numbers. - * - * This typedef is used to - * represent the return type of - * norms. - */ - typedef typename numbers::NumberTraits::real_type real_type; - - /** - * A structure that describes some of the - * traits of this class in terms of its - * run-time behavior. Some other classes - * (such as the block matrix classes) - * that take one or other of the matrix - * classes as its template parameters can - * tune their behavior based on the - * variables in this class. - */ - struct Traits - { - /** - * It is safe to elide additions of - * zeros to individual elements of - * this matrix. - */ - static const bool zero_addition_can_be_elided = true; - }; - - /** - * @name Constructors and initalization. - */ + public: + /** + * Type of matrix entries. In analogy to + * the STL container classes. + */ + typedef number value_type; + + /** + * Declare a type that has holds + * real-valued numbers with the + * same precision as the template + * argument to this class. If the + * template argument of this + * class is a real data type, + * then real_type equals the + * template argument. If the + * template argument is a + * std::complex type then + * real_type equals the type + * underlying the complex + * numbers. + * + * This typedef is used to + * represent the return type of + * norms. + */ + typedef typename numbers::NumberTraits::real_type real_type; + + /** + * A structure that describes some of the + * traits of this class in terms of its + * run-time behavior. Some other classes + * (such as the block matrix classes) + * that take one or other of the matrix + * classes as its template parameters can + * tune their behavior based on the + * variables in this class. + */ + struct Traits + { + /** + * It is safe to elide additions of + * zeros to individual elements of + * this matrix. + */ + static const bool zero_addition_can_be_elided = true; + }; + + /** + * @name Constructors and initalization. + */ //@{ - /** - * Constructor; initializes the matrix to - * be empty, without any structure, i.e. - * the matrix is not usable at all. This - * constructor is therefore only useful - * for matrices which are members of a - * class. All other matrices should be - * created at a point in the data flow - * where all necessary information is - * available. - * - * You have to initialize - * the matrix before usage with - * reinit(const ChunkSparsityPattern&). - */ - ChunkSparseMatrix (); - - /** - * Copy constructor. This constructor is - * only allowed to be called if the matrix - * to be copied is empty. This is for the - * same reason as for the - * ChunkSparsityPattern, see there for the - * details. 
- * - * If you really want to copy a whole - * matrix, you can do so by using the - * copy_from() function. - */ - ChunkSparseMatrix (const ChunkSparseMatrix &); - - /** - * Constructor. Takes the given - * matrix sparsity structure to - * represent the sparsity pattern - * of this matrix. You can change - * the sparsity pattern later on - * by calling the reinit(const - * ChunkSparsityPattern&) function. - * - * You have to make sure that the - * lifetime of the sparsity - * structure is at least as long - * as that of this matrix or as - * long as reinit(const - * ChunkSparsityPattern&) is not - * called with a new sparsity - * pattern. - * - * The constructor is marked - * explicit so as to disallow - * that someone passes a sparsity - * pattern in place of a sparse - * matrix to some function, where - * an empty matrix would be - * generated then. - */ - explicit ChunkSparseMatrix (const ChunkSparsityPattern &sparsity); - - /** - * Copy constructor: initialize - * the matrix with the identity - * matrix. This constructor will - * throw an exception if the - * sizes of the sparsity pattern - * and the identity matrix do not - * coincide, or if the sparsity - * pattern does not provide for - * nonzero entries on the entire - * diagonal. - */ - ChunkSparseMatrix (const ChunkSparsityPattern &sparsity, - const IdentityMatrix &id); - - /** - * Destructor. Free all memory, but do not - * release the memory of the sparsity - * structure. - */ - virtual ~ChunkSparseMatrix (); - - /** - * Copy operator. Since copying - * entire sparse matrices is a - * very expensive operation, we - * disallow doing so except for - * the special case of empty - * matrices of size zero. This - * doesn't seem particularly - * useful, but is exactly what - * one needs if one wanted to - * have a - * std::vector@ - * @>: in that case, one - * can create a vector (which - * needs the ability to copy - * objects) of empty matrices - * that are then later filled - * with something useful. - */ - ChunkSparseMatrix& operator = (const ChunkSparseMatrix &); - - /** - * Copy operator: initialize - * the matrix with the identity - * matrix. This operator will - * throw an exception if the - * sizes of the sparsity pattern - * and the identity matrix do not - * coincide, or if the sparsity - * pattern does not provide for - * nonzero entries on the entire - * diagonal. - */ - ChunkSparseMatrix & - operator= (const IdentityMatrix &id); - - /** - * This operator assigns a scalar to - * a matrix. Since this does usually - * not make much sense (should we set - * all matrix entries to this value? - * Only the nonzero entries of the - * sparsity pattern?), this operation - * is only allowed if the actual - * value to be assigned is zero. This - * operator only exists to allow for - * the obvious notation - * matrix=0, which sets all - * elements of the matrix to zero, - * but keep the sparsity pattern - * previously used. - */ - ChunkSparseMatrix & operator = (const double d); - - /** - * Reinitialize the sparse matrix - * with the given sparsity - * pattern. The latter tells the - * matrix how many nonzero - * elements there need to be - * reserved. - * - * Regarding memory allocation, - * the same applies as said - * above. - * - * You have to make sure that the - * lifetime of the sparsity - * structure is at least as long - * as that of this matrix or as - * long as reinit(const - * ChunkSparsityPattern &) is not - * called with a new sparsity - * structure. 
- * - * The elements of the matrix are - * set to zero by this function. - */ - virtual void reinit (const ChunkSparsityPattern &sparsity); - - /** - * Release all memory and return - * to a state just like after - * having called the default - * constructor. It also forgets - * the sparsity pattern it was - * previously tied to. - */ - virtual void clear (); + /** + * Constructor; initializes the matrix to + * be empty, without any structure, i.e. + * the matrix is not usable at all. This + * constructor is therefore only useful + * for matrices which are members of a + * class. All other matrices should be + * created at a point in the data flow + * where all necessary information is + * available. + * + * You have to initialize + * the matrix before usage with + * reinit(const ChunkSparsityPattern&). + */ + ChunkSparseMatrix (); + + /** + * Copy constructor. This constructor is + * only allowed to be called if the matrix + * to be copied is empty. This is for the + * same reason as for the + * ChunkSparsityPattern, see there for the + * details. + * + * If you really want to copy a whole + * matrix, you can do so by using the + * copy_from() function. + */ + ChunkSparseMatrix (const ChunkSparseMatrix &); + + /** + * Constructor. Takes the given + * matrix sparsity structure to + * represent the sparsity pattern + * of this matrix. You can change + * the sparsity pattern later on + * by calling the reinit(const + * ChunkSparsityPattern&) function. + * + * You have to make sure that the + * lifetime of the sparsity + * structure is at least as long + * as that of this matrix or as + * long as reinit(const + * ChunkSparsityPattern&) is not + * called with a new sparsity + * pattern. + * + * The constructor is marked + * explicit so as to disallow + * that someone passes a sparsity + * pattern in place of a sparse + * matrix to some function, where + * an empty matrix would be + * generated then. + */ + explicit ChunkSparseMatrix (const ChunkSparsityPattern &sparsity); + + /** + * Copy constructor: initialize + * the matrix with the identity + * matrix. This constructor will + * throw an exception if the + * sizes of the sparsity pattern + * and the identity matrix do not + * coincide, or if the sparsity + * pattern does not provide for + * nonzero entries on the entire + * diagonal. + */ + ChunkSparseMatrix (const ChunkSparsityPattern &sparsity, - const IdentityMatrix &id); ++ const IdentityMatrix &id); + + /** + * Destructor. Free all memory, but do not + * release the memory of the sparsity + * structure. + */ + virtual ~ChunkSparseMatrix (); + + /** + * Copy operator. Since copying + * entire sparse matrices is a + * very expensive operation, we + * disallow doing so except for + * the special case of empty + * matrices of size zero. This + * doesn't seem particularly + * useful, but is exactly what + * one needs if one wanted to + * have a + * std::vector@ + * @>: in that case, one + * can create a vector (which + * needs the ability to copy + * objects) of empty matrices + * that are then later filled + * with something useful. + */ + ChunkSparseMatrix &operator = (const ChunkSparseMatrix &); + + /** + * Copy operator: initialize + * the matrix with the identity + * matrix. This operator will + * throw an exception if the + * sizes of the sparsity pattern + * and the identity matrix do not + * coincide, or if the sparsity + * pattern does not provide for + * nonzero entries on the entire + * diagonal. 
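
To make the constructor variants above concrete, a hedged sketch follows. The ChunkSparsityPattern::reinit() signature used here, (rows, columns, maximum entries per row, chunk size), is an assumption made for the example, as are the dimensions and the chunk size.

#include <deal.II/lac/chunk_sparsity_pattern.h>
#include <deal.II/lac/chunk_sparse_matrix.h>
#include <deal.II/lac/identity_matrix.h>

// Not part of the patch; dimensions, chunk size and the pattern
// reinit() signature are assumptions made for the sake of the example.
void chunk_setup ()
{
  using namespace dealii;

  ChunkSparsityPattern pattern;
  pattern.reinit (16, 16, 4, /* chunk_size = */ 2);
  for (unsigned int i = 0; i < 16; ++i)
    pattern.add (i, i);                       // at least the diagonal
  pattern.compress ();

  ChunkSparseMatrix<double> zero_matrix (pattern);                   // empty
  ChunkSparseMatrix<double> identity    (pattern, IdentityMatrix (16));
}
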
+ */ + ChunkSparseMatrix & - operator= (const IdentityMatrix &id); ++ operator= (const IdentityMatrix &id); + + /** + * This operator assigns a scalar to + * a matrix. Since this does usually + * not make much sense (should we set + * all matrix entries to this value? + * Only the nonzero entries of the + * sparsity pattern?), this operation + * is only allowed if the actual + * value to be assigned is zero. This + * operator only exists to allow for + * the obvious notation + * matrix=0, which sets all + * elements of the matrix to zero, + * but keep the sparsity pattern + * previously used. + */ + ChunkSparseMatrix &operator = (const double d); + + /** + * Reinitialize the sparse matrix + * with the given sparsity + * pattern. The latter tells the + * matrix how many nonzero + * elements there need to be + * reserved. + * + * Regarding memory allocation, + * the same applies as said + * above. + * + * You have to make sure that the + * lifetime of the sparsity + * structure is at least as long + * as that of this matrix or as + * long as reinit(const + * ChunkSparsityPattern &) is not + * called with a new sparsity + * structure. + * + * The elements of the matrix are + * set to zero by this function. + */ + virtual void reinit (const ChunkSparsityPattern &sparsity); + + /** + * Release all memory and return + * to a state just like after + * having called the default + * constructor. It also forgets + * the sparsity pattern it was + * previously tied to. + */ + virtual void clear (); //@} - /** - * @name Information on the matrix - */ + /** + * @name Information on the matrix + */ //@{ - /** - * Return whether the object is - * empty. It is empty if either - * both dimensions are zero or no - * ChunkSparsityPattern is - * associated. - */ - bool empty () const; - - /** - * Return the dimension of the - * image space. To remember: the - * matrix is of dimension - * $m \times n$. - */ - unsigned int m () const; - - /** - * Return the dimension of the - * range space. To remember: the - * matrix is of dimension - * $m \times n$. - */ - unsigned int n () const; - - /** - * Return the number of nonzero - * elements of this - * matrix. Actually, it returns - * the number of entries in the - * sparsity pattern; if any of - * the entries should happen to - * be zero, it is counted anyway. - */ - unsigned int n_nonzero_elements () const; - - /** - * Return the number of actually - * nonzero elements of this - * matrix. - * - * Note, that this function does - * (in contrary to - * n_nonzero_elements()) not - * count all entries of the - * sparsity pattern but only the - * ones that are nonzero. - */ - unsigned int n_actually_nonzero_elements () const; - - /** - * Return a (constant) reference - * to the underlying sparsity - * pattern of this matrix. - * - * Though the return value is - * declared const, you - * should be aware that it may - * change if you call any - * nonconstant function of - * objects which operate on it. - */ - const ChunkSparsityPattern & get_sparsity_pattern () const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. See - * MemoryConsumption. - */ - std::size_t memory_consumption () const; + /** + * Return whether the object is + * empty. It is empty if either + * both dimensions are zero or no + * ChunkSparsityPattern is + * associated. + */ + bool empty () const; + + /** + * Return the dimension of the + * image space. To remember: the + * matrix is of dimension + * $m \times n$. 
+ */ + unsigned int m () const; + + /** + * Return the dimension of the + * range space. To remember: the + * matrix is of dimension + * $m \times n$. + */ + unsigned int n () const; + + /** + * Return the number of nonzero + * elements of this + * matrix. Actually, it returns + * the number of entries in the + * sparsity pattern; if any of + * the entries should happen to + * be zero, it is counted anyway. + */ + unsigned int n_nonzero_elements () const; + + /** + * Return the number of actually + * nonzero elements of this + * matrix. + * + * Note, that this function does + * (in contrary to + * n_nonzero_elements()) not + * count all entries of the + * sparsity pattern but only the + * ones that are nonzero. + */ + unsigned int n_actually_nonzero_elements () const; + + /** + * Return a (constant) reference + * to the underlying sparsity + * pattern of this matrix. + * + * Though the return value is + * declared const, you + * should be aware that it may + * change if you call any + * nonconstant function of + * objects which operate on it. + */ + const ChunkSparsityPattern &get_sparsity_pattern () const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. See + * MemoryConsumption. + */ + std::size_t memory_consumption () const; //@} - /** - * @name Modifying entries - */ + /** + * @name Modifying entries + */ //@{ - /** - * Set the element (i,j) - * to value. Throws an - * error if the entry does not - * exist or if value is - * not a finite number. Still, it - * is allowed to store zero - * values in non-existent fields. - */ - void set (const unsigned int i, - const unsigned int j, - const number value); - - /** - * Add value to the - * element (i,j). Throws - * an error if the entry does not - * exist or if value is - * not a finite number. Still, it - * is allowed to store zero - * values in non-existent fields. - */ - void add (const unsigned int i, - const unsigned int j, - const number value); - - /** - * Multiply the entire matrix by a - * fixed factor. - */ - ChunkSparseMatrix & operator *= (const number factor); - - /** - * Divide the entire matrix by a - * fixed factor. - */ - ChunkSparseMatrix & operator /= (const number factor); - - /** - * Symmetrize the matrix by - * forming the mean value between - * the existing matrix and its - * transpose, $A = \frac 12(A+A^T)$. - * - * This operation assumes that - * the underlying sparsity - * pattern represents a symmetric - * object. If this is not the - * case, then the result of this - * operation will not be a - * symmetric matrix, since it - * only explicitly symmetrizes - * by looping over the lower left - * triangular part for efficiency - * reasons; if there are entries - * in the upper right triangle, - * then these elements are missed - * in the - * symmetrization. Symmetrization - * of the sparsity pattern can be - * obtain by - * ChunkSparsityPattern::symmetrize(). - */ - void symmetrize (); - - /** - * Copy the given matrix to this - * one. The operation throws an - * error if the sparsity patterns - * of the two involved matrices - * do not point to the same - * object, since in this case the - * copy operation is - * cheaper. Since this operation - * is notheless not for free, we - * do not make it available - * through operator =, - * since this may lead to - * unwanted usage, e.g. in copy - * arguments to functions, which - * should really be arguments by - * reference. 
- * - * The source matrix may be a matrix - * of arbitrary type, as long as its - * data type is convertible to the - * data type of this matrix. - * - * The function returns a reference to - * *this. - */ - template - ChunkSparseMatrix & - copy_from (const ChunkSparseMatrix &source); - - /** - * This function is complete - * analogous to the - * ChunkSparsityPattern::copy_from() - * function in that it allows to - * initialize a whole matrix in - * one step. See there for more - * information on argument types - * and their meaning. You can - * also find a small example on - * how to use this function - * there. - * - * The only difference to the - * cited function is that the - * objects which the inner - * iterator points to need to be - * of type std::pair, where - * value needs to be - * convertible to the element - * type of this class, as - * specified by the - * number template - * argument. - * - * Previous content of the matrix - * is overwritten. Note that the - * entries specified by the input - * parameters need not - * necessarily cover all elements - * of the matrix. Elements not - * covered remain untouched. - */ - template - void copy_from (const ForwardIterator begin, - const ForwardIterator end); - - /** - * Copy the nonzero entries of a - * full matrix into this - * object. Previous content is - * deleted. Note that the - * underlying sparsity pattern - * must be appropriate to hold - * the nonzero entries of the - * full matrix. - */ - template - void copy_from (const FullMatrix &matrix); - - /** - * Add matrix scaled by - * factor to this matrix, - * i.e. the matrix factor*matrix - * is added to this. This - * function throws an error if the - * sparsity patterns of the two involved - * matrices do not point to the same - * object, since in this case the - * operation is cheaper. - * - * The source matrix may be a sparse - * matrix over an arbitrary underlying - * scalar type, as long as its data type - * is convertible to the data type of - * this matrix. - */ - template - void add (const number factor, - const ChunkSparseMatrix &matrix); + /** + * Set the element (i,j) + * to value. Throws an + * error if the entry does not + * exist or if value is + * not a finite number. Still, it + * is allowed to store zero + * values in non-existent fields. + */ + void set (const unsigned int i, + const unsigned int j, + const number value); + + /** + * Add value to the + * element (i,j). Throws + * an error if the entry does not + * exist or if value is + * not a finite number. Still, it + * is allowed to store zero + * values in non-existent fields. + */ + void add (const unsigned int i, + const unsigned int j, + const number value); + + /** + * Multiply the entire matrix by a + * fixed factor. + */ + ChunkSparseMatrix &operator *= (const number factor); + + /** + * Divide the entire matrix by a + * fixed factor. + */ + ChunkSparseMatrix &operator /= (const number factor); + + /** + * Symmetrize the matrix by + * forming the mean value between + * the existing matrix and its + * transpose, $A = \frac 12(A+A^T)$. + * + * This operation assumes that + * the underlying sparsity + * pattern represents a symmetric + * object. If this is not the + * case, then the result of this + * operation will not be a + * symmetric matrix, since it + * only explicitly symmetrizes + * by looping over the lower left + * triangular part for efficiency + * reasons; if there are entries + * in the upper right triangle, + * then these elements are missed + * in the + * symmetrization. 
Symmetrization + * of the sparsity pattern can be + * obtain by + * ChunkSparsityPattern::symmetrize(). + */ + void symmetrize (); + + /** + * Copy the given matrix to this + * one. The operation throws an + * error if the sparsity patterns + * of the two involved matrices + * do not point to the same + * object, since in this case the + * copy operation is + * cheaper. Since this operation + * is notheless not for free, we + * do not make it available + * through operator =, + * since this may lead to + * unwanted usage, e.g. in copy + * arguments to functions, which + * should really be arguments by + * reference. + * + * The source matrix may be a matrix + * of arbitrary type, as long as its + * data type is convertible to the + * data type of this matrix. + * + * The function returns a reference to + * *this. + */ + template + ChunkSparseMatrix & + copy_from (const ChunkSparseMatrix &source); + + /** + * This function is complete + * analogous to the + * ChunkSparsityPattern::copy_from() + * function in that it allows to + * initialize a whole matrix in + * one step. See there for more + * information on argument types + * and their meaning. You can + * also find a small example on + * how to use this function + * there. + * + * The only difference to the + * cited function is that the + * objects which the inner + * iterator points to need to be + * of type std::pair, where + * value needs to be + * convertible to the element + * type of this class, as + * specified by the + * number template + * argument. + * + * Previous content of the matrix + * is overwritten. Note that the + * entries specified by the input + * parameters need not + * necessarily cover all elements + * of the matrix. Elements not + * covered remain untouched. + */ + template + void copy_from (const ForwardIterator begin, + const ForwardIterator end); + + /** + * Copy the nonzero entries of a + * full matrix into this + * object. Previous content is + * deleted. Note that the + * underlying sparsity pattern + * must be appropriate to hold + * the nonzero entries of the + * full matrix. + */ + template + void copy_from (const FullMatrix &matrix); + + /** + * Add matrix scaled by + * factor to this matrix, + * i.e. the matrix factor*matrix + * is added to this. This + * function throws an error if the + * sparsity patterns of the two involved + * matrices do not point to the same + * object, since in this case the + * operation is cheaper. + * + * The source matrix may be a sparse + * matrix over an arbitrary underlying + * scalar type, as long as its data type + * is convertible to the data type of + * this matrix. + */ + template + void add (const number factor, + const ChunkSparseMatrix &matrix); //@} - /** - * @name Entry Access - */ + /** + * @name Entry Access + */ //@{ - /** - * Return the value of the entry - * (i,j). This may be an - * expensive operation and you - * should always take care where - * to call this function. In - * order to avoid abuse, this - * function throws an exception - * if the required element does - * not exist in the matrix. - * - * In case you want a function - * that returns zero instead (for - * entries that are not in the - * sparsity pattern of the - * matrix), use the el() - * function. - * - * If you are looping over all elements, - * consider using one of the iterator - * classes instead, since they are - * tailored better to a sparse matrix - * structure. 
- */ - number operator () (const unsigned int i, - const unsigned int j) const; - - /** - * This function is mostly like - * operator()() in that it - * returns the value of the - * matrix entry (i,j). The - * only difference is that if - * this entry does not exist in - * the sparsity pattern, then - * instead of raising an - * exception, zero is - * returned. While this may be - * convenient in some cases, note - * that it is simple to write - * algorithms that are slow - * compared to an optimal - * solution, since the sparsity - * of the matrix is not used. - * - * If you are looping over all elements, - * consider using one of the iterator - * classes instead, since they are - * tailored better to a sparse matrix - * structure. - */ - number el (const unsigned int i, - const unsigned int j) const; - - /** - * Return the main diagonal - * element in the ith - * row. This function throws an - * error if the matrix is not - * quadratic (see - * ChunkSparsityPattern::optimize_diagonal()). - * - * This function is considerably - * faster than the operator()(), - * since for quadratic matrices, the - * diagonal entry may be the - * first to be stored in each row - * and access therefore does not - * involve searching for the - * right column number. - */ - number diag_element (const unsigned int i) const; - - /** - * Same as above, but return a - * writeable reference. You're - * sure you know what you do? - */ - number & diag_element (const unsigned int i); + /** + * Return the value of the entry + * (i,j). This may be an + * expensive operation and you + * should always take care where + * to call this function. In + * order to avoid abuse, this + * function throws an exception + * if the required element does + * not exist in the matrix. + * + * In case you want a function + * that returns zero instead (for + * entries that are not in the + * sparsity pattern of the + * matrix), use the el() + * function. + * + * If you are looping over all elements, + * consider using one of the iterator + * classes instead, since they are + * tailored better to a sparse matrix + * structure. + */ + number operator () (const unsigned int i, + const unsigned int j) const; + + /** + * This function is mostly like + * operator()() in that it + * returns the value of the + * matrix entry (i,j). The + * only difference is that if + * this entry does not exist in + * the sparsity pattern, then + * instead of raising an + * exception, zero is + * returned. While this may be + * convenient in some cases, note + * that it is simple to write + * algorithms that are slow + * compared to an optimal + * solution, since the sparsity + * of the matrix is not used. + * + * If you are looping over all elements, + * consider using one of the iterator + * classes instead, since they are + * tailored better to a sparse matrix + * structure. + */ + number el (const unsigned int i, + const unsigned int j) const; + + /** + * Return the main diagonal + * element in the ith + * row. This function throws an + * error if the matrix is not + * quadratic (see + * ChunkSparsityPattern::optimize_diagonal()). + * + * This function is considerably + * faster than the operator()(), + * since for quadratic matrices, the + * diagonal entry may be the + * first to be stored in each row + * and access therefore does not + * involve searching for the + * right column number. + */ + number diag_element (const unsigned int i) const; + + /** + * Same as above, but return a + * writeable reference. You're + * sure you know what you do? 
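
The three access routines documented in this block differ mainly in their failure behaviour; a small sketch, assuming 'matrix' is an initialized, quadratic ChunkSparseMatrix<double> whose pattern contains the entries read with operator():

#include <deal.II/lac/chunk_sparse_matrix.h>

// 'matrix' is assumed to be quadratic and to contain the entries that
// are read with operator(); el() is used for the one that may be absent.
void inspect_entries (const dealii::ChunkSparseMatrix<double> &matrix)
{
  const double a01 = matrix (0, 1);           // throws if (0,1) is not stored
  const double a02 = matrix.el (0, 2);        // returns 0 for missing entries
  const double a00 = matrix.diag_element (0); // fast diagonal access

  (void) a01;  (void) a02;  (void) a00;
}
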
+ */ + number &diag_element (const unsigned int i); //@} - /** - * @name Matrix vector multiplications - */ + /** + * @name Matrix vector multiplications + */ //@{ - /** - * Matrix-vector multiplication: - * let dst = M*src with - * M being this matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockChunkSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void vmult (OutVector& dst, - const InVector& src) const; - - /** - * Matrix-vector multiplication: - * let dst = MT*src with - * M being this - * matrix. This function does the - * same as vmult() but takes - * the transposed matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockChunkSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void Tvmult (OutVector& dst, - const InVector& src) const; - - /** - * Adding Matrix-vector - * multiplication. Add - * M*src on dst - * with M being this - * matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockChunkSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void vmult_add (OutVector& dst, - const InVector& src) const; - - /** - * Adding Matrix-vector - * multiplication. Add - * MT*src to - * dst with M being - * this matrix. This function - * does the same as vmult_add() - * but takes the transposed - * matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockChunkSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void Tvmult_add (OutVector& dst, - const InVector& src) const; - - /** - * Return the square of the norm - * of the vector $v$ with respect - * to the norm induced by this - * matrix, - * i.e. $\left(v,Mv\right)$. This - * is useful, e.g. in the finite - * element context, where the - * $L_2$ norm of a function - * equals the matrix norm with - * respect to the mass matrix of - * the vector representing the - * nodal values of the finite - * element function. 
- * - * Obviously, the matrix needs to be - * quadratic for this operation, and for - * the result to actually be a norm it - * also needs to be either real symmetric - * or complex hermitian. - * - * The underlying template types of both - * this matrix and the given vector - * should either both be real or - * complex-valued, but not mixed, for - * this function to make sense. - */ - template - somenumber matrix_norm_square (const Vector &v) const; - - /** - * Compute the matrix scalar - * product $\left(u,Mv\right)$. - */ - template - somenumber matrix_scalar_product (const Vector &u, - const Vector &v) const; - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to be - * r=b-Mx. Write the - * residual into - * dst. The - * l2 norm of - * the residual vector is - * returned. - * - * Source x and destination - * dst must not be the same - * vector. - */ - template - somenumber residual (Vector &dst, - const Vector &x, - const Vector &b) const; + /** + * Matrix-vector multiplication: + * let dst = M*src with + * M being this matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockChunkSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void vmult (OutVector &dst, + const InVector &src) const; + + /** + * Matrix-vector multiplication: + * let dst = MT*src with + * M being this + * matrix. This function does the + * same as vmult() but takes + * the transposed matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockChunkSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void Tvmult (OutVector &dst, + const InVector &src) const; + + /** + * Adding Matrix-vector + * multiplication. Add + * M*src on dst + * with M being this + * matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockChunkSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void vmult_add (OutVector &dst, + const InVector &src) const; + + /** + * Adding Matrix-vector + * multiplication. Add + * MT*src to + * dst with M being + * this matrix. This function + * does the same as vmult_add() + * but takes the transposed + * matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. 
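
A compact sketch of these matrix-vector routines for a quadratic ChunkSparseMatrix<double>; the matrix and vectors are assumed to be set up elsewhere with matching sizes.

#include <deal.II/lac/chunk_sparse_matrix.h>
#include <deal.II/lac/vector.h>

// A, x and b are assumed to be set up elsewhere with matching sizes,
// and A to be quadratic.
void apply (const dealii::ChunkSparseMatrix<double> &A,
            const dealii::Vector<double>            &x,
            const dealii::Vector<double>            &b)
{
  dealii::Vector<double> y (x.size()), r (b.size());

  A.vmult     (y, x);                                 // y  = A x
  A.vmult_add (y, x);                                 // y += A x
  const double energy   = A.matrix_norm_square (x);   // (x, A x)
  const double res_norm = A.residual (r, x, b);       // r = b - A x, returns |r|_2

  (void) energy;  (void) res_norm;
}
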
For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockChunkSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void Tvmult_add (OutVector &dst, + const InVector &src) const; + + /** + * Return the square of the norm + * of the vector $v$ with respect + * to the norm induced by this + * matrix, + * i.e. $\left(v,Mv\right)$. This + * is useful, e.g. in the finite + * element context, where the + * $L_2$ norm of a function + * equals the matrix norm with + * respect to the mass matrix of + * the vector representing the + * nodal values of the finite + * element function. + * + * Obviously, the matrix needs to be + * quadratic for this operation, and for + * the result to actually be a norm it + * also needs to be either real symmetric + * or complex hermitian. + * + * The underlying template types of both + * this matrix and the given vector + * should either both be real or + * complex-valued, but not mixed, for + * this function to make sense. + */ + template + somenumber matrix_norm_square (const Vector &v) const; + + /** + * Compute the matrix scalar + * product $\left(u,Mv\right)$. + */ + template + somenumber matrix_scalar_product (const Vector &u, + const Vector &v) const; + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to be + * r=b-Mx. Write the + * residual into + * dst. The + * l2 norm of + * the residual vector is + * returned. + * + * Source x and destination + * dst must not be the same + * vector. + */ + template + somenumber residual (Vector &dst, + const Vector &x, + const Vector &b) const; //@} - /** - * @name Matrix norms - */ + /** + * @name Matrix norms + */ //@{ - /** - * Return the l1-norm of the matrix, that is - * $|M|_1=max_{all columns j}\sum_{all - * rows i} |M_ij|$, - * (max. sum of columns). - * This is the - * natural matrix norm that is compatible - * to the l1-norm for vectors, i.e. - * $|Mv|_1\leq |M|_1 |v|_1$. - * (cf. Haemmerlin-Hoffmann : Numerische Mathematik) - */ - real_type l1_norm () const; - - /** - * Return the linfty-norm of the - * matrix, that is - * $|M|_infty=max_{all rows i}\sum_{all - * columns j} |M_ij|$, - * (max. sum of rows). - * This is the - * natural matrix norm that is compatible - * to the linfty-norm of vectors, i.e. - * $|Mv|_infty \leq |M|_infty |v|_infty$. - * (cf. Haemmerlin-Hoffmann : Numerische Mathematik) - */ - real_type linfty_norm () const; - - /** - * Return the frobenius norm of the - * matrix, i.e. the square root of the - * sum of squares of all entries in the - * matrix. - */ - real_type frobenius_norm () const; + /** + * Return the l1-norm of the matrix, that is + * $|M|_1=max_{all columns j}\sum_{all + * rows i} |M_ij|$, + * (max. sum of columns). + * This is the + * natural matrix norm that is compatible + * to the l1-norm for vectors, i.e. + * $|Mv|_1\leq |M|_1 |v|_1$. + * (cf. Haemmerlin-Hoffmann : Numerische Mathematik) + */ + real_type l1_norm () const; + + /** + * Return the linfty-norm of the + * matrix, that is + * $|M|_infty=max_{all rows i}\sum_{all + * columns j} |M_ij|$, + * (max. sum of rows). + * This is the + * natural matrix norm that is compatible + * to the linfty-norm of vectors, i.e. + * $|Mv|_infty \leq |M|_infty |v|_infty$. + * (cf. 
Haemmerlin-Hoffmann : Numerische Mathematik) + */ + real_type linfty_norm () const; + + /** + * Return the frobenius norm of the + * matrix, i.e. the square root of the + * sum of squares of all entries in the + * matrix. + */ + real_type frobenius_norm () const; //@} - /** - * @name Preconditioning methods - */ + /** + * @name Preconditioning methods + */ //@{ - /** - * Apply the Jacobi - * preconditioner, which - * multiplies every element of - * the src vector by the - * inverse of the respective - * diagonal element and - * multiplies the result with the - * relaxation factor omega. - */ - template - void precondition_Jacobi (Vector &dst, - const Vector &src, - const number omega = 1.) const; - - /** - * Apply SSOR preconditioning to - * src. - */ - template - void precondition_SSOR (Vector &dst, - const Vector &src, - const number om = 1.) const; - - /** - * Apply SOR preconditioning - * matrix to src. - */ - template - void precondition_SOR (Vector &dst, - const Vector &src, - const number om = 1.) const; - - /** - * Apply transpose SOR - * preconditioning matrix to - * src. - */ - template - void precondition_TSOR (Vector &dst, + /** + * Apply the Jacobi + * preconditioner, which + * multiplies every element of + * the src vector by the + * inverse of the respective + * diagonal element and + * multiplies the result with the + * relaxation factor omega. + */ + template + void precondition_Jacobi (Vector &dst, const Vector &src, - const number om = 1.) const; - - /** - * Perform SSOR preconditioning - * in-place. Apply the - * preconditioner matrix without - * copying to a second vector. - * omega is the relaxation - * parameter. - */ - template - void SSOR (Vector &v, - const number omega = 1.) const; - - /** - * Perform an SOR preconditioning - * in-place. omega is - * the relaxation parameter. - */ - template - void SOR (Vector &v, + const number omega = 1.) const; + + /** + * Apply SSOR preconditioning to + * src. + */ + template + void precondition_SSOR (Vector &dst, + const Vector &src, + const number om = 1.) const; + + /** + * Apply SOR preconditioning + * matrix to src. + */ + template + void precondition_SOR (Vector &dst, + const Vector &src, + const number om = 1.) const; + + /** + * Apply transpose SOR + * preconditioning matrix to + * src. + */ + template + void precondition_TSOR (Vector &dst, + const Vector &src, + const number om = 1.) const; + + /** + * Perform SSOR preconditioning + * in-place. Apply the + * preconditioner matrix without + * copying to a second vector. + * omega is the relaxation + * parameter. + */ + template + void SSOR (Vector &v, + const number omega = 1.) const; + + /** + * Perform an SOR preconditioning + * in-place. omega is + * the relaxation parameter. + */ + template + void SOR (Vector &v, + const number om = 1.) const; + + /** + * Perform a transpose SOR + * preconditioning in-place. + * omega is the + * relaxation parameter. + */ + template + void TSOR (Vector &v, + const number om = 1.) const; + + /** + * Perform a permuted SOR + * preconditioning in-place. + * + * The standard SOR method is + * applied in the order + * prescribed by permutation, + * that is, first the row + * permutation[0], then + * permutation[1] and so + * on. For efficiency reasons, + * the permutation as well as its + * inverse are required. + * + * omega is the + * relaxation parameter. + */ + template + void PSOR (Vector &v, + const std::vector &permutation, + const std::vector &inverse_permutation, + const number om = 1.) 
const; + + /** + * Perform a transposed permuted SOR + * preconditioning in-place. + * + * The transposed SOR method is + * applied in the order + * prescribed by + * permutation, that is, + * first the row + * permutation[m()-1], + * then + * permutation[m()-2] + * and so on. For efficiency + * reasons, the permutation as + * well as its inverse are + * required. + * + * omega is the + * relaxation parameter. + */ + template + void TPSOR (Vector &v, + const std::vector &permutation, + const std::vector &inverse_permutation, const number om = 1.) const; - /** - * Perform a transpose SOR - * preconditioning in-place. - * omega is the - * relaxation parameter. - */ - template - void TSOR (Vector &v, - const number om = 1.) const; - - /** - * Perform a permuted SOR - * preconditioning in-place. - * - * The standard SOR method is - * applied in the order - * prescribed by permutation, - * that is, first the row - * permutation[0], then - * permutation[1] and so - * on. For efficiency reasons, - * the permutation as well as its - * inverse are required. - * - * omega is the - * relaxation parameter. - */ - template - void PSOR (Vector &v, - const std::vector& permutation, - const std::vector& inverse_permutation, - const number om = 1.) const; - - /** - * Perform a transposed permuted SOR - * preconditioning in-place. - * - * The transposed SOR method is - * applied in the order - * prescribed by - * permutation, that is, - * first the row - * permutation[m()-1], - * then - * permutation[m()-2] - * and so on. For efficiency - * reasons, the permutation as - * well as its inverse are - * required. - * - * omega is the - * relaxation parameter. - */ - template - void TPSOR (Vector &v, - const std::vector& permutation, - const std::vector& inverse_permutation, - const number om = 1.) const; - - /** - * Do one SOR step on v. - * Performs a direct SOR step - * with right hand side - * b. - */ - template - void SOR_step (Vector &v, - const Vector &b, - const number om = 1.) const; - - /** - * Do one adjoint SOR step on - * v. Performs a direct - * TSOR step with right hand side - * b. - */ - template - void TSOR_step (Vector &v, - const Vector &b, - const number om = 1.) const; - - /** - * Do one SSOR step on - * v. Performs a direct - * SSOR step with right hand side - * b by performing TSOR - * after SOR. - */ - template - void SSOR_step (Vector &v, - const Vector &b, - const number om = 1.) const; + /** + * Do one SOR step on v. + * Performs a direct SOR step + * with right hand side + * b. + */ + template + void SOR_step (Vector &v, + const Vector &b, + const number om = 1.) const; + + /** + * Do one adjoint SOR step on + * v. Performs a direct + * TSOR step with right hand side + * b. + */ + template + void TSOR_step (Vector &v, + const Vector &b, + const number om = 1.) const; + + /** + * Do one SSOR step on + * v. Performs a direct + * SSOR step with right hand side + * b by performing TSOR + * after SOR. + */ + template + void SSOR_step (Vector &v, + const Vector &b, + const number om = 1.) const; //@} - /** - * @name Input/Output - */ + /** + * @name Input/Output + */ //@{ - /** - * Print the matrix to the given - * stream, using the format - * (line,col) value, - * i.e. one nonzero entry of the - * matrix per line. - */ - void print (std::ostream &out) const; - - /** - * Print the matrix in the usual - * format, i.e. as a matrix and - * not as a list of nonzero - * elements. 
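The norm, residual and relaxation members declared in the hunks above can be exercised as follows. This is a minimal usage sketch, not part of the patch: the matrix and vectors are assumed to be assembled elsewhere, the relaxation factor 1.2 is arbitrary, and whether a given deal.II snapshot actually implements precondition_SSOR() for chunk matrices should be checked against the .templates.h file also touched by this commit.

    #include <deal.II/lac/chunk_sparse_matrix.h>
    #include <deal.II/lac/vector.h>
    #include <iostream>

    using namespace dealii;

    void diagnose_and_smooth (const ChunkSparseMatrix<double> &A,
                              Vector<double>                  &x,
                              const Vector<double>            &b)
    {
      // r = b - A x; residual() also returns the l2 norm of r.
      Vector<double> r (b.size());
      const double res_l2 = A.residual (r, x, b);

      // Matrix norms as documented: max column sum, max row sum.
      std::cout << "|r|_2 = "    << res_l2
                << ", |A|_1 = "  << A.l1_norm ()
                << ", |A|_oo = " << A.linfty_norm () << std::endl;

      // Energy norm (x, A x); meaningful for symmetric (Hermitian) A.
      std::cout << "(x,Ax) = " << A.matrix_norm_square (x) << std::endl;

      // One SSOR-preconditioned correction with relaxation factor 1.2.
      Vector<double> z (x.size());
      A.precondition_SSOR (z, r, 1.2);
      x += z;
    }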
For better - * readability, elements not in - * the matrix are displayed as - * empty space, while matrix - * elements which are explicitly - * set to zero are displayed as - * such. - * - * The parameters allow for a - * flexible setting of the output - * format: precision and - * scientific are used - * to determine the number - * format, where scientific = - * false means fixed point - * notation. A zero entry for - * width makes the - * function compute a width, but - * it may be changed to a - * positive value, if output is - * crude. - * - * Additionally, a character for - * an empty value may be - * specified. - * - * Finally, the whole matrix can - * be multiplied with a common - * denominator to produce more - * readable output, even - * integers. - * - * @attention This function may - * produce large amounts - * of output if applied to a - * large matrix! - */ - void print_formatted (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const unsigned int width = 0, - const char *zero_string = " ", - const double denominator = 1.) const; - - /** - * Print the actual pattern of - * the matrix. For each entry - * with an absolute value larger - * than threshold, a '*' is - * printed, a ':' for every value - * smaller and a '.' for every - * entry not allocated. - */ - void print_pattern(std::ostream& out, - const double threshold = 0.) const; - - /** - * Write the data of this object - * en bloc to a file. This is - * done in a binary mode, so the - * output is neither readable by - * humans nor (probably) by other - * computers using a different - * operating system of number - * format. - * - * The purpose of this function - * is that you can swap out - * matrices and sparsity pattern - * if you are short of memory, - * want to communicate between - * different programs, or allow - * objects to be persistent - * across different runs of the - * program. - */ - void block_write (std::ostream &out) const; - - /** - * Read data that has previously - * been written by block_write() - * from a file. This is done - * using the inverse operations - * to the above function, so it - * is reasonably fast because the - * bitstream is not interpreted - * except for a few numbers up - * front. - * - * The object is resized on this - * operation, and all previous - * contents are lost. Note, - * however, that no checks are - * performed whether new data and - * the underlying ChunkSparsityPattern - * object fit together. It is - * your responsibility to make - * sure that the sparsity pattern - * and the data to be read match. - * - * A primitive form of error - * checking is performed which - * will recognize the bluntest - * attempts to interpret some - * data as a matrix stored - * bitwise to a file that wasn't - * actually created that way, but - * not more. - */ - void block_read (std::istream &in); + /** + * Print the matrix to the given + * stream, using the format + * (line,col) value, + * i.e. one nonzero entry of the + * matrix per line. + */ + void print (std::ostream &out) const; + + /** + * Print the matrix in the usual + * format, i.e. as a matrix and + * not as a list of nonzero + * elements. For better + * readability, elements not in + * the matrix are displayed as + * empty space, while matrix + * elements which are explicitly + * set to zero are displayed as + * such. 
+ * + * The parameters allow for a + * flexible setting of the output + * format: precision and + * scientific are used + * to determine the number + * format, where scientific = + * false means fixed point + * notation. A zero entry for + * width makes the + * function compute a width, but + * it may be changed to a + * positive value, if output is + * crude. + * + * Additionally, a character for + * an empty value may be + * specified. + * + * Finally, the whole matrix can + * be multiplied with a common + * denominator to produce more + * readable output, even + * integers. + * + * @attention This function may + * produce large amounts + * of output if applied to a + * large matrix! + */ + void print_formatted (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const unsigned int width = 0, + const char *zero_string = " ", + const double denominator = 1.) const; + + /** + * Print the actual pattern of + * the matrix. For each entry + * with an absolute value larger + * than threshold, a '*' is + * printed, a ':' for every value + * smaller and a '.' for every + * entry not allocated. + */ + void print_pattern(std::ostream &out, + const double threshold = 0.) const; + + /** + * Write the data of this object + * en bloc to a file. This is + * done in a binary mode, so the + * output is neither readable by + * humans nor (probably) by other + * computers using a different + * operating system of number + * format. + * + * The purpose of this function + * is that you can swap out + * matrices and sparsity pattern + * if you are short of memory, + * want to communicate between + * different programs, or allow + * objects to be persistent + * across different runs of the + * program. + */ + void block_write (std::ostream &out) const; + + /** + * Read data that has previously + * been written by block_write() + * from a file. This is done + * using the inverse operations + * to the above function, so it + * is reasonably fast because the + * bitstream is not interpreted + * except for a few numbers up + * front. + * + * The object is resized on this + * operation, and all previous + * contents are lost. Note, + * however, that no checks are + * performed whether new data and + * the underlying ChunkSparsityPattern + * object fit together. It is + * your responsibility to make + * sure that the sparsity pattern + * and the data to be read match. + * + * A primitive form of error + * checking is performed which + * will recognize the bluntest + * attempts to interpret some + * data as a matrix stored + * bitwise to a file that wasn't + * actually created that way, but + * not more. + */ + void block_read (std::istream &in); //@} - /** @addtogroup Exceptions - * @{ */ - - /** - * Exception - */ - DeclException2 (ExcInvalidIndex, - int, int, - << "The entry with index <" << arg1 << ',' << arg2 - << "> does not exist."); - /** - * Exception - */ - DeclException1 (ExcInvalidIndex1, - int, - << "The index " << arg1 << " is not in the allowed range."); - /** - * Exception - */ - DeclException0 (ExcDifferentChunkSparsityPatterns); - /** - * Exception - */ - DeclException2 (ExcIteratorRange, - int, int, - << "The iterators denote a range of " << arg1 - << " elements, but the given number of rows was " << arg2); - /** - * Exception - */ - DeclException0 (ExcSourceEqualsDestination); - //@} - private: - /** - * Pointer to the sparsity - * pattern used for this - * matrix. 
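The input/output members above combine into a simple save-and-restore round trip. A sketch under stated assumptions: the file name matrix.dat is made up, and, as the documentation stresses, the reading matrix must already be associated with the same ChunkSparsityPattern, because block_read() performs no consistency check beyond a crude sanity test.

    #include <deal.II/lac/chunk_sparse_matrix.h>
    #include <fstream>
    #include <iostream>

    using namespace dealii;

    void save_and_restore (const ChunkSparseMatrix<double> &A,
                           ChunkSparseMatrix<double>       &B)
    {
      {
        std::ofstream out ("matrix.dat", std::ios::binary);
        A.block_write (out);               // raw, machine-dependent dump
      }
      {
        std::ifstream in ("matrix.dat", std::ios::binary);
        B.block_read (in);                 // B must share A's sparsity pattern
      }

      // Human-readable check: 3 digits, scientific notation, print '0'
      // for entries that are not in the sparsity pattern.
      B.print_formatted (std::cout, 3, true, 0, "0", 1.);
    }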
In order to guarantee - * that it is not deleted while - * still in use, we subscribe to - * it using the SmartPointer - * class. - */ - SmartPointer > cols; - - /** - * Array of values for all the - * nonzero entries. The position - * within the matrix, i.e. the - * row and column number for a - * given entry can only be - * deduced using the sparsity - * pattern. The same holds for - * the more common operation of - * finding an entry by its - * coordinates. - */ - number *val; - - /** - * Allocated size of #val. This - * can be larger than the - * actually used part if the size - * of the matrix was reduced - * somewhen in the past by - * associating a sparsity pattern - * with a smaller size to this - * object, using the reinit() - * function. - */ - unsigned int max_len; - - /** - * Return the location of entry - * $(i,j)$ within the val array. - */ - unsigned int compute_location (const unsigned int i, - const unsigned int j) const; - - // make all other sparse matrices - // friends - template friend class ChunkSparseMatrix; + /** @addtogroup Exceptions + * @{ */ + + /** + * Exception + */ + DeclException2 (ExcInvalidIndex, + int, int, + << "The entry with index <" << arg1 << ',' << arg2 + << "> does not exist."); + /** + * Exception + */ + DeclException1 (ExcInvalidIndex1, + int, + << "The index " << arg1 << " is not in the allowed range."); + /** + * Exception + */ + DeclException0 (ExcDifferentChunkSparsityPatterns); + /** + * Exception + */ + DeclException2 (ExcIteratorRange, + int, int, + << "The iterators denote a range of " << arg1 + << " elements, but the given number of rows was " << arg2); + /** + * Exception + */ + DeclException0 (ExcSourceEqualsDestination); + //@} + private: + /** + * Pointer to the sparsity + * pattern used for this + * matrix. In order to guarantee + * that it is not deleted while + * still in use, we subscribe to + * it using the SmartPointer + * class. + */ + SmartPointer > cols; + + /** + * Array of values for all the + * nonzero entries. The position + * within the matrix, i.e. the + * row and column number for a + * given entry can only be + * deduced using the sparsity + * pattern. The same holds for + * the more common operation of + * finding an entry by its + * coordinates. + */ + number *val; + + /** + * Allocated size of #val. This + * can be larger than the + * actually used part if the size + * of the matrix was reduced + * somewhen in the past by + * associating a sparsity pattern + * with a smaller size to this + * object, using the reinit() + * function. + */ + unsigned int max_len; + + /** + * Return the location of entry + * $(i,j)$ within the val array. 
+ */ + unsigned int compute_location (const unsigned int i, + const unsigned int j) const; + + // make all other sparse matrices + // friends + template friend class ChunkSparseMatrix; }; /*@}*/ diff --cc deal.II/include/deal.II/lac/chunk_sparse_matrix.templates.h index 2547f96bde,f176c7827e..6c993b0e19 --- a/deal.II/include/deal.II/lac/chunk_sparse_matrix.templates.h +++ b/deal.II/include/deal.II/lac/chunk_sparse_matrix.templates.h @@@ -229,11 -229,11 +229,11 @@@ ChunkSparseMatrix::ChunkSparseM template ChunkSparseMatrix::ChunkSparseMatrix (const ChunkSparsityPattern &c, - const IdentityMatrix &id) + const IdentityMatrix &id) - : - cols(0, "ChunkSparseMatrix"), - val(0), - max_len(0) + : + cols(0, "ChunkSparseMatrix"), + val(0), + max_len(0) { Assert (c.n_rows() == id.m(), ExcDimensionMismatch (c.n_rows(), id.m())); Assert (c.n_cols() == id.n(), ExcDimensionMismatch (c.n_cols(), id.n())); diff --cc deal.II/include/deal.II/lac/constraint_matrix.h index 7b86d099fa,938b681f9e..121333674f --- a/deal.II/include/deal.II/lac/constraint_matrix.h +++ b/deal.II/include/deal.II/lac/constraint_matrix.h @@@ -138,1649 -138,1649 +138,1649 @@@ namespace internal */ class ConstraintMatrix : public Subscriptor { - public: - /** - * An enum that describes what should - * happen if the two ConstraintMatrix - * objects involved in a call to the - * merge() function happen to have - * constraints on the same degrees of - * freedom. - */ - enum MergeConflictBehavior - { - /** - * Throw an exception if the two - * objects concerned have - * conflicting constraints on the - * same degree of freedom. - */ - no_conflicts_allowed, - - /** - * In an operation - * cm1.merge(cm2), if - * cm1 and - * cm2 have - * constraints on the same degree - * of freedom, take the one from - * cm1. - */ - left_object_wins, - - /** - * In an operation - * cm1.merge(cm2), if - * cm1 and - * cm2 have - * constraints on the same degree - * of freedom, take the one from - * cm2. - */ - right_object_wins - }; - - /** - * Constructor. The supplied IndexSet - * defines which indices might be - * constrained inside this - * ConstraintMatrix. In a calculation - * with a - * parallel::distributed::DoFHandler one - * should use locally_relevant_dofs. The - * IndexSet allows the ConstraintMatrix - * to safe memory. Otherwise internal - * data structures for all possible - * indices will be created. - */ - ConstraintMatrix (const IndexSet & local_constraints = IndexSet()); - - /** - * Copy constructor - */ - ConstraintMatrix (const ConstraintMatrix &constraint_matrix); - - /** - * Reinit the ConstraintMatrix object and - * supply an IndexSet with lines that may - * be constrained. This function is only - * relevant in the distributed case to - * supply a different IndexSet. Otherwise - * this routine is equivalent to calling - * clear(). See the constructor for - * details. - */ - void reinit (const IndexSet & local_constraints = IndexSet()); - - /** - * Determines if we can store a - * constraint for the given @p - * line_index. This routine only matters - * in the distributed case and checks if - * the IndexSet allows storage of this - * line. Always returns true if not in - * the distributed case. - */ - bool can_store_line (const unsigned int line_index) const; - - /** - * This function copies the content of @p - * constraints_in with DoFs that are - * element of the IndexSet @p - * filter. Elements that are not present - * in the IndexSet are ignored. 
All DoFs - * will be transformed to local index - * space of the filter, both the - * constrained DoFs and the other DoFs - * these entries are constrained to. The - * local index space of the filter is a - * contiguous numbering of all (global) - * DoFs that are elements in the - * filter. - * - * If, for example, the filter represents - * the range [10,20), and the - * constraint matrix @p constraints_in - * includes the global indices - * {7,13,14}, the indices - * {3,4} are added to the - * calling constraint matrix (since 13 - * and 14 are elements in the filter and - * element 13 is the fourth element in - * the index, and 14 is the fifth). - * - * This function provides an easy way to - * create a ConstraintMatrix for certain - * vector components in a vector-valued - * problem from a full ConstraintMatrix, - * i.e. extracting a diagonal subblock - * from a larger ConstraintMatrix. The - * block is specified by the IndexSet - * argument. - */ - void add_selected_constraints (const ConstraintMatrix &constraints_in, - const IndexSet &filter); - - /** - * @name Adding constraints - * @{ - */ - - /** - * Add a new line to the matrix. If the - * line already exists, then the function - * simply returns without doing anything. - */ - void add_line (const unsigned int line); - - /** - * Call the first add_line() function for - * every index i for which - * lines[i] is true. - * - * This function essentially exists to - * allow adding several constraints of - * the form xi=0 all at once, where - * the set of indices i for which these - * constraints should be added are given - * by the argument of this function. On - * the other hand, just as if the - * single-argument add_line() function - * were called repeatedly, the - * constraints can later be modified to - * include linear dependencies using the - * add_entry() function as well as - * inhomogeneities using - * set_inhomogeneity(). - */ - void add_lines (const std::vector &lines); - - /** - * Call the first add_line() function for - * every index i that - * appears in the argument. - * - * This function essentially exists to - * allow adding several constraints of - * the form xi=0 all at once, where - * the set of indices i for which these - * constraints should be added are given - * by the argument of this function. On - * the other hand, just as if the - * single-argument add_line() function - * were called repeatedly, the - * constraints can later be modified to - * include linear dependencies using the - * add_entry() function as well as - * inhomogeneities using - * set_inhomogeneity(). - */ - void add_lines (const std::set &lines); - - /** - * Call the first add_line() function for - * every index i that - * appears in the argument. - * - * This function essentially exists to - * allow adding several constraints of - * the form xi=0 all at once, where - * the set of indices i for which these - * constraints should be added are given - * by the argument of this function. On - * the other hand, just as if the - * single-argument add_line() function - * were called repeatedly, the - * constraints can later be modified to - * include linear dependencies using the - * add_entry() function as well as - * inhomogeneities using - * set_inhomogeneity(). - */ - void add_lines (const IndexSet &lines); - - /** - * Add an entry to a given - * line. 
The list of lines is - * searched from the back to the - * front, so clever programming - * would add a new line (which is - * pushed to the back) and - * immediately afterwards fill - * the entries of that line. This - * way, no expensive searching is - * needed. - * - * If an entry with the same - * indices as the one this - * function call denotes already - * exists, then this function - * simply returns provided that - * the value of the entry is the - * same. Thus, it does no harm to - * enter a constraint twice. - */ - void add_entry (const unsigned int line, - const unsigned int column, - const double value); - - /** - * Add a whole series of entries, - * denoted by pairs of column indices - * and values, to a line of - * constraints. This function is - * equivalent to calling the preceding - * function several times, but is - * faster. - */ - void add_entries (const unsigned int line, - const std::vector > &col_val_pairs); - - /** - * Set an imhomogeneity to the - * constraint line i, according - * to the discussion in the general - * class description. - * - * @note the line needs to be added with - * one of the add_line() calls first. - */ - void set_inhomogeneity (const unsigned int line, - const double value); - - /** - * Close the filling of entries. Since - * the lines of a matrix of this type - * are usually filled in an arbitrary - * order and since we do not want to - * use associative constainers to store - * the lines, we need to sort the lines - * and within the lines the columns - * before usage of the matrix. This is - * done through this function. - * - * Also, zero entries are discarded, - * since they are not needed. - * - * After closing, no more entries are - * accepted. If the object was already - * closed, then this function returns - * immediately. - * - * This function also resolves chains - * of constraints. For example, degree - * of freedom 13 may be constrained to - * u13=u3/2+u7/2 while degree of - * freedom 7 is itself constrained as - * u7=u2/2+u4/2. Then, the - * resolution will be that - * u13=u3/2+u2/4+u4/4. Note, - * however, that cycles in this graph - * of constraints are not allowed, - * i.e. for example u4 may not be - * constrained, directly or indirectly, - * to u13 again. - */ - void close (); - - /** - * Merge the constraints represented by - * the object given as argument into - * the constraints represented by this - * object. Both objects may or may not - * be closed (by having their function - * close() called before). If this - * object was closed before, then it - * will be closed afterwards as - * well. Note, however, that if the - * other argument is closed, then - * merging may be significantly faster. - * - * Using the default value of the second - * arguments, the constraints in each of - * the two objects (the old one - * represented by this object and the - * argument) may not refer to the same - * degree of freedom, i.e. a degree of - * freedom that is constrained in one - * object may not be constrained in the - * second. If this is nevertheless the - * case, an exception is thrown. However, - * this behavior can be changed by - * providing a different value for the - * second argument. - */ - void merge (const ConstraintMatrix &other_constraints, - const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed); - - /** - * Shift all entries of this matrix - * down @p offset rows and over @p - * offset columns. 
- * - * This function is useful if you are - * building block matrices, where all - * blocks are built by the same - * DoFHandler object, i.e. the matrix - * size is larger than the number of - * degrees of freedom. Since several - * matrix rows and columns correspond - * to the same degrees of freedom, - * you'd generate several constraint - * objects, then shift them, and - * finally merge() them together - * again. - */ - void shift (const unsigned int offset); - - /** - * Clear all entries of this - * matrix. Reset the flag determining - * whether new entries are accepted or - * not. - * - * This function may be called also on - * objects which are empty or already - * cleared. - */ - void clear (); - - /** - * @} - */ - - - /** - * @name Querying constraints - * @{ - */ - - /** - * Return number of constraints stored in - * this matrix. - */ - unsigned int n_constraints () const; - - /** - * Return whether the degree of freedom - * with number @p index is a - * constrained one. - * - * Note that if close() was called - * before, then this function is - * significantly faster, since then the - * constrained degrees of freedom are - * sorted and we can do a binary - * search, while before close() was - * called, we have to perform a linear - * search through all entries. - */ - bool is_constrained (const unsigned int index) const; - - /** - * Return whether the dof is - * constrained, and whether it is - * constrained to only one other degree - * of freedom with weight one. The - * function therefore returns whether - * the degree of freedom would simply - * be eliminated in favor of exactly - * one other degree of freedom. - * - * The function returns @p false if - * either the degree of freedom is not - * constrained at all, or if it is - * constrained to more than one other - * degree of freedom, or if it is - * constrained to only one degree of - * freedom but with a weight different - * from one. - */ - bool is_identity_constrained (const unsigned int index) const; - - /** - * Return the maximum number of other - * dofs that one dof is constrained - * to. For example, in 2d a hanging - * node is constrained only to its two - * neighbors, so the returned value - * would be 2. However, for higher - * order elements and/or higher - * dimensions, or other types of - * constraints, this number is no more - * obvious. - * - * The name indicates that within the - * system matrix, references to a - * constrained node are indirected to - * the nodes it is constrained to. - */ - unsigned int max_constraint_indirections () const; - - /** - * Returns true in case the - * dof is constrained and there is a - * non-trivial inhomogeneous valeus set - * to the dof. - */ - bool is_inhomogeneously_constrained (const unsigned int index) const; - - /** - * Returns false if all - * constraints in the ConstraintMatrix - * are homogeneous ones, and - * true if there is at least - * one inhomogeneity. - */ - bool has_inhomogeneities () const; - - /** - * Returns a pointer to the the vector of - * entries if a line is constrained, and a - * zero pointer in case the dof is not - * constrained. - */ - const std::vector >* - get_constraint_entries (const unsigned int line) const; - - /** - * Returns the value of the inhomogeneity - * stored in the constrained dof @p - * line. Unconstrained dofs also return a - * zero value. - */ - double get_inhomogeneity (const unsigned int line) const; - - /** - * Print the constraint lines. Mainly - * for debugging purposes. 
- * - * This function writes out all entries - * in the constraint matrix lines with - * their value in the form row col - * : value. Unconstrained lines - * containing only one identity entry - * are not stored in this object and - * are not printed. - */ - void print (std::ostream &) const; - - /** - * Write the graph of constraints in - * 'dot' format. 'dot' is a program - * that can take a list of nodes and - * produce a graphical representation - * of the graph of constrained degrees - * of freedom and the degrees of - * freedom they are constrained to. - * - * The output of this function can be - * used as input to the 'dot' program - * that can convert the graph into a - * graphical representation in - * postscript, png, xfig, and a number - * of other formats. - * - * This function exists mostly for - * debugging purposes. - */ - void write_dot (std::ostream &) const; - - /** - * Determine an estimate for the memory - * consumption (in bytes) of this - * object. - */ + public: + /** + * An enum that describes what should + * happen if the two ConstraintMatrix + * objects involved in a call to the + * merge() function happen to have + * constraints on the same degrees of + * freedom. + */ + enum MergeConflictBehavior + { + /** + * Throw an exception if the two + * objects concerned have + * conflicting constraints on the + * same degree of freedom. + */ + no_conflicts_allowed, + + /** + * In an operation + * cm1.merge(cm2), if + * cm1 and + * cm2 have + * constraints on the same degree + * of freedom, take the one from + * cm1. + */ + left_object_wins, + + /** + * In an operation + * cm1.merge(cm2), if + * cm1 and + * cm2 have + * constraints on the same degree + * of freedom, take the one from + * cm2. + */ + right_object_wins + }; + + /** + * Constructor. The supplied IndexSet + * defines which indices might be + * constrained inside this + * ConstraintMatrix. In a calculation + * with a + * parallel::distributed::DoFHandler one + * should use locally_relevant_dofs. The + * IndexSet allows the ConstraintMatrix + * to safe memory. Otherwise internal + * data structures for all possible + * indices will be created. + */ + ConstraintMatrix (const IndexSet &local_constraints = IndexSet()); + + /** + * Copy constructor + */ + ConstraintMatrix (const ConstraintMatrix &constraint_matrix); + + /** + * Reinit the ConstraintMatrix object and + * supply an IndexSet with lines that may + * be constrained. This function is only + * relevant in the distributed case to + * supply a different IndexSet. Otherwise + * this routine is equivalent to calling + * clear(). See the constructor for + * details. + */ + void reinit (const IndexSet &local_constraints = IndexSet()); + + /** + * Determines if we can store a + * constraint for the given @p + * line_index. This routine only matters + * in the distributed case and checks if + * the IndexSet allows storage of this + * line. Always returns true if not in + * the distributed case. + */ + bool can_store_line (const unsigned int line_index) const; + + /** + * This function copies the content of @p + * constraints_in with DoFs that are + * element of the IndexSet @p + * filter. Elements that are not present + * in the IndexSet are ignored. All DoFs + * will be transformed to local index + * space of the filter, both the + * constrained DoFs and the other DoFs + * these entries are constrained to. The + * local index space of the filter is a + * contiguous numbering of all (global) + * DoFs that are elements in the + * filter. 
+ * + * If, for example, the filter represents + * the range [10,20), and the + * constraint matrix @p constraints_in + * includes the global indices + * {7,13,14}, the indices + * {3,4} are added to the + * calling constraint matrix (since 13 + * and 14 are elements in the filter and + * element 13 is the fourth element in + * the index, and 14 is the fifth). + * + * This function provides an easy way to + * create a ConstraintMatrix for certain + * vector components in a vector-valued + * problem from a full ConstraintMatrix, + * i.e. extracting a diagonal subblock + * from a larger ConstraintMatrix. The + * block is specified by the IndexSet + * argument. + */ + void add_selected_constraints (const ConstraintMatrix &constraints_in, + const IndexSet &filter); + + /** + * @name Adding constraints + * @{ + */ + + /** + * Add a new line to the matrix. If the + * line already exists, then the function + * simply returns without doing anything. + */ + void add_line (const unsigned int line); + + /** + * Call the first add_line() function for + * every index i for which + * lines[i] is true. + * + * This function essentially exists to + * allow adding several constraints of + * the form xi=0 all at once, where + * the set of indices i for which these + * constraints should be added are given + * by the argument of this function. On + * the other hand, just as if the + * single-argument add_line() function + * were called repeatedly, the + * constraints can later be modified to + * include linear dependencies using the + * add_entry() function as well as + * inhomogeneities using + * set_inhomogeneity(). + */ + void add_lines (const std::vector &lines); + + /** + * Call the first add_line() function for + * every index i that + * appears in the argument. + * + * This function essentially exists to + * allow adding several constraints of + * the form xi=0 all at once, where + * the set of indices i for which these + * constraints should be added are given + * by the argument of this function. On + * the other hand, just as if the + * single-argument add_line() function + * were called repeatedly, the + * constraints can later be modified to + * include linear dependencies using the + * add_entry() function as well as + * inhomogeneities using + * set_inhomogeneity(). + */ + void add_lines (const std::set &lines); + + /** + * Call the first add_line() function for + * every index i that + * appears in the argument. + * + * This function essentially exists to + * allow adding several constraints of + * the form xi=0 all at once, where + * the set of indices i for which these + * constraints should be added are given + * by the argument of this function. On + * the other hand, just as if the + * single-argument add_line() function + * were called repeatedly, the + * constraints can later be modified to + * include linear dependencies using the + * add_entry() function as well as + * inhomogeneities using + * set_inhomogeneity(). + */ + void add_lines (const IndexSet &lines); + + /** + * Add an entry to a given + * line. The list of lines is + * searched from the back to the + * front, so clever programming + * would add a new line (which is + * pushed to the back) and + * immediately afterwards fill + * the entries of that line. This + * way, no expensive searching is + * needed. + * + * If an entry with the same + * indices as the one this + * function call denotes already + * exists, then this function + * simply returns provided that + * the value of the entry is the + * same. 
Thus, it does no harm to + * enter a constraint twice. + */ + void add_entry (const unsigned int line, + const unsigned int column, + const double value); + + /** + * Add a whole series of entries, + * denoted by pairs of column indices + * and values, to a line of + * constraints. This function is + * equivalent to calling the preceding + * function several times, but is + * faster. + */ + void add_entries (const unsigned int line, + const std::vector > &col_val_pairs); + + /** + * Set an imhomogeneity to the + * constraint line i, according + * to the discussion in the general + * class description. + * + * @note the line needs to be added with + * one of the add_line() calls first. + */ + void set_inhomogeneity (const unsigned int line, + const double value); + + /** + * Close the filling of entries. Since + * the lines of a matrix of this type + * are usually filled in an arbitrary + * order and since we do not want to + * use associative constainers to store + * the lines, we need to sort the lines + * and within the lines the columns + * before usage of the matrix. This is + * done through this function. + * + * Also, zero entries are discarded, + * since they are not needed. + * + * After closing, no more entries are + * accepted. If the object was already + * closed, then this function returns + * immediately. + * + * This function also resolves chains + * of constraints. For example, degree + * of freedom 13 may be constrained to + * u13=u3/2+u7/2 while degree of + * freedom 7 is itself constrained as + * u7=u2/2+u4/2. Then, the + * resolution will be that + * u13=u3/2+u2/4+u4/4. Note, + * however, that cycles in this graph + * of constraints are not allowed, + * i.e. for example u4 may not be + * constrained, directly or indirectly, + * to u13 again. + */ + void close (); + + /** + * Merge the constraints represented by + * the object given as argument into + * the constraints represented by this + * object. Both objects may or may not + * be closed (by having their function + * close() called before). If this + * object was closed before, then it + * will be closed afterwards as + * well. Note, however, that if the + * other argument is closed, then + * merging may be significantly faster. + * + * Using the default value of the second + * arguments, the constraints in each of + * the two objects (the old one + * represented by this object and the + * argument) may not refer to the same + * degree of freedom, i.e. a degree of + * freedom that is constrained in one + * object may not be constrained in the + * second. If this is nevertheless the + * case, an exception is thrown. However, + * this behavior can be changed by + * providing a different value for the + * second argument. + */ + void merge (const ConstraintMatrix &other_constraints, + const MergeConflictBehavior merge_conflict_behavior = no_conflicts_allowed); + + /** + * Shift all entries of this matrix + * down @p offset rows and over @p + * offset columns. + * + * This function is useful if you are + * building block matrices, where all + * blocks are built by the same + * DoFHandler object, i.e. the matrix + * size is larger than the number of + * degrees of freedom. Since several + * matrix rows and columns correspond + * to the same degrees of freedom, + * you'd generate several constraint + * objects, then shift them, and + * finally merge() them together + * again. + */ + void shift (const unsigned int offset); + + /** + * Clear all entries of this + * matrix. 
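Taken together, the add_line()/add_entry()/set_inhomogeneity()/close() interface documented above is used like this. A minimal sketch with made-up DoF indices: it encodes the single constraint u_3 = 0.5 u_1 + 0.5 u_2 + 1.0, the typical shape of an inhomogeneously constrained hanging node.

    #include <deal.II/lac/constraint_matrix.h>

    using namespace dealii;

    void build_example_constraints (ConstraintMatrix &constraints)
    {
      constraints.add_line (3);                // declare DoF 3 as constrained
      constraints.add_entry (3, 1, 0.5);       // u_3 depends on u_1 ...
      constraints.add_entry (3, 2, 0.5);       // ... and on u_2
      constraints.set_inhomogeneity (3, 1.0);  // plus a constant offset

      // Sort the lines, resolve chains of constraints, and freeze the
      // object; no further entries are accepted afterwards.
      constraints.close ();
    }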
Reset the flag determining + * whether new entries are accepted or + * not. + * + * This function may be called also on + * objects which are empty or already + * cleared. + */ + void clear (); + + /** + * @} + */ + + + /** + * @name Querying constraints + * @{ + */ + + /** + * Return number of constraints stored in + * this matrix. + */ + unsigned int n_constraints () const; + + /** + * Return whether the degree of freedom + * with number @p index is a + * constrained one. + * + * Note that if close() was called + * before, then this function is + * significantly faster, since then the + * constrained degrees of freedom are + * sorted and we can do a binary + * search, while before close() was + * called, we have to perform a linear + * search through all entries. + */ + bool is_constrained (const unsigned int index) const; + + /** + * Return whether the dof is + * constrained, and whether it is + * constrained to only one other degree + * of freedom with weight one. The + * function therefore returns whether + * the degree of freedom would simply + * be eliminated in favor of exactly + * one other degree of freedom. + * + * The function returns @p false if + * either the degree of freedom is not + * constrained at all, or if it is + * constrained to more than one other + * degree of freedom, or if it is + * constrained to only one degree of + * freedom but with a weight different + * from one. + */ + bool is_identity_constrained (const unsigned int index) const; + + /** + * Return the maximum number of other + * dofs that one dof is constrained + * to. For example, in 2d a hanging + * node is constrained only to its two + * neighbors, so the returned value + * would be 2. However, for higher + * order elements and/or higher + * dimensions, or other types of + * constraints, this number is no more + * obvious. + * + * The name indicates that within the + * system matrix, references to a + * constrained node are indirected to + * the nodes it is constrained to. + */ + unsigned int max_constraint_indirections () const; + + /** + * Returns true in case the + * dof is constrained and there is a + * non-trivial inhomogeneous valeus set + * to the dof. + */ + bool is_inhomogeneously_constrained (const unsigned int index) const; + + /** + * Returns false if all + * constraints in the ConstraintMatrix + * are homogeneous ones, and + * true if there is at least + * one inhomogeneity. + */ + bool has_inhomogeneities () const; + + /** + * Returns a pointer to the the vector of + * entries if a line is constrained, and a + * zero pointer in case the dof is not + * constrained. + */ + const std::vector > * + get_constraint_entries (const unsigned int line) const; + + /** + * Returns the value of the inhomogeneity + * stored in the constrained dof @p + * line. Unconstrained dofs also return a + * zero value. + */ + double get_inhomogeneity (const unsigned int line) const; + + /** + * Print the constraint lines. Mainly + * for debugging purposes. + * + * This function writes out all entries + * in the constraint matrix lines with + * their value in the form row col + * : value. Unconstrained lines + * containing only one identity entry + * are not stored in this object and + * are not printed. + */ + void print (std::ostream &) const; + + /** + * Write the graph of constraints in + * 'dot' format. 'dot' is a program + * that can take a list of nodes and + * produce a graphical representation + * of the graph of constrained degrees + * of freedom and the degrees of + * freedom they are constrained to. 
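Once the object is closed, the querying members above give cheap access to what was stored. Another illustrative sketch; DoF index 3 is again made up and matches the previous example.

    #include <deal.II/lac/constraint_matrix.h>
    #include <iostream>
    #include <utility>
    #include <vector>

    using namespace dealii;

    void report_constraints (const ConstraintMatrix &constraints)
    {
      std::cout << "stored constraints: "
                << constraints.n_constraints () << std::endl;

      const unsigned int dof = 3;
      if (constraints.is_constrained (dof))
        {
          if (constraints.is_inhomogeneously_constrained (dof))
            std::cout << "inhomogeneity: "
                      << constraints.get_inhomogeneity (dof) << std::endl;

          // The pointer is zero only for unconstrained DoFs, so it is
          // safe to dereference inside this branch.
          const std::vector<std::pair<unsigned int,double> > *entries
            = constraints.get_constraint_entries (dof);
          for (unsigned int i = 0; i < entries->size (); ++i)
            std::cout << "  depends on dof " << (*entries)[i].first
                      << " with weight "     << (*entries)[i].second << std::endl;
        }
    }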
+ * + * The output of this function can be + * used as input to the 'dot' program + * that can convert the graph into a + * graphical representation in + * postscript, png, xfig, and a number + * of other formats. + * + * This function exists mostly for + * debugging purposes. + */ + void write_dot (std::ostream &) const; + + /** + * Determine an estimate for the memory + * consumption (in bytes) of this + * object. + */ + std::size_t memory_consumption () const; + + /** + * @} + */ + + /** + * @name Eliminating constraints from linear systems after their creation + * @{ + */ + + /** + * Condense a given sparsity + * pattern. This function assumes the + * uncondensed matrix struct to be + * compressed and the one to be filled + * to be empty. The condensed structure + * is compressed afterwards. + * + * The constraint matrix object must be + * closed to call this function. + * + * @note The hanging nodes are + * completely eliminated from the + * linear system referring to + * condensed. Therefore, the + * dimension of condensed is + * the dimension of + * uncondensed minus the + * number of constrained degrees of + * freedom. + */ + void condense (const SparsityPattern &uncondensed, + SparsityPattern &condensed) const; + + + /** + * This function does much the same as + * the above one, except that it + * condenses the matrix struct + * 'in-place'. It does not remove + * nonzero entries from the matrix but + * adds those needed for the process of + * distribution of the constrained + * degrees of freedom. + * + * Since this function adds new nonzero + * entries to the sparsity pattern, the + * argument must not be + * compressed. However the constraint + * matrix must be closed. The matrix + * struct is compressed at the end of + * the function. + */ + void condense (SparsityPattern &sparsity) const; + + /** + * Same function as above, but + * condenses square block sparsity + * patterns. + */ + void condense (BlockSparsityPattern &sparsity) const; + + /** + * Same function as above, but + * condenses square compressed sparsity + * patterns. + * + * Given the data structure used by + * CompressedSparsityPattern, this + * function becomes quadratic in the + * number of degrees of freedom for + * large problems and can dominate + * setting up linear systems when + * several hundred thousand or millions + * of unknowns are involved and for + * problems with many nonzero elements + * per row (for example for + * vector-valued problems or hp finite + * elements). In this case, it is + * advisable to use the + * CompressedSetSparsityPattern class + * instead, see for example @ref + * step_27 "step-27", or to use the + * CompressedSimpleSparsityPattern + * class, see for example @ref step_31 + * "step-31". + */ + void condense (CompressedSparsityPattern &sparsity) const; + + /** + * Same function as above, but + * condenses compressed sparsity + * patterns, which are based on the + * std::set container. + */ + void condense (CompressedSetSparsityPattern &sparsity) const; + + /** + * Same function as above, but + * condenses compressed sparsity + * patterns, which are based on the + * ''simple'' aproach. + */ + void condense (CompressedSimpleSparsityPattern &sparsity) const; + + /** + * Same function as above, but + * condenses square compressed sparsity + * patterns. 
+ * + * Given the data structure used by + * BlockCompressedSparsityPattern, this + * function becomes quadratic in the + * number of degrees of freedom for + * large problems and can dominate + * setting up linear systems when + * several hundred thousand or millions + * of unknowns are involved and for + * problems with many nonzero elements + * per row (for example for + * vector-valued problems or hp finite + * elements). In this case, it is + * advisable to use the + * BlockCompressedSetSparsityPattern + * class instead, see for example @ref + * step_27 "step-27" and @ref step_31 + * "step-31". + */ + void condense (BlockCompressedSparsityPattern &sparsity) const; + + /** + * Same function as above, but + * condenses square compressed sparsity + * patterns. + */ + void condense (BlockCompressedSetSparsityPattern &sparsity) const; + + /** + * Same function as above, but + * condenses square compressed sparsity + * patterns. + */ + void condense (BlockCompressedSimpleSparsityPattern &sparsity) const; + + + /** + * Condense a given matrix. The + * associated matrix struct should be + * condensed and compressed. It is the + * user's responsibility to guarantee + * that all entries in the @p condensed + * matrix be zero! + * + * The constraint matrix object must be + * closed to call this function. + */ + template + void condense (const SparseMatrix &uncondensed, + SparseMatrix &condensed) const; + + /** + * This function does much the same as + * the above one, except that it + * condenses the matrix 'in-place'. See + * the general documentation of this + * class for more detailed information. + */ + template + void condense (SparseMatrix &matrix) const; + + /** + * Same function as above, but + * condenses square block sparse + * matrices. + */ + template + void condense (BlockSparseMatrix &matrix) const; + + /** + * Condense the given vector @p + * uncondensed into @p condensed. It is + * the user's responsibility to + * guarantee that all entries of @p + * condensed be zero. Note that this + * function does not take any + * inhomogeneity into account and + * throws an exception in case there + * are any inhomogeneities. Use + * the function using both a matrix and + * vector for that case. + * + * The @p VectorType may be a + * Vector, Vector, + * BlockVector<...>, a PETSc + * or Trilinos vector wrapper class, or + * any other type having the same + * interface. + */ + template + void condense (const VectorType &uncondensed, + VectorType &condensed) const; + + /** + * Condense the given vector + * in-place. The @p VectorType may be a + * Vector, Vector, + * BlockVector<...>, a PETSc + * or Trilinos vector wrapper class, or + * any other type having the same + * interface. Note that this function + * does not take any inhomogeneity into + * account and throws an exception in + * case there are any + * inhomogeneities. Use the function + * using both a matrix and vector for + * that case. + */ + template + void condense (VectorType &vec) const; + + /** + * Condense a given matrix and a given + * vector. The associated matrix struct + * should be condensed and + * compressed. It is the user's + * responsibility to guarantee that all + * entries in the @p condensed matrix + * and vector be zero! This function is + * the appropriate choice for applying + * inhomogeneous constraints. + * + * The constraint matrix object must be + * closed to call this function. 
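The condense() family above supports the classical "assemble first, eliminate afterwards" workflow. The sketch below assumes the constraint object is already closed and that the sparsity pattern, matrix and right hand side are built elsewhere; the two calls belong to different stages of a program (setup versus post-assembly) and are only shown together for brevity.

    #include <deal.II/lac/constraint_matrix.h>
    #include <deal.II/lac/compressed_sparsity_pattern.h>
    #include <deal.II/lac/sparse_matrix.h>
    #include <deal.II/lac/vector.h>

    using namespace dealii;

    // At setup time: add/redirect entries in the compressed sparsity
    // pattern before it is copied into a static SparsityPattern.
    void condense_pattern (const ConstraintMatrix    &constraints,
                           CompressedSparsityPattern &csp)
    {
      constraints.condense (csp);
    }

    // After assembly: eliminate constrained rows/columns in place. The
    // matrix-plus-vector overload is the right one when inhomogeneous
    // constraints are present.
    void condense_system (const ConstraintMatrix &constraints,
                          SparseMatrix<double>   &system_matrix,
                          Vector<double>         &system_rhs)
    {
      constraints.condense (system_matrix, system_rhs);
    }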
+ */ + template + void condense (const SparseMatrix &uncondensed_matrix, + const VectorType &uncondensed_vector, + SparseMatrix &condensed_matrix, + VectorType &condensed_vector) const; + + /** + * This function does much the same as + * the above one, except that it + * condenses matrix and vector + * 'in-place'. See the general + * documentation of this class for more + * detailed information. + */ + template + void condense (SparseMatrix &matrix, + VectorType &vector) const; + + /** + * Same function as above, but + * condenses square block sparse + * matrices and vectors. + */ + template + void condense (BlockSparseMatrix &matrix, + BlockVectorType &vector) const; + + /** + * Sets the values of all constrained + * DoFs in a vector to zero. + * The @p VectorType may be a + * Vector, Vector, + * BlockVector<...>, a + * PETSc or Trilinos vector + * wrapper class, or any other + * type having the same + * interface. + */ + template + void set_zero (VectorType &vec) const; + + /** + * @} + */ + + /** + * @name Eliminating constraints from linear systems during their creation + * @{ + */ + + /** + * This function takes a vector of + * local contributions (@p + * local_vector) corresponding to the + * degrees of freedom indices given in + * @p local_dof_indices and distributes + * them to the global vector. In most + * cases, these local contributions + * will be the result of an integration + * over a cell or face of a + * cell. However, as long as @p + * local_vector and @p + * local_dof_indices have the same + * number of elements, this function is + * happy with whatever it is + * given. + * + * In contrast to the similar function + * in the DoFAccessor class, this + * function also takes care of + * constraints, i.e. if one of the + * elements of @p local_dof_indices + * belongs to a constrained node, then + * rather than writing the + * corresponding element of @p + * local_vector into @p global_vector, + * the element is distributed to the + * entries in the global vector to + * which this particular degree of + * freedom is constrained. + * + * Thus, by using this function to + * distribute local contributions to the + * global object, one saves the call to + * the condense function after the + * vectors and matrices are fully + * assembled. On the other hand, by + * consequence, the function does not + * only write into the entries enumerated + * by the @p local_dof_indices array, but + * also (possibly) others as necessary. + * + * Note that this function will apply all + * constraints as if they were + * homogeneous. For correctly setting + * inhomogeneous constraints, use the + * similar function with a matrix + * argument or the function with both + * matrix and vector arguments. + * + * @note This function is not + * thread-safe, so you will need to make + * sure that only one process at a time + * calls this function. + */ + template + void + distribute_local_to_global (const InVector &local_vector, + const std::vector &local_dof_indices, + OutVector &global_vector) const; + + /** + * This function takes a vector of + * local contributions (@p + * local_vector) corresponding to the + * degrees of freedom indices given in + * @p local_dof_indices and distributes + * them to the global vector. In most + * cases, these local contributions + * will be the result of an integration + * over a cell or face of a + * cell. However, as long as @p + * local_vector and @p + * local_dof_indices have the same + * number of elements, this function is + * happy with whatever it is + * given. 
+ * + * In contrast to the similar function in + * the DoFAccessor class, this function + * also takes care of constraints, + * i.e. if one of the elements of @p + * local_dof_indices belongs to a + * constrained node, then rather than + * writing the corresponding element of + * @p local_vector into @p global_vector, + * the element is distributed to the + * entries in the global vector to which + * this particular degree of freedom is + * constrained. + * + * Thus, by using this function to + * distribute local contributions to the + * global object, one saves the call to + * the condense function after the + * vectors and matrices are fully + * assembled. On the other hand, by + * consequence, the function does not + * only write into the entries enumerated + * by the @p local_dof_indices array, but + * also (possibly) others as + * necessary. This includes writing into + * diagonal elements of the matrix if the + * corresponding degree of freedom is + * constrained. + * + * The fourth argument + * local_matrix is intended to + * be used in case one wants to apply + * inhomogeneous constraints on the + * vector only. Such a situation could be + * where one wants to assemble of a right + * hand side vector on a problem with + * inhomogeneous constraints, but the + * global matrix has been assembled + * previously. A typical example of this + * is a time stepping algorithm where the + * stiffness matrix is assembled once, + * and the right hand side updated every + * time step. Note that, however, the + * entries in the columns of the local + * matrix have to be exactly the same as + * those that have been written into the + * global matrix. Otherwise, this + * function will not be able to correctly + * handle inhomogeneities. + * + * @note This function is not + * thread-safe, so you will need to make + * sure that only one process at a time + * calls this function. + */ + template + void + distribute_local_to_global (const Vector &local_vector, + const std::vector &local_dof_indices, + VectorType &global_vector, + const FullMatrix &local_matrix) const; + + /** + * Enter a single value into a + * result vector, obeying constraints. + */ + template + void + distribute_local_to_global (const unsigned int index, + const double value, + VectorType &global_vector) const; + + /** + * This function takes a pointer to a + * vector of local contributions (@p + * local_vector) corresponding to the + * degrees of freedom indices given in + * @p local_dof_indices and distributes + * them to the global vector. In most + * cases, these local contributions + * will be the result of an integration + * over a cell or face of a + * cell. However, as long as the + * entries in @p local_dof_indices + * indicate reasonable global vector + * entries, this function is happy with + * whatever it is given. + * + * If one of the elements of @p + * local_dof_indices belongs to a + * constrained node, then rather than + * writing the corresponding element of + * @p local_vector into @p + * global_vector, the element is + * distributed to the entries in the + * global vector to which this + * particular degree of freedom is + * constrained. + * + * Thus, by using this function to + * distribute local contributions to + * the global object, one saves the + * call to the condense function after + * the vectors and matrices are fully + * assembled. Note that this function + * completely ignores inhomogeneous + * constraints. 
+ * + * @note This function is not + * thread-safe, so you will need to + * make sure that only one process at a + * time calls this function. + */ + template + void + distribute_local_to_global (ForwardIteratorVec local_vector_begin, + ForwardIteratorVec local_vector_end, + ForwardIteratorInd local_indices_begin, + VectorType &global_vector) const; + + /** + * This function takes a matrix of + * local contributions (@p + * local_matrix) corresponding to the + * degrees of freedom indices given in + * @p local_dof_indices and distributes + * them to the global matrix. In most + * cases, these local contributions + * will be the result of an integration + * over a cell or face of a + * cell. However, as long as @p + * local_matrix and @p + * local_dof_indices have the same + * number of elements, this function is + * happy with whatever it is given. + * + * In contrast to the similar function + * in the DoFAccessor class, this + * function also takes care of + * constraints, i.e. if one of the + * elements of @p local_dof_indices + * belongs to a constrained node, then + * rather than writing the + * corresponding element of @p + * local_matrix into @p global_matrix, + * the element is distributed to the + * entries in the global matrix to + * which this particular degree of + * freedom is constrained. + * + * With this scheme, we never write + * into rows or columns of constrained + * degrees of freedom. In order to make + * sure that the resulting matrix can + * still be inverted, we need to do + * something with the diagonal elements + * corresponding to constrained + * nodes. Thus, if a degree of freedom + * in @p local_dof_indices is + * constrained, we distribute the + * corresponding entries in the matrix, + * but also add the absolute value of + * the diagonal entry of the local + * matrix to the corresponding entry in + * the global matrix. Since the exact + * value of the diagonal element is not + * important (the value of the + * respective degree of freedom will be + * overwritten by the distribute() call + * later on anyway), this guarantees + * that the diagonal entry is always + * non-zero, positive, and of the same + * order of magnitude as the other + * entries of the matrix. + * + * Thus, by using this function to + * distribute local contributions to + * the global object, one saves the + * call to the condense function after + * the vectors and matrices are fully + * assembled. + * + * @note This function is not + * thread-safe, so you will need to + * make sure that only one process at a + * time calls this function. + */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const std::vector &local_dof_indices, + MatrixType &global_matrix) const; + + /** + * Does the same as the function + * above but can treat non + * quadratic matrices. + */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const std::vector &row_indices, + const std::vector &col_indices, + MatrixType &global_matrix) const; + + /** + * This function simultaneously + * writes elements into matrix + * and vector, according to the + * constraints specified by the + * calling ConstraintMatrix. This + * function can correctly handle + * inhomogeneous constraints as + * well. For the parameter + * use_inhomogeneities_for_rhs + * see the documentation in @ref + * constraints module. + * + * @note This function is not + * thread-safe, so you will need to + * make sure that only one process at a + * time calls this function. 
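Alternatively, the distribute_local_to_global() variants documented above eliminate the constraints while copying each cell's contribution into the global objects, so no later condense() call is needed. A sketch of the copy step only; the local matrix, local right hand side and DoF indices are assumed to come from the usual quadrature loop.

    #include <deal.II/lac/constraint_matrix.h>
    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/sparse_matrix.h>
    #include <deal.II/lac/vector.h>
    #include <vector>

    using namespace dealii;

    void copy_local_to_global (const ConstraintMatrix          &constraints,
                               const FullMatrix<double>        &cell_matrix,
                               const Vector<double>            &cell_rhs,
                               const std::vector<unsigned int> &local_dof_indices,
                               SparseMatrix<double>            &system_matrix,
                               Vector<double>                  &system_rhs)
    {
      // Matrix-and-vector overload: handles inhomogeneous constraints and
      // never writes into rows/columns of constrained DoFs, while keeping
      // a nonzero value on their diagonal. Not thread-safe, as noted above.
      constraints.distribute_local_to_global (cell_matrix, cell_rhs,
                                              local_dof_indices,
                                              system_matrix, system_rhs);
    }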
+ */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs = false) const; + + /** + * Do a similar operation as the + * distribute_local_to_global() function + * that distributes writing entries into + * a matrix for constrained degrees of + * freedom, except that here we don't + * write into a matrix but only allocate + * sparsity pattern entries. + * + * As explained in the + * @ref hp_paper "hp paper" + * and in step-27, + * first allocating a sparsity pattern + * and later coming back and allocating + * additional entries for those matrix + * entries that will be written to due to + * the elimination of constrained degrees + * of freedom (using + * ConstraintMatrix::condense() ), can be + * a very expensive procedure. It is + * cheaper to allocate these entries + * right away without having to do a + * second pass over the sparsity pattern + * object. This function does exactly + * that. + * + * Because the function only allocates + * entries in a sparsity pattern, all it + * needs to know are the degrees of + * freedom that couple to each + * other. Unlike the previous function, + * no actual values are written, so the + * second input argument is not necessary + * here. + * + * The third argument to this function, + * keep_constrained_entries determines + * whether the function shall allocate + * entries in the sparsity pattern at + * all for entries that will later be + * set to zero upon condensation of the + * matrix. These entries are necessary + * if the matrix is built + * unconstrained, and only later + * condensed. They are not necessary if + * the matrix is built using the + * distribute_local_to_global() + * function of this class which + * distributes entries right away when + * copying a local matrix into a global + * object. The default of this argument + * is true, meaning to allocate the few + * entries that may later be set to + * zero. + * + * By default, the function adds + * entries for all pairs of indices + * given in the first argument to the + * sparsity pattern (unless + * keep_constrained_entries is + * false). However, sometimes one would + * like to only add a subset of all of + * these pairs. In that case, the last + * argument can be used which specifies + * a boolean mask which of the pairs of + * indices should be considered. If the + * mask is false for a pair of indices, + * then no entry will be added to the + * sparsity pattern for this pair, + * irrespective of whether one or both + * of the indices correspond to + * constrained degrees of freedom. + * + * This function is not typically called + * from user code, but is used in the + * DoFTools::make_sparsity_pattern() + * function when passed a constraint + * matrix object. + */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries = true, + const Table<2,bool> &dof_mask = default_empty_table) const; + + /** + * Similar to the other function, + * but for non-quadratic sparsity + * patterns. 
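(Editorial aside, not part of this patch.) For illustration, a sketch of how add_entries_local_to_global() is typically driven when the matrix will later be assembled with distribute_local_to_global(), so that entries for constrained rows and columns are not needed. The names dof_handler, fe, constraints and sparsity_pattern are assumed to exist, and dim=2 is hard-wired purely for the sketch.

    CompressedSimpleSparsityPattern csp (dof_handler.n_dofs());
    std::vector<unsigned int>       local_dof_indices (fe.dofs_per_cell);

    DoFHandler<2>::active_cell_iterator cell = dof_handler.begin_active(),
                                        endc = dof_handler.end();
    for (; cell != endc; ++cell)
      {
        cell->get_dof_indices (local_dof_indices);
        constraints.add_entries_local_to_global (local_dof_indices,
                                                 csp,
                                                 /*keep_constrained_entries=*/ false);
      }
    sparsity_pattern.copy_from (csp);

In user code the same effect is usually obtained by passing the constraint matrix to DoFTools::make_sparsity_pattern(), as noted above.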
+ */ + + template + void + add_entries_local_to_global (const std::vector &row_indices, + const std::vector &col_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries = true, + const Table<2,bool> &dof_mask = default_empty_table) const; + + /** + * This function imports values from a + * global vector (@p global_vector) by + * applying the constraints to a vector + * of local values, expressed in + * iterator format. In most cases, the + * local values will be identified by + * the local dof values on a + * cell. However, as long as the + * entries in @p local_dof_indices + * indicate reasonable global vector + * entries, this function is happy with + * whatever it is given. + * + * If one of the elements of @p + * local_dof_indices belongs to a + * constrained node, then rather than + * writing the corresponding element of + * @p global_vector into @p + * local_vector, the constraints are + * resolved as the respective + * distribute function does, i.e., the + * local entry is constructed from the + * global entries to which this + * particular degree of freedom is + * constrained. + * + * In contrast to the similar function + * get_dof_values in the DoFAccessor + * class, this function does not need + * the constrained values to be + * correctly set (i.e., distribute to + * be called). + */ + template + void - get_dof_values (const VectorType &global_vector, ++ get_dof_values (const VectorType &global_vector, + ForwardIteratorInd local_indices_begin, + ForwardIteratorVec local_vector_begin, + ForwardIteratorVec local_vector_end) const; + + /** + * @} + */ + + /** + * @name Dealing with constraints after solving a linear system + * @{ + */ + + /** + * Re-distribute the elements of the + * vector @p condensed to @p + * uncondensed. It is the user's + * responsibility to guarantee that all + * entries of @p uncondensed be zero! + * + * This function undoes the action of + * @p condense somehow, but it should + * be noted that it is not the inverse + * of @p condense. + * + * The @p VectorType may be a + * Vector, Vector, + * BlockVector<...>, a PETSc + * or Trilinos vector wrapper class, or + * any other type having the same + * interface. + */ + template + void distribute (const VectorType &condensed, + VectorType &uncondensed) const; + + /** + * Re-distribute the elements of the + * vector in-place. The @p VectorType + * may be a Vector, + * Vector, + * BlockVector<...>, a PETSc + * or Trilinos vector wrapper class, or + * any other type having the same + * interface. + * + * Note that if called with a + * TrilinosWrappers::MPI::Vector it may + * not contain ghost elements. 
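(Editorial aside, not part of this patch.) The two operations described above, resolving constraints after the solve and reading constraint-resolved local values, might look as follows in user code. This is a sketch only; solution, constraints and local_dof_indices are assumed to exist.

    // ... solve the linear system into the vector 'solution' ...

    // Give constrained degrees of freedom their correct values:
    constraints.distribute (solution);

    // Alternatively, read constraint-resolved values for one cell's DoFs
    // directly; get_dof_values() works even before distribute() is called:
    std::vector<double> local_values (local_dof_indices.size());
    constraints.get_dof_values (solution,
                                local_dof_indices.begin(),
                                local_values.begin(),
                                local_values.end());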
+ */ + template + void distribute (VectorType &vec) const; + + /** + * @} + */ + + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcMatrixIsClosed); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException0 (ExcMatrixNotClosed); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException1 (ExcLineInexistant, + unsigned int, + << "The specified line " << arg1 + << " does not exist."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException4 (ExcEntryAlreadyExists, + int, int, double, double, + << "The entry for the indices " << arg1 << " and " + << arg2 << " already exists, but the values " + << arg3 << " (old) and " << arg4 << " (new) differ " + << "by " << (arg4-arg3) << "."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException2 (ExcDoFConstrainedToConstrainedDoF, + int, int, + << "You tried to constrain DoF " << arg1 + << " to DoF " << arg2 + << ", but that one is also constrained. This is not allowed!"); + /** + * Exception. + * + * @ingroup Exceptions + */ + DeclException1 (ExcDoFIsConstrainedFromBothObjects, + int, + << "Degree of freedom " << arg1 + << " is constrained from both object in a merge operation."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException1 (ExcDoFIsConstrainedToConstrainedDoF, + int, + << "In the given argument a degree of freedom is constrained " + << "to another DoF with number " << arg1 + << ", which however is constrained by this object. This is not" + << " allowed."); + /** + * Exception + * + * @ingroup Exceptions + */ + DeclException1 (ExcRowNotStoredHere, + int, + << "The index set given to this constraint matrix indicates " + << "constraints for degree of freedom " << arg1 + << " should not be stored by this object, but a constraint " + << "is being added."); + + private: + + /** + * This class represents one line of a + * constraint matrix. + */ + struct ConstraintLine + { + /** + * A data type in which we store the list + * of entries that make up the homogenous + * part of a constraint. + */ + typedef std::vector > Entries; + + /** + * Number of this line. Since only + * very few lines are stored, we + * can not assume a specific order + * and have to store the line + * number explicitly. + */ + unsigned int line; + + /** + * Row numbers and values of the + * entries in this line. + * + * For the reason why we use a + * vector instead of a map and the + * consequences thereof, the same + * applies as what is said for + * ConstraintMatrix::lines. + */ + Entries entries; + + /** + * Value of the inhomogeneity. + */ + double inhomogeneity; + + /** + * This operator is a bit weird and + * unintuitive: it compares the + * line numbers of two lines. We + * need this to sort the lines; in + * fact we could do this using a + * comparison predicate. However, + * this way, it is easier, albeit + * unintuitive since two lines + * really have no god-given order + * relation. + */ + bool operator < (const ConstraintLine &) const; + + /** + * This operator is likewise weird: + * it checks whether the line + * indices of the two operands are + * equal, irrespective of the fact + * that the contents of the line + * may be different. + */ + bool operator == (const ConstraintLine &) const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) of + * this object. 
+ */ std::size_t memory_consumption () const; - - /** - * @} - */ - - /** - * @name Eliminating constraints from linear systems after their creation - * @{ - */ - - /** - * Condense a given sparsity - * pattern. This function assumes the - * uncondensed matrix struct to be - * compressed and the one to be filled - * to be empty. The condensed structure - * is compressed afterwards. - * - * The constraint matrix object must be - * closed to call this function. - * - * @note The hanging nodes are - * completely eliminated from the - * linear system referring to - * condensed. Therefore, the - * dimension of condensed is - * the dimension of - * uncondensed minus the - * number of constrained degrees of - * freedom. - */ - void condense (const SparsityPattern &uncondensed, - SparsityPattern &condensed) const; - - - /** - * This function does much the same as - * the above one, except that it - * condenses the matrix struct - * 'in-place'. It does not remove - * nonzero entries from the matrix but - * adds those needed for the process of - * distribution of the constrained - * degrees of freedom. - * - * Since this function adds new nonzero - * entries to the sparsity pattern, the - * argument must not be - * compressed. However the constraint - * matrix must be closed. The matrix - * struct is compressed at the end of - * the function. - */ - void condense (SparsityPattern &sparsity) const; - - /** - * Same function as above, but - * condenses square block sparsity - * patterns. - */ - void condense (BlockSparsityPattern &sparsity) const; - - /** - * Same function as above, but - * condenses square compressed sparsity - * patterns. - * - * Given the data structure used by - * CompressedSparsityPattern, this - * function becomes quadratic in the - * number of degrees of freedom for - * large problems and can dominate - * setting up linear systems when - * several hundred thousand or millions - * of unknowns are involved and for - * problems with many nonzero elements - * per row (for example for - * vector-valued problems or hp finite - * elements). In this case, it is - * advisable to use the - * CompressedSetSparsityPattern class - * instead, see for example @ref - * step_27 "step-27", or to use the - * CompressedSimpleSparsityPattern - * class, see for example @ref step_31 - * "step-31". - */ - void condense (CompressedSparsityPattern &sparsity) const; - - /** - * Same function as above, but - * condenses compressed sparsity - * patterns, which are based on the - * std::set container. - */ - void condense (CompressedSetSparsityPattern &sparsity) const; - - /** - * Same function as above, but - * condenses compressed sparsity - * patterns, which are based on the - * ''simple'' aproach. - */ - void condense (CompressedSimpleSparsityPattern &sparsity) const; - - /** - * Same function as above, but - * condenses square compressed sparsity - * patterns. - * - * Given the data structure used by - * BlockCompressedSparsityPattern, this - * function becomes quadratic in the - * number of degrees of freedom for - * large problems and can dominate - * setting up linear systems when - * several hundred thousand or millions - * of unknowns are involved and for - * problems with many nonzero elements - * per row (for example for - * vector-valued problems or hp finite - * elements). In this case, it is - * advisable to use the - * BlockCompressedSetSparsityPattern - * class instead, see for example @ref - * step_27 "step-27" and @ref step_31 - * "step-31". 
- */ - void condense (BlockCompressedSparsityPattern &sparsity) const; - - /** - * Same function as above, but - * condenses square compressed sparsity - * patterns. - */ - void condense (BlockCompressedSetSparsityPattern &sparsity) const; - - /** - * Same function as above, but - * condenses square compressed sparsity - * patterns. - */ - void condense (BlockCompressedSimpleSparsityPattern &sparsity) const; - - - /** - * Condense a given matrix. The - * associated matrix struct should be - * condensed and compressed. It is the - * user's responsibility to guarantee - * that all entries in the @p condensed - * matrix be zero! - * - * The constraint matrix object must be - * closed to call this function. - */ - template - void condense (const SparseMatrix &uncondensed, - SparseMatrix &condensed) const; - - /** - * This function does much the same as - * the above one, except that it - * condenses the matrix 'in-place'. See - * the general documentation of this - * class for more detailed information. - */ - template - void condense (SparseMatrix &matrix) const; - - /** - * Same function as above, but - * condenses square block sparse - * matrices. - */ - template - void condense (BlockSparseMatrix &matrix) const; - - /** - * Condense the given vector @p - * uncondensed into @p condensed. It is - * the user's responsibility to - * guarantee that all entries of @p - * condensed be zero. Note that this - * function does not take any - * inhomogeneity into account and - * throws an exception in case there - * are any inhomogeneities. Use - * the function using both a matrix and - * vector for that case. - * - * The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same - * interface. - */ - template - void condense (const VectorType &uncondensed, - VectorType &condensed) const; - - /** - * Condense the given vector - * in-place. The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same - * interface. Note that this function - * does not take any inhomogeneity into - * account and throws an exception in - * case there are any - * inhomogeneities. Use the function - * using both a matrix and vector for - * that case. - */ - template - void condense (VectorType &vec) const; - - /** - * Condense a given matrix and a given - * vector. The associated matrix struct - * should be condensed and - * compressed. It is the user's - * responsibility to guarantee that all - * entries in the @p condensed matrix - * and vector be zero! This function is - * the appropriate choice for applying - * inhomogeneous constraints. - * - * The constraint matrix object must be - * closed to call this function. - */ - template - void condense (const SparseMatrix &uncondensed_matrix, - const VectorType &uncondensed_vector, - SparseMatrix &condensed_matrix, - VectorType &condensed_vector) const; - - /** - * This function does much the same as - * the above one, except that it - * condenses matrix and vector - * 'in-place'. See the general - * documentation of this class for more - * detailed information. - */ - template - void condense (SparseMatrix &matrix, - VectorType &vector) const; - - /** - * Same function as above, but - * condenses square block sparse - * matrices and vectors. 
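(Editorial aside, not part of this patch.) The condense()-based workflow documented in the block above, in contrast to the distribute_local_to_global() path, might be sketched as follows; sparsity_pattern, system_matrix, system_rhs, solution and a closed constraints object are assumed to exist.

    // Assemble first, eliminate constrained degrees of freedom afterwards.
    constraints.condense (sparsity_pattern);           // allocate entries needed
                                                        // for the elimination
    // ... assemble system_matrix and system_rhs ignoring all constraints ...
    constraints.condense (system_matrix, system_rhs);  // in-place elimination,
                                                        // handles inhomogeneities
    // ... solve the condensed system into 'solution' ...
    constraints.distribute (solution);                  // restore constrained values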
- */ - template - void condense (BlockSparseMatrix &matrix, - BlockVectorType &vector) const; - - /** - * Sets the values of all constrained - * DoFs in a vector to zero. - * The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a - * PETSc or Trilinos vector - * wrapper class, or any other - * type having the same - * interface. - */ - template - void set_zero (VectorType &vec) const; - - /** - * @} - */ - - /** - * @name Eliminating constraints from linear systems during their creation - * @{ - */ - - /** - * This function takes a vector of - * local contributions (@p - * local_vector) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global vector. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. However, as long as @p - * local_vector and @p - * local_dof_indices have the same - * number of elements, this function is - * happy with whatever it is - * given. - * - * In contrast to the similar function - * in the DoFAccessor class, this - * function also takes care of - * constraints, i.e. if one of the - * elements of @p local_dof_indices - * belongs to a constrained node, then - * rather than writing the - * corresponding element of @p - * local_vector into @p global_vector, - * the element is distributed to the - * entries in the global vector to - * which this particular degree of - * freedom is constrained. - * - * Thus, by using this function to - * distribute local contributions to the - * global object, one saves the call to - * the condense function after the - * vectors and matrices are fully - * assembled. On the other hand, by - * consequence, the function does not - * only write into the entries enumerated - * by the @p local_dof_indices array, but - * also (possibly) others as necessary. - * - * Note that this function will apply all - * constraints as if they were - * homogeneous. For correctly setting - * inhomogeneous constraints, use the - * similar function with a matrix - * argument or the function with both - * matrix and vector arguments. - * - * @note This function is not - * thread-safe, so you will need to make - * sure that only one process at a time - * calls this function. - */ - template - void - distribute_local_to_global (const InVector &local_vector, - const std::vector &local_dof_indices, - OutVector &global_vector) const; - - /** - * This function takes a vector of - * local contributions (@p - * local_vector) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global vector. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. However, as long as @p - * local_vector and @p - * local_dof_indices have the same - * number of elements, this function is - * happy with whatever it is - * given. - * - * In contrast to the similar function in - * the DoFAccessor class, this function - * also takes care of constraints, - * i.e. if one of the elements of @p - * local_dof_indices belongs to a - * constrained node, then rather than - * writing the corresponding element of - * @p local_vector into @p global_vector, - * the element is distributed to the - * entries in the global vector to which - * this particular degree of freedom is - * constrained. 
- * - * Thus, by using this function to - * distribute local contributions to the - * global object, one saves the call to - * the condense function after the - * vectors and matrices are fully - * assembled. On the other hand, by - * consequence, the function does not - * only write into the entries enumerated - * by the @p local_dof_indices array, but - * also (possibly) others as - * necessary. This includes writing into - * diagonal elements of the matrix if the - * corresponding degree of freedom is - * constrained. - * - * The fourth argument - * local_matrix is intended to - * be used in case one wants to apply - * inhomogeneous constraints on the - * vector only. Such a situation could be - * where one wants to assemble of a right - * hand side vector on a problem with - * inhomogeneous constraints, but the - * global matrix has been assembled - * previously. A typical example of this - * is a time stepping algorithm where the - * stiffness matrix is assembled once, - * and the right hand side updated every - * time step. Note that, however, the - * entries in the columns of the local - * matrix have to be exactly the same as - * those that have been written into the - * global matrix. Otherwise, this - * function will not be able to correctly - * handle inhomogeneities. - * - * @note This function is not - * thread-safe, so you will need to make - * sure that only one process at a time - * calls this function. - */ - template - void - distribute_local_to_global (const Vector &local_vector, - const std::vector &local_dof_indices, - VectorType &global_vector, - const FullMatrix &local_matrix) const; - - /** - * Enter a single value into a - * result vector, obeying constraints. - */ - template - void - distribute_local_to_global (const unsigned int index, - const double value, - VectorType &global_vector) const; - - /** - * This function takes a pointer to a - * vector of local contributions (@p - * local_vector) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global vector. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. However, as long as the - * entries in @p local_dof_indices - * indicate reasonable global vector - * entries, this function is happy with - * whatever it is given. - * - * If one of the elements of @p - * local_dof_indices belongs to a - * constrained node, then rather than - * writing the corresponding element of - * @p local_vector into @p - * global_vector, the element is - * distributed to the entries in the - * global vector to which this - * particular degree of freedom is - * constrained. - * - * Thus, by using this function to - * distribute local contributions to - * the global object, one saves the - * call to the condense function after - * the vectors and matrices are fully - * assembled. Note that this function - * completely ignores inhomogeneous - * constraints. - * - * @note This function is not - * thread-safe, so you will need to - * make sure that only one process at a - * time calls this function. 
- */ - template - void - distribute_local_to_global (ForwardIteratorVec local_vector_begin, - ForwardIteratorVec local_vector_end, - ForwardIteratorInd local_indices_begin, - VectorType &global_vector) const; - - /** - * This function takes a matrix of - * local contributions (@p - * local_matrix) corresponding to the - * degrees of freedom indices given in - * @p local_dof_indices and distributes - * them to the global matrix. In most - * cases, these local contributions - * will be the result of an integration - * over a cell or face of a - * cell. However, as long as @p - * local_matrix and @p - * local_dof_indices have the same - * number of elements, this function is - * happy with whatever it is given. - * - * In contrast to the similar function - * in the DoFAccessor class, this - * function also takes care of - * constraints, i.e. if one of the - * elements of @p local_dof_indices - * belongs to a constrained node, then - * rather than writing the - * corresponding element of @p - * local_matrix into @p global_matrix, - * the element is distributed to the - * entries in the global matrix to - * which this particular degree of - * freedom is constrained. - * - * With this scheme, we never write - * into rows or columns of constrained - * degrees of freedom. In order to make - * sure that the resulting matrix can - * still be inverted, we need to do - * something with the diagonal elements - * corresponding to constrained - * nodes. Thus, if a degree of freedom - * in @p local_dof_indices is - * constrained, we distribute the - * corresponding entries in the matrix, - * but also add the absolute value of - * the diagonal entry of the local - * matrix to the corresponding entry in - * the global matrix. Since the exact - * value of the diagonal element is not - * important (the value of the - * respective degree of freedom will be - * overwritten by the distribute() call - * later on anyway), this guarantees - * that the diagonal entry is always - * non-zero, positive, and of the same - * order of magnitude as the other - * entries of the matrix. - * - * Thus, by using this function to - * distribute local contributions to - * the global object, one saves the - * call to the condense function after - * the vectors and matrices are fully - * assembled. - * - * @note This function is not - * thread-safe, so you will need to - * make sure that only one process at a - * time calls this function. - */ - template - void - distribute_local_to_global (const FullMatrix &local_matrix, - const std::vector &local_dof_indices, - MatrixType &global_matrix) const; - - /** - * Does the same as the function - * above but can treat non - * quadratic matrices. - */ - template - void - distribute_local_to_global (const FullMatrix &local_matrix, - const std::vector &row_indices, - const std::vector &col_indices, - MatrixType &global_matrix) const; - - /** - * This function simultaneously - * writes elements into matrix - * and vector, according to the - * constraints specified by the - * calling ConstraintMatrix. This - * function can correctly handle - * inhomogeneous constraints as - * well. For the parameter - * use_inhomogeneities_for_rhs - * see the documentation in @ref - * constraints module. - * - * @note This function is not - * thread-safe, so you will need to - * make sure that only one process at a - * time calls this function. 
- */ - template - void - distribute_local_to_global (const FullMatrix &local_matrix, - const Vector &local_vector, - const std::vector &local_dof_indices, - MatrixType &global_matrix, - VectorType &global_vector, - bool use_inhomogeneities_for_rhs = false) const; - - /** - * Do a similar operation as the - * distribute_local_to_global() function - * that distributes writing entries into - * a matrix for constrained degrees of - * freedom, except that here we don't - * write into a matrix but only allocate - * sparsity pattern entries. - * - * As explained in the - * @ref hp_paper "hp paper" - * and in step-27, - * first allocating a sparsity pattern - * and later coming back and allocating - * additional entries for those matrix - * entries that will be written to due to - * the elimination of constrained degrees - * of freedom (using - * ConstraintMatrix::condense() ), can be - * a very expensive procedure. It is - * cheaper to allocate these entries - * right away without having to do a - * second pass over the sparsity pattern - * object. This function does exactly - * that. - * - * Because the function only allocates - * entries in a sparsity pattern, all it - * needs to know are the degrees of - * freedom that couple to each - * other. Unlike the previous function, - * no actual values are written, so the - * second input argument is not necessary - * here. - * - * The third argument to this function, - * keep_constrained_entries determines - * whether the function shall allocate - * entries in the sparsity pattern at - * all for entries that will later be - * set to zero upon condensation of the - * matrix. These entries are necessary - * if the matrix is built - * unconstrained, and only later - * condensed. They are not necessary if - * the matrix is built using the - * distribute_local_to_global() - * function of this class which - * distributes entries right away when - * copying a local matrix into a global - * object. The default of this argument - * is true, meaning to allocate the few - * entries that may later be set to - * zero. - * - * By default, the function adds - * entries for all pairs of indices - * given in the first argument to the - * sparsity pattern (unless - * keep_constrained_entries is - * false). However, sometimes one would - * like to only add a subset of all of - * these pairs. In that case, the last - * argument can be used which specifies - * a boolean mask which of the pairs of - * indices should be considered. If the - * mask is false for a pair of indices, - * then no entry will be added to the - * sparsity pattern for this pair, - * irrespective of whether one or both - * of the indices correspond to - * constrained degrees of freedom. - * - * This function is not typically called - * from user code, but is used in the - * DoFTools::make_sparsity_pattern() - * function when passed a constraint - * matrix object. - */ - template - void - add_entries_local_to_global (const std::vector &local_dof_indices, - SparsityType &sparsity_pattern, - const bool keep_constrained_entries = true, - const Table<2,bool> &dof_mask = default_empty_table) const; - - /** - * Similar to the other function, - * but for non-quadratic sparsity - * patterns. 
- */ - - template - void - add_entries_local_to_global (const std::vector &row_indices, - const std::vector &col_indices, - SparsityType &sparsity_pattern, - const bool keep_constrained_entries = true, - const Table<2,bool> &dof_mask = default_empty_table) const; - - /** - * This function imports values from a - * global vector (@p global_vector) by - * applying the constraints to a vector - * of local values, expressed in - * iterator format. In most cases, the - * local values will be identified by - * the local dof values on a - * cell. However, as long as the - * entries in @p local_dof_indices - * indicate reasonable global vector - * entries, this function is happy with - * whatever it is given. - * - * If one of the elements of @p - * local_dof_indices belongs to a - * constrained node, then rather than - * writing the corresponding element of - * @p global_vector into @p - * local_vector, the constraints are - * resolved as the respective - * distribute function does, i.e., the - * local entry is constructed from the - * global entries to which this - * particular degree of freedom is - * constrained. - * - * In contrast to the similar function - * get_dof_values in the DoFAccessor - * class, this function does not need - * the constrained values to be - * correctly set (i.e., distribute to - * be called). - */ - template - void - get_dof_values (const VectorType &global_vector, - ForwardIteratorInd local_indices_begin, - ForwardIteratorVec local_vector_begin, - ForwardIteratorVec local_vector_end) const; - - /** - * @} - */ - - /** - * @name Dealing with constraints after solving a linear system - * @{ - */ - - /** - * Re-distribute the elements of the - * vector @p condensed to @p - * uncondensed. It is the user's - * responsibility to guarantee that all - * entries of @p uncondensed be zero! - * - * This function undoes the action of - * @p condense somehow, but it should - * be noted that it is not the inverse - * of @p condense. - * - * The @p VectorType may be a - * Vector, Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same - * interface. - */ - template - void distribute (const VectorType &condensed, - VectorType &uncondensed) const; - - /** - * Re-distribute the elements of the - * vector in-place. The @p VectorType - * may be a Vector, - * Vector, - * BlockVector<...>, a PETSc - * or Trilinos vector wrapper class, or - * any other type having the same - * interface. - * - * Note that if called with a - * TrilinosWrappers::MPI::Vector it may - * not contain ghost elements. 
- */ - template - void distribute (VectorType &vec) const; - - /** - * @} - */ - - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException0 (ExcMatrixIsClosed); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException0 (ExcMatrixNotClosed); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException1 (ExcLineInexistant, - unsigned int, - << "The specified line " << arg1 - << " does not exist."); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException4 (ExcEntryAlreadyExists, - int, int, double, double, - << "The entry for the indices " << arg1 << " and " - << arg2 << " already exists, but the values " - << arg3 << " (old) and " << arg4 << " (new) differ " - << "by " << (arg4-arg3) << "."); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException2 (ExcDoFConstrainedToConstrainedDoF, - int, int, - << "You tried to constrain DoF " << arg1 - << " to DoF " << arg2 - << ", but that one is also constrained. This is not allowed!"); - /** - * Exception. - * - * @ingroup Exceptions - */ - DeclException1 (ExcDoFIsConstrainedFromBothObjects, - int, - << "Degree of freedom " << arg1 - << " is constrained from both object in a merge operation."); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException1 (ExcDoFIsConstrainedToConstrainedDoF, - int, - << "In the given argument a degree of freedom is constrained " - << "to another DoF with number " << arg1 - << ", which however is constrained by this object. This is not" - << " allowed."); - /** - * Exception - * - * @ingroup Exceptions - */ - DeclException1 (ExcRowNotStoredHere, - int, - << "The index set given to this constraint matrix indicates " - << "constraints for degree of freedom " << arg1 - << " should not be stored by this object, but a constraint " - << "is being added."); - - private: - - /** - * This class represents one line of a - * constraint matrix. - */ - struct ConstraintLine - { - /** - * A data type in which we store the list - * of entries that make up the homogenous - * part of a constraint. - */ - typedef std::vector > Entries; - - /** - * Number of this line. Since only - * very few lines are stored, we - * can not assume a specific order - * and have to store the line - * number explicitly. - */ - unsigned int line; - - /** - * Row numbers and values of the - * entries in this line. - * - * For the reason why we use a - * vector instead of a map and the - * consequences thereof, the same - * applies as what is said for - * ConstraintMatrix::lines. - */ - Entries entries; - - /** - * Value of the inhomogeneity. - */ - double inhomogeneity; - - /** - * This operator is a bit weird and - * unintuitive: it compares the - * line numbers of two lines. We - * need this to sort the lines; in - * fact we could do this using a - * comparison predicate. However, - * this way, it is easier, albeit - * unintuitive since two lines - * really have no god-given order - * relation. - */ - bool operator < (const ConstraintLine &) const; - - /** - * This operator is likewise weird: - * it checks whether the line - * indices of the two operands are - * equal, irrespective of the fact - * that the contents of the line - * may be different. - */ - bool operator == (const ConstraintLine &) const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) of - * this object. - */ - std::size_t memory_consumption () const; - }; - - /** - * Store the lines of the matrix. 
- * Entries are usually appended in an - * arbitrary order and insertion into a - * vector is done best at the end, so - * the order is unspecified after all - * entries are inserted. Sorting of the - * entries takes place when calling the - * close() function. - * - * We could, instead of using a vector, - * use an associative array, like a map - * to store the lines. This, however, - * would mean a much more fractioned - * heap since it allocates many small - * objects, and would additionally make - * usage of this matrix much slower. - */ - std::vector lines; - - /** - * A list of unsigned integers that - * contains the position of the - * ConstraintLine of a constrained degree - * of freedom, or - * numbers::invalid_unsigned_int if the - * degree of freedom is not - * constrained. The - * numbers::invalid_unsigned_int - * return value returns thus whether - * there is a constraint line for a given - * degree of freedom index. Note that - * this class has no notion of how many - * degrees of freedom there really are, - * so if we check whether there is a - * constraint line for a given degree of - * freedom, then this vector may actually - * be shorter than the index of the DoF - * we check for. - * - * This field exists since when adding a - * new constraint line we have to figure - * out whether it already - * exists. Previously, we would simply - * walk the unsorted list of constraint - * lines until we either hit the end or - * found it. This algorithm is O(N) if N - * is the number of constraints, which - * makes it O(N^2) when inserting all - * constraints. For large problems with - * many constraints, this could easily - * take 5-10 per cent of the total run - * time. With this field, we can save - * this time since we find any constraint - * in O(1) time or get to know that it a - * certain degree of freedom is not - * constrained. - * - * To make things worse, traversing the - * list of existing constraints requires - * reads from many different places in - * memory. Thus, in large 3d - * applications, the add_line() function - * showed up very prominently in the - * overall compute time, mainly because - * it generated a lot of cache - * misses. This should also be fixed by - * using the O(1) algorithm to access the - * fields of this array. - * - * The field is useful in a number of - * other contexts as well, e.g. when one - * needs random access to the constraints - * as in all the functions that apply - * constraints on the fly while add cell - * contributions into vectors and - * matrices. - */ - std::vector lines_cache; - - /** - * This IndexSet is used to limit the - * lines to save in the ContraintMatrix - * to a subset. This is necessary, - * because the lines_cache vector would - * become too big in a distributed - * calculation. - */ - IndexSet local_lines; - - /** - * Store whether the arrays are sorted. - * If so, no new entries can be added. - */ - bool sorted; - - /** - * Internal function to calculate the - * index of line @p line in the vector - * lines_cache using local_lines. - */ - unsigned int calculate_line_index (const unsigned int line) const; - - /** - * Return @p true if the weight of an - * entry (the second element of the - * pair) equals zero. This function is - * used to delete entries with zero - * weight. - */ - static bool check_zero_weight (const std::pair &p); - - /** - * Dummy table that serves as default - * argument for function - * add_entries_local_to_global(). 
- */ - static const Table<2,bool> default_empty_table; - - /** - * This function actually implements - * the local_to_global function for - * standard (non-block) matrices. - */ - template - void - distribute_local_to_global (const FullMatrix &local_matrix, - const Vector &local_vector, - const std::vector &local_dof_indices, - MatrixType &global_matrix, - VectorType &global_vector, - bool use_inhomogeneities_for_rhs, - internal::bool2type) const; - - /** - * This function actually implements - * the local_to_global function for - * block matrices. - */ - template - void - distribute_local_to_global (const FullMatrix &local_matrix, - const Vector &local_vector, - const std::vector &local_dof_indices, - MatrixType &global_matrix, - VectorType &global_vector, - bool use_inhomogeneities_for_rhs, - internal::bool2type) const; - - /** - * This function actually implements - * the local_to_global function for - * standard (non-block) sparsity types. - */ - template - void - add_entries_local_to_global (const std::vector &local_dof_indices, - SparsityType &sparsity_pattern, - const bool keep_constrained_entries, - const Table<2,bool> &dof_mask, - internal::bool2type) const; - - /** - * This function actually implements - * the local_to_global function for - * block sparsity types. - */ - template - void - add_entries_local_to_global (const std::vector &local_dof_indices, - SparsityType &sparsity_pattern, - const bool keep_constrained_entries, - const Table<2,bool> &dof_mask, - internal::bool2type) const; - - /** - * Internal helper function for - * distribute_local_to_global function. - * - * Creates a list of affected global rows - * for distribution, including the local - * rows where the entries come from. The - * list is sorted according to the global - * row indices. - */ - void - make_sorted_row_list (const std::vector &local_dof_indices, - internals::GlobalRowsFromLocal &global_rows) const; - - /** - * Internal helper function for - * add_entries_local_to_global function. - * - * Creates a list of affected rows for - * distribution without any additional - * information, otherwise similar to the - * other make_sorted_row_list() - * function. - */ - void - make_sorted_row_list (const std::vector &local_dof_indices, - std::vector &active_dofs) const; - - /** - * Internal helper function for - * distribute_local_to_global function. - */ - double - resolve_vector_entry (const unsigned int i, - const internals::GlobalRowsFromLocal &global_rows, - const Vector &local_vector, - const std::vector &local_dof_indices, - const FullMatrix &local_matrix) const; + }; + + /** + * Store the lines of the matrix. + * Entries are usually appended in an + * arbitrary order and insertion into a + * vector is done best at the end, so + * the order is unspecified after all + * entries are inserted. Sorting of the + * entries takes place when calling the + * close() function. + * + * We could, instead of using a vector, + * use an associative array, like a map + * to store the lines. This, however, + * would mean a much more fractioned + * heap since it allocates many small + * objects, and would additionally make + * usage of this matrix much slower. + */ + std::vector lines; + + /** + * A list of unsigned integers that + * contains the position of the + * ConstraintLine of a constrained degree + * of freedom, or + * numbers::invalid_unsigned_int if the + * degree of freedom is not + * constrained. 
The + * numbers::invalid_unsigned_int + * return value returns thus whether + * there is a constraint line for a given + * degree of freedom index. Note that + * this class has no notion of how many + * degrees of freedom there really are, + * so if we check whether there is a + * constraint line for a given degree of + * freedom, then this vector may actually + * be shorter than the index of the DoF + * we check for. + * + * This field exists since when adding a + * new constraint line we have to figure + * out whether it already + * exists. Previously, we would simply + * walk the unsorted list of constraint + * lines until we either hit the end or + * found it. This algorithm is O(N) if N + * is the number of constraints, which + * makes it O(N^2) when inserting all + * constraints. For large problems with + * many constraints, this could easily + * take 5-10 per cent of the total run + * time. With this field, we can save + * this time since we find any constraint + * in O(1) time or get to know that it a + * certain degree of freedom is not + * constrained. + * + * To make things worse, traversing the + * list of existing constraints requires + * reads from many different places in + * memory. Thus, in large 3d + * applications, the add_line() function + * showed up very prominently in the + * overall compute time, mainly because + * it generated a lot of cache + * misses. This should also be fixed by + * using the O(1) algorithm to access the + * fields of this array. + * + * The field is useful in a number of + * other contexts as well, e.g. when one + * needs random access to the constraints + * as in all the functions that apply + * constraints on the fly while add cell + * contributions into vectors and + * matrices. + */ + std::vector lines_cache; + + /** + * This IndexSet is used to limit the + * lines to save in the ContraintMatrix + * to a subset. This is necessary, + * because the lines_cache vector would + * become too big in a distributed + * calculation. + */ + IndexSet local_lines; + + /** + * Store whether the arrays are sorted. + * If so, no new entries can be added. + */ + bool sorted; + + /** + * Internal function to calculate the + * index of line @p line in the vector + * lines_cache using local_lines. + */ + unsigned int calculate_line_index (const unsigned int line) const; + + /** + * Return @p true if the weight of an + * entry (the second element of the + * pair) equals zero. This function is + * used to delete entries with zero + * weight. + */ + static bool check_zero_weight (const std::pair &p); + + /** + * Dummy table that serves as default + * argument for function + * add_entries_local_to_global(). + */ + static const Table<2,bool> default_empty_table; + + /** + * This function actually implements + * the local_to_global function for + * standard (non-block) matrices. + */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs, + internal::bool2type) const; + + /** + * This function actually implements + * the local_to_global function for + * block matrices. 
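(Editorial aside, not part of this patch.) The O(1) lookup described for lines_cache above can be pictured with the following stand-alone sketch. It is illustrative only and deliberately much simpler than the class itself; the value static_cast<unsigned int>(-1) plays the role of numbers::invalid_unsigned_int.

    #include <vector>

    struct ConstraintCacheSketch
    {
      std::vector<unsigned int> cache;   // role of lines_cache: DoF index -> position

      void note_constraint (const unsigned int dof, const unsigned int position)
      {
        if (dof >= cache.size())
          cache.resize (dof + 1, static_cast<unsigned int>(-1));
        cache[dof] = position;           // remember where the line is stored
      }

      bool is_constrained (const unsigned int dof) const
      {
        // A DoF beyond the end of the cache has never been constrained.
        return (dof < cache.size() &&
                cache[dof] != static_cast<unsigned int>(-1));
      }
    };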
+ */ + template + void + distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + bool use_inhomogeneities_for_rhs, + internal::bool2type) const; + + /** + * This function actually implements + * the local_to_global function for + * standard (non-block) sparsity types. + */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + internal::bool2type) const; + + /** + * This function actually implements + * the local_to_global function for + * block sparsity types. + */ + template + void + add_entries_local_to_global (const std::vector &local_dof_indices, + SparsityType &sparsity_pattern, + const bool keep_constrained_entries, + const Table<2,bool> &dof_mask, + internal::bool2type) const; + + /** + * Internal helper function for + * distribute_local_to_global function. + * + * Creates a list of affected global rows + * for distribution, including the local + * rows where the entries come from. The + * list is sorted according to the global + * row indices. + */ + void + make_sorted_row_list (const std::vector &local_dof_indices, - internals::GlobalRowsFromLocal &global_rows) const; ++ internals::GlobalRowsFromLocal &global_rows) const; + + /** + * Internal helper function for + * add_entries_local_to_global function. + * + * Creates a list of affected rows for + * distribution without any additional + * information, otherwise similar to the + * other make_sorted_row_list() + * function. + */ + void + make_sorted_row_list (const std::vector &local_dof_indices, + std::vector &active_dofs) const; + + /** + * Internal helper function for + * distribute_local_to_global function. + */ + double + resolve_vector_entry (const unsigned int i, + const internals::GlobalRowsFromLocal &global_rows, + const Vector &local_vector, + const std::vector &local_dof_indices, + const FullMatrix &local_matrix) const; }; @@@ -2058,9 -2058,9 +2058,9 @@@ ConstraintMatrix::distribute_local_to_g template + class VectorType> inline -void ConstraintMatrix::get_dof_values (const VectorType &global_vector, +void ConstraintMatrix::get_dof_values (const VectorType &global_vector, ForwardIteratorInd local_indices_begin, ForwardIteratorVec local_vector_begin, ForwardIteratorVec local_vector_end) const diff --cc deal.II/include/deal.II/lac/constraint_matrix.templates.h index 715527800e,d7d0ca7548..b792559f9d --- a/deal.II/include/deal.II/lac/constraint_matrix.templates.h +++ b/deal.II/include/deal.II/lac/constraint_matrix.templates.h @@@ -1646,8 -1649,8 +1649,8 @@@ namespace internal const unsigned int column_start, const unsigned int column_end, const FullMatrix &local_matrix, - unsigned int * &col_ptr, - number * &val_ptr) - unsigned int *&col_ptr, - number *&val_ptr) ++ unsigned int *&col_ptr, ++ number *&val_ptr) { if (column_end == column_start) return; diff --cc deal.II/include/deal.II/lac/parallel_vector.h index e28dbc101f,d16f47870d..e1c53ceb32 --- a/deal.II/include/deal.II/lac/parallel_vector.h +++ b/deal.II/include/deal.II/lac/parallel_vector.h @@@ -83,896 -83,896 +83,896 @@@ namespace paralle template class Vector : public Subscriptor { - public: - /** - * Declare standard types used in all - * containers. These types parallel those in - * the C++ standard libraries - * vector<...> class. 
- */ - typedef Number value_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type *iterator; - typedef const value_type *const_iterator; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef size_t size_type; - typedef typename numbers::NumberTraits::real_type real_type; - - /** - * @name 1: Basic Object-handling - */ - //@{ - /** - * Empty constructor. - */ - Vector (); - - /** - * Copy constructor. Uses the parallel - * partitioning of @p in_vector. - */ - Vector (const Vector &in_vector); - - /** - * Constructs a parallel vector of the given - * global size without any actual parallel - * distribution. - */ - Vector (const unsigned int size); - - /** - * Constructs a parallel vector. The local - * range is specified by @p locally_owned_set - * (note that this must be a contiguous - * interval, multiple intervals are not - * possible). The IndexSet @p ghost_indices - * specifies ghost indices, i.e., indices - * which one might need to read data from or - * accumulate data from. It is allowed that - * the set of ghost indices also contains the - * local range, but it does not need to. - * - * This function involves global - * communication, so it should only be called - * once for a given layout. Use the - * constructor with Vector argument to - * create additional vectors with the same - * parallel layout. - */ - Vector (const IndexSet &local_range, - const IndexSet &ghost_indices, - const MPI_Comm communicator); - - /** - * Create the vector based on the parallel - * partitioning described in @p - * partitioner. The input argument is a shared - * pointer, which store the partitioner data - * only once and share it between several - * vectors with the same layout. - */ - Vector (const std_cxx1x::shared_ptr &partitioner); - - /** - * Destructor. - */ - ~Vector (); - - /** - * Sets the global size of the vector to @p - * size without any actual parallel - * distribution. - */ - void reinit (const unsigned int size, - const bool fast = false); - - /** - * Uses the parallel layout of the input - * vector @p in_vector and allocates memory - * for this vector. Recommended initialization - * function when several vectors with the same - * layout should be created. - * - * If the flag @p fast is set to false, the - * memory will be initialized with zero, - * otherwise the memory will be untouched (and - * the user must make sure to fill it with - * reasonable data before using it). - */ - template - void reinit(const Vector &in_vector, - const bool fast = false); - - /** - * Initialize the vector. The local range is - * specified by @p locally_owned_set (note - * that this must be a contiguous interval, - * multiple intervals are not possible). The - * IndexSet @p ghost_indices specifies ghost - * indices, i.e., indices which one might need - * to read data from or accumulate data - * from. It is allowed that the set of ghost - * indices also contains the local range, but - * it does not need to. - * - * This function involves global - * communication, so it should only be called - * once for a given layout. Use the @p reinit - * function with Vector argument to - * create additional vectors with the same - * parallel layout. - */ - void reinit (const IndexSet &local_range, - const IndexSet &ghost_indices, - const MPI_Comm communicator); - - /** - * Initialize the vector given to the parallel - * partitioning described in @p - * partitioner. 
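(Editorial aside, not part of this patch.) A sketch of setting up such a vector from the two IndexSet arguments described above, assuming an MPI build with exactly two processes, a global size of 100, and that the class is reachable as parallel::distributed::Vector; none of these names or numbers come from the patch itself.

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/utilities.h>
    #include <deal.II/lac/parallel_vector.h>

    using namespace dealii;

    // inside some function, after MPI has been initialized:
    const unsigned int my_rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);

    IndexSet locally_owned (100);
    locally_owned.add_range (50 * my_rank, 50 * (my_rank + 1));  // contiguous range

    IndexSet ghosts (100);
    ghosts.add_index (my_rank == 0 ? 99 : 0);                    // one remote entry

    parallel::distributed::Vector<double> v (locally_owned, ghosts, MPI_COMM_WORLD);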
The input argument is a shared - * pointer, which store the partitioner data - * only once and share it between several - * vectors with the same layout. - */ - void reinit (const std_cxx1x::shared_ptr &partitioner); - - /** - * Swap the contents of this - * vector and the other vector - * @p v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. - * - * This function is analog to the - * the @p swap function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. - * - * This function is virtual in - * order to allow for derived - * classes to handle memory - * separately. - */ - void swap (Vector &v); - - /** - * Assigns the vector to the parallel - * partitioning of the input vector @p - * in_vector, and copies all the data. - */ - Vector & - operator = (const Vector &in_vector); - - /** - * Assigns the vector to the parallel - * partitioning of the input vector @p - * in_vector, and copies all the data. - */ - template - Vector & - operator = (const Vector &in_vector); - - /** - * This method copies the local range from - * another vector with the same local range, - * but possibly different layout of ghost - * indices. - */ - void copy_from (const Vector &in_vector, - const bool call_update_ghost_values = false); - - /** - * Sets all elements of the vector to the - * scalar @p s. If the scalar is zero, also - * ghost elements are set to zero, otherwise - * they remain unchanged. - */ - Vector& operator = (const Number s); - - /** - * This function copies the data that has - * accumulated in the data buffer for ghost - * indices to the owning processor. - * - * For the meaning of this argument, - * see the entry on @ref - * GlossCompress "Compressing - * distributed vectors and matrices" - * in the glossary. - */ - void compress (::dealii::VectorOperation::values operation - =::dealii::VectorOperation::unknown); - - - /** - * Fills the data field for ghost indices with - * the values stored in the respective - * positions of the owning processor. This - * function is needed before reading from - * ghosts. The function is @p const even - * though ghost data is changed. This is - * needed to allow functions with a @p const - * vector to perform the data exchange without - * creating temporaries. - */ - void update_ghost_values () const; - - /** - * Initiates communication for the @p - * compress() function with non-blocking - * communication. This function does not wait - * for the transfer to finish, in order to - * allow for other computations during the - * time it takes until all data arrives. - * - * Before the data is actually exchanged, the - * function must be followed by a call to @p - * compress_finish(). - * - * In case this function is called for more - * than one vector before @p - * compress_finish() is invoked, it is - * mandatory to specify a unique - * communication channel to each such call, in - * order to avoid several messages with the - * same ID that will corrupt this operation. - */ - void compress_start (const unsigned int communication_channel = 0); - - /** - * For all requests that have been initiated - * in compress_start, wait for the - * communication to finish. 
Once it is - * finished, add or set the data (depending on - * whether @p add_ghost_data is @p true or @p - * false) to the respective positions in the - * owning processor, and clear the contents in - * the ghost data fields. The meaning of - * this argument is the same as in compress(). - * - * Must follow a call to the @p compress_start - * function. - */ - void compress_finish (const bool add_ghost_data = true); - - - /** - * Initiates communication for the @p - * update_ghost_values() function with non-blocking - * communication. This function does not wait - * for the transfer to finish, in order to - * allow for other computations during the - * time it takes until all data arrives. - * - * Before the data is actually exchanged, the - * function must be followed by a call to @p - * update_ghost_values_finish(). - * - * In case this function is called for more - * than one vector before @p - * update_ghost_values_finish() is invoked, it is - * mandatory to specify a unique communication - * channel to each such call, in order to - * avoid several messages with the same ID - * that will corrupt this operation. - */ - void update_ghost_values_start (const unsigned int communication_channel = 0) const; - - - /** - * For all requests that have been started in - * update_ghost_values_start, wait for the communication - * to finish. - * - * Must follow a call to the @p - * update_ghost_values_start function before reading - * data from ghost indices. - */ - void update_ghost_values_finish () const; - - /** - * This method zeros the entries on ghost - * dofs, but does not touch locally owned - * DoFs. - */ - void zero_out_ghosts (); - - /** - * Return whether the vector contains only - * elements with value zero. This function - * is mainly for internal consistency - * checks and should seldom be used when - * not in debug mode since it uses quite - * some time. - */ - bool all_zero () const; - - /** - * Return @p true if the vector has no - * negative entries, i.e. all entries are - * zero or positive. This function is - * used, for example, to check whether - * refinement indicators are really all - * positive (or zero). - * - * The function obviously only makes - * sense if the template argument of this - * class is a real type. If it is a - * complex type, then an exception is - * thrown. - */ - bool is_non_negative () const; - - /** - * Checks for equality of the two vectors. - */ - template - bool operator == (const Vector &v) const; - - /** - * Checks for inequality of the two vectors. - */ - template - bool operator != (const Vector &v) const; - - /** - * Perform the inner product of two vectors. - */ - template - Number operator * (const Vector &V) const; - - /** - * Computes the square of the l2 - * norm of the vector (i.e., the sum of the - * squares of all entries among all - * processors). - */ - real_type norm_sqr () const; - - /** - * Computes the mean value of all the entries - * in the vector. - */ - Number mean_value () const; - - /** - * Returns the l1 norm of the - * vector (i.e., the sum of the absolute - * values of all entries among all - * processors). - */ - real_type l1_norm () const; - - /** - * Returns the l2 norm of the - * vector (i.e., square root of the sum of the - * square of all entries among all - * processors). - */ - real_type l2_norm () const; - - /** - * Returns the lp norm with real @p - * p of the vector (i.e., the pth root of sum - * of the pth power of all entries among all - * processors). 
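(Editorial aside, not part of this patch.) The intended order of the communication calls described above might be sketched like this, where v is a vector as constructed earlier and ghost_index is a placeholder for an index that was declared as a ghost at construction.

    v = 1.;                             // set all locally owned entries
    v.zero_out_ghosts ();               // start from clean ghost entries
    v (ghost_index) += 1.;              // contribution to an entry owned elsewhere
    v.compress (VectorOperation::add);  // ship that contribution to its owner
    v.update_ghost_values ();           // afterwards ghost entries may be read
    const double nrm = v.l2_norm ();    // collective over all processors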
- */ - real_type lp_norm (const real_type p) const; - - /** - * Returns the maximum norm of the vector - * (i.e., maximum absolute value among all - * entries among all processors). - */ - real_type linfty_norm () const; - - /** - * Returns the global size of the vector, - * equal to the sum of the number of locally - * owned indices among all the processors. - */ - types::global_dof_index size () const; - - /** - * Returns the local size of the vector, i.e., - * the number of indices owned locally. - */ - unsigned int local_size() const; - - /** - * Returns the half-open interval that - * specifies the locally owned range of the - * vector. Note that local_size() == - * local_range().second - - * local_range().first. - */ - std::pair local_range () const; - - /** - * Returns true if the given global index is - * in the local range of this processor. - */ - bool in_local_range (const types::global_dof_index global_index) const; - - /** - * Returns the number of ghost elements - * present on the vector. - */ - unsigned int n_ghost_entries () const; - - /** - * Returns whether the given global index is a - * ghost index on the present - * processor. Returns false for indices that - * are owned locally and for indices not - * present at all. - */ - bool is_ghost_entry (const types::global_dof_index global_index) const; - - /** - * Make the @p Vector class a bit like - * the vector<> class of the C++ - * standard library by returning - * iterators to the start and end of the - * locally owned elements of this vector. - */ - iterator begin (); - - /** - * Return constant iterator to the start of - * the vector. - */ - const_iterator begin () const; - - /** - * Return an iterator pointing to the - * element past the end of the array of - * locally owned entries. - */ - iterator end (); - - /** - * Return a constant iterator pointing to - * the element past the end of the array - * of the locally owned entries. - */ - const_iterator end () const; - //@} - - - /** - * @name 2: Data-Access - */ - //@{ - - /** - * Read access to the data in the - * position corresponding to @p - * global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. - */ - Number operator () (const types::global_dof_index global_index) const; - - /** - * Read and write access to the data - * in the position corresponding to - * @p global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. - */ - Number& operator () (const types::global_dof_index global_index); - - /** - * Read access to the data in the - * position corresponding to @p - * global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. - * - * This function does the same thing - * as operator(). - */ - Number operator [] (const types::global_dof_index global_index) const; - - /** - * Read and write access to the data - * in the position corresponding to - * @p global_index. The index must be - * either in the local range of the - * vector or be specified as a ghost - * index at construction. - * - * This function does the same thing - * as operator(). - */ - Number& operator [] (const types::global_dof_index global_index); - - /** - * Read access to the data field specified by - * @p local_index. 
Locally owned indices can - * be accessed with indices - * [0,local_size), and ghost - * indices with indices - * [local_size,local_size+ - * n_ghost_entries]. - */ - Number local_element (const unsigned int local_index) const; - - /** - * Read and write access to the data field - * specified by @p local_index. Locally owned - * indices can be accessed with indices - * [0,local_size), and ghost - * indices with indices - * [local_size,local_size+n_ghosts]. - */ - Number& local_element (const unsigned int local_index); - //@} - - - /** - * @name 3: Modification of vectors - */ - //@{ - - /** - * Add the given vector to the present - * one. - */ - Vector & operator += (const Vector &V); - - /** - * Subtract the given vector from the - * present one. - */ - Vector & operator -= (const Vector &V); - - /** - * A collective add operation: - * This funnction adds a whole - * set of values stored in @p - * values to the vector - * components specified by @p - * indices. - */ - template - void add (const std::vector &indices, - const std::vector &values); - - /** - * This is a second collective - * add operation. As a - * difference, this function - * takes a deal.II vector of - * values. - */ - template - void add (const std::vector &indices, - const ::dealii::Vector &values); - - /** - * Take an address where - * n_elements are stored - * contiguously and add them into - * the vector. Handles all cases - * which are not covered by the - * other two add() - * functions above. - */ - template - void add (const unsigned int n_elements, - const unsigned int *indices, - const OtherNumber *values); - - /** - * Addition of @p s to all - * components. Note that @p s is a - * scalar and not a vector. - */ - void add (const Number s); - - /** - * Simple vector addition, equal to the - * operator +=. - */ - void add (const Vector &V); - - /** - * Simple addition of a multiple of a - * vector, i.e. *this += a*V. - */ - void add (const Number a, const Vector &V); - - /** - * Multiple addition of scaled vectors, - * i.e. *this += a*V+b*W. - */ - void add (const Number a, const Vector &V, - const Number b, const Vector &W); - - /** - * Scaling and simple vector addition, - * i.e. - * *this = s*(*this)+V. - */ - void sadd (const Number s, - const Vector &V); - - /** - * Scaling and simple addition, i.e. - * *this = s*(*this)+a*V. - */ - void sadd (const Number s, - const Number a, - const Vector &V); - - /** - * Scaling and multiple addition. - */ - void sadd (const Number s, - const Number a, - const Vector &V, - const Number b, - const Vector &W); - - /** - * Scaling and multiple addition. - * *this = s*(*this)+a*V + b*W + c*X. - */ - void sadd (const Number s, - const Number a, - const Vector &V, - const Number b, - const Vector &W, - const Number c, - const Vector &X); - - /** - * Scale each element of the - * vector by the given factor. - * - * This function is deprecated - * and will be removed in a - * future version. Use - * operator *= and - * operator /= instead. - */ - void scale (const Number factor); - - - /** - * Scale each element of the - * vector by a constant - * value. - */ - Vector & operator *= (const Number factor); - - /** - * Scale each element of the - * vector by the inverse of the - * given value. - */ - Vector & operator /= (const Number factor); - - /** - * Scale each element of this - * vector by the corresponding - * element in the argument. This - * function is mostly meant to - * simulate multiplication (and - * immediate re-assignment) by a - * diagonal scaling matrix. 
- */ - void scale (const Vector &scaling_factors); - - /** - * Scale each element of this - * vector by the corresponding - * element in the argument. This - * function is mostly meant to - * simulate multiplication (and - * immediate re-assignment) by a - * diagonal scaling matrix. - */ - template - void scale (const Vector &scaling_factors); - - /** - * Assignment *this = a*u. - */ - void equ (const Number a, const Vector& u); - - /** - * Assignment *this = a*u. - */ - template - void equ (const Number a, const Vector& u); - - /** - * Assignment *this = a*u + b*v. - */ - void equ (const Number a, const Vector& u, - const Number b, const Vector& v); - - /** - * Assignment *this = a*u + b*v + b*w. - */ - void equ (const Number a, const Vector& u, - const Number b, const Vector& v, - const Number c, const Vector& w); - - /** - * Compute the elementwise ratio of the - * two given vectors, that is let - * this[i] = a[i]/b[i]. This is - * useful for example if you want to - * compute the cellwise ratio of true to - * estimated error. - * - * This vector is appropriately - * scaled to hold the result. - * - * If any of the b[i] is - * zero, the result is - * undefined. No attempt is made - * to catch such situations. - */ - void ratio (const Vector &a, - const Vector &b); - //@} - - - /** - * @name 4: Mixed stuff - */ - //@{ - /** - * Checks whether the given - * partitioner is compatible with the - * partitioner used for this - * vector. Two partitioners are - * compatible if the have the same - * local size and the same ghost - * indices. They do not necessarily - * need to be the same data - * field. This is a local operation - * only, i.e., if only some - * processors decide that the - * partitioning is not compatible, - * only these processors will return - * @p false, whereas the other - * processors will return @p true. - */ - bool - partitioners_are_compatible (const Utilities::MPI::Partitioner &part) const; - - - /** - * Prints the vector to the output stream @p - * out. - */ - void print (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * Returns the memory consumption of this - * class in bytes. - */ - std::size_t memory_consumption () const; - //@} - - private: - /** - * Shared pointer to store the parallel - * partitioning information. This information - * can be shared between several vectors that - * have the same partitioning. - */ - std_cxx1x::shared_ptr partitioner; - - /** - * The size that is currently allocated in the - * val array. - */ - unsigned int allocated_size; - - /** - * Pointer to the array of - * local elements of this vector. - */ - Number *val; - - /** - * Temporary storage that holds the data that - * is sent to this processor in @p compress() - * or sent from this processor in @p - * update_ghost_values. - */ - mutable Number *import_data; - - /** - * Provide this class with all functionality - * of ::dealii::Vector by creating a - * VectorView object. - */ - VectorView vector_view; + public: + /** + * Declare standard types used in all + * containers. These types parallel those in + * the C++ standard libraries + * vector<...> class. 
+ */ + typedef Number value_type; + typedef value_type *pointer; + typedef const value_type *const_pointer; + typedef value_type *iterator; + typedef const value_type *const_iterator; + typedef value_type &reference; + typedef const value_type &const_reference; + typedef size_t size_type; + typedef typename numbers::NumberTraits::real_type real_type; + + /** + * @name 1: Basic Object-handling + */ + //@{ + /** + * Empty constructor. + */ + Vector (); + + /** + * Copy constructor. Uses the parallel + * partitioning of @p in_vector. + */ + Vector (const Vector &in_vector); + + /** + * Constructs a parallel vector of the given + * global size without any actual parallel + * distribution. + */ + Vector (const unsigned int size); + + /** + * Constructs a parallel vector. The local + * range is specified by @p locally_owned_set + * (note that this must be a contiguous + * interval, multiple intervals are not + * possible). The IndexSet @p ghost_indices + * specifies ghost indices, i.e., indices + * which one might need to read data from or + * accumulate data from. It is allowed that + * the set of ghost indices also contains the + * local range, but it does not need to. + * + * This function involves global + * communication, so it should only be called + * once for a given layout. Use the + * constructor with Vector argument to + * create additional vectors with the same + * parallel layout. + */ + Vector (const IndexSet &local_range, + const IndexSet &ghost_indices, + const MPI_Comm communicator); + + /** + * Create the vector based on the parallel + * partitioning described in @p + * partitioner. The input argument is a shared + * pointer, which store the partitioner data + * only once and share it between several + * vectors with the same layout. + */ + Vector (const std_cxx1x::shared_ptr &partitioner); + + /** + * Destructor. + */ + ~Vector (); + + /** + * Sets the global size of the vector to @p + * size without any actual parallel + * distribution. + */ + void reinit (const unsigned int size, + const bool fast = false); + + /** + * Uses the parallel layout of the input + * vector @p in_vector and allocates memory + * for this vector. Recommended initialization + * function when several vectors with the same + * layout should be created. + * + * If the flag @p fast is set to false, the + * memory will be initialized with zero, + * otherwise the memory will be untouched (and + * the user must make sure to fill it with + * reasonable data before using it). + */ + template + void reinit(const Vector &in_vector, + const bool fast = false); + + /** + * Initialize the vector. The local range is + * specified by @p locally_owned_set (note + * that this must be a contiguous interval, + * multiple intervals are not possible). The + * IndexSet @p ghost_indices specifies ghost + * indices, i.e., indices which one might need + * to read data from or accumulate data + * from. It is allowed that the set of ghost + * indices also contains the local range, but + * it does not need to. + * + * This function involves global + * communication, so it should only be called + * once for a given layout. Use the @p reinit + * function with Vector argument to + * create additional vectors with the same + * parallel layout. + */ + void reinit (const IndexSet &local_range, + const IndexSet &ghost_indices, + const MPI_Comm communicator); + + /** + * Initialize the vector given to the parallel + * partitioning described in @p + * partitioner. 
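
The constructors and reinit() variants documented above describe a vector whose global index range is split into one contiguous locally owned interval plus an arbitrary set of ghost indices. The following sketch shows one way such a vector might be set up; it assumes the surrounding class is deal.II's parallel::distributed::Vector<Number> from parallel_vector.h (neither the namespace nor the file name is visible in this hunk) and uses plain MPI calls for the rank/size queries.

#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>
#include <mpi.h>

using namespace dealii;

// Create a vector of global size n_global, split into contiguous chunks of
// (roughly) equal size per process; every process additionally keeps a ghost
// copy of global entry 0.
parallel::distributed::Vector<double>
make_partitioned_vector (const unsigned int n_global,
                         const MPI_Comm     communicator)
{
  int rank = 0, n_procs = 1;
  MPI_Comm_rank (communicator, &rank);
  MPI_Comm_size (communicator, &n_procs);

  const unsigned int chunk = n_global / n_procs;
  const unsigned int begin = rank * chunk;
  const unsigned int end   = (rank == n_procs-1 ? n_global : begin + chunk);

  IndexSet locally_owned (n_global);
  locally_owned.add_range (begin, end);      // must be one contiguous interval

  IndexSet ghost_indices (n_global);
  ghost_indices.add_index (0);               // read access to entry 0 everywhere

  // this constructor involves global communication, so it should be called
  // only once per layout; further vectors can reuse the layout via reinit()
  return parallel::distributed::Vector<double> (locally_owned,
                                                ghost_indices,
                                                communicator);
}
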
The input argument is a shared + * pointer, which store the partitioner data + * only once and share it between several + * vectors with the same layout. + */ + void reinit (const std_cxx1x::shared_ptr &partitioner); + + /** + * Swap the contents of this + * vector and the other vector + * @p v. One could do this + * operation with a temporary + * variable and copying over the + * data elements, but this + * function is significantly more + * efficient since it only swaps + * the pointers to the data of + * the two vectors and therefore + * does not need to allocate + * temporary storage and move + * data around. + * + * This function is analog to the + * the @p swap function of all C++ + * standard containers. Also, + * there is a global function + * swap(u,v) that simply calls + * u.swap(v), again in analogy + * to standard functions. + * + * This function is virtual in + * order to allow for derived + * classes to handle memory + * separately. + */ + void swap (Vector &v); + + /** + * Assigns the vector to the parallel + * partitioning of the input vector @p + * in_vector, and copies all the data. + */ + Vector & - operator = (const Vector &in_vector); ++ operator = (const Vector &in_vector); + + /** + * Assigns the vector to the parallel + * partitioning of the input vector @p + * in_vector, and copies all the data. + */ + template + Vector & + operator = (const Vector &in_vector); + + /** + * This method copies the local range from + * another vector with the same local range, + * but possibly different layout of ghost + * indices. + */ + void copy_from (const Vector &in_vector, + const bool call_update_ghost_values = false); + + /** + * Sets all elements of the vector to the + * scalar @p s. If the scalar is zero, also + * ghost elements are set to zero, otherwise + * they remain unchanged. + */ + Vector &operator = (const Number s); + + /** + * This function copies the data that has + * accumulated in the data buffer for ghost + * indices to the owning processor. + * + * For the meaning of this argument, + * see the entry on @ref + * GlossCompress "Compressing + * distributed vectors and matrices" + * in the glossary. + */ + void compress (::dealii::VectorOperation::values operation + =::dealii::VectorOperation::unknown); + + + /** + * Fills the data field for ghost indices with + * the values stored in the respective + * positions of the owning processor. This + * function is needed before reading from + * ghosts. The function is @p const even + * though ghost data is changed. This is + * needed to allow functions with a @p const + * vector to perform the data exchange without + * creating temporaries. + */ + void update_ghost_values () const; + + /** + * Initiates communication for the @p + * compress() function with non-blocking + * communication. This function does not wait + * for the transfer to finish, in order to + * allow for other computations during the + * time it takes until all data arrives. + * + * Before the data is actually exchanged, the + * function must be followed by a call to @p + * compress_finish(). + * + * In case this function is called for more + * than one vector before @p + * compress_finish() is invoked, it is + * mandatory to specify a unique + * communication channel to each such call, in + * order to avoid several messages with the + * same ID that will corrupt this operation. 
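
The assignment operators, copy_from() and swap() documented above differ mainly in what they transfer: operator= also adopts the parallel layout of its argument, copy_from() only moves the locally owned range between vectors that share the same local range, and swap() merely exchanges internal pointers. A brief illustration, under the same parallel::distributed::Vector<double> assumption as in the previous sketch:

#include <deal.II/lac/parallel_vector.h>

using namespace dealii;

void copy_and_swap (parallel::distributed::Vector<double>       &a,
                    const parallel::distributed::Vector<double> &b)
{
  // deep copy: a takes over b's partitioning and all of its values
  a = b;

  // copy only the locally owned range; a and b must have the same local
  // range, but their ghost index sets may differ
  a.copy_from (b, /*call_update_ghost_values=*/ false);

  // constant-time exchange of the underlying data pointers
  parallel::distributed::Vector<double> tmp (b);
  tmp.swap (a);

  // reset all owned entries of a (and, because the scalar is zero, also
  // its ghost entries)
  a = 0.0;
}
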
+ */ + void compress_start (const unsigned int communication_channel = 0); + + /** + * For all requests that have been initiated + * in compress_start, wait for the + * communication to finish. Once it is + * finished, add or set the data (depending on + * whether @p add_ghost_data is @p true or @p + * false) to the respective positions in the + * owning processor, and clear the contents in + * the ghost data fields. The meaning of + * this argument is the same as in compress(). + * + * Must follow a call to the @p compress_start + * function. + */ + void compress_finish (const bool add_ghost_data = true); + + + /** + * Initiates communication for the @p + * update_ghost_values() function with non-blocking + * communication. This function does not wait + * for the transfer to finish, in order to + * allow for other computations during the + * time it takes until all data arrives. + * + * Before the data is actually exchanged, the + * function must be followed by a call to @p + * update_ghost_values_finish(). + * + * In case this function is called for more + * than one vector before @p + * update_ghost_values_finish() is invoked, it is + * mandatory to specify a unique communication + * channel to each such call, in order to + * avoid several messages with the same ID + * that will corrupt this operation. + */ + void update_ghost_values_start (const unsigned int communication_channel = 0) const; + + + /** + * For all requests that have been started in + * update_ghost_values_start, wait for the communication + * to finish. + * + * Must follow a call to the @p + * update_ghost_values_start function before reading + * data from ghost indices. + */ + void update_ghost_values_finish () const; + + /** + * This method zeros the entries on ghost + * dofs, but does not touch locally owned + * DoFs. + */ + void zero_out_ghosts (); + + /** + * Return whether the vector contains only + * elements with value zero. This function + * is mainly for internal consistency + * checks and should seldom be used when + * not in debug mode since it uses quite + * some time. + */ + bool all_zero () const; + + /** + * Return @p true if the vector has no + * negative entries, i.e. all entries are + * zero or positive. This function is + * used, for example, to check whether + * refinement indicators are really all + * positive (or zero). + * + * The function obviously only makes + * sense if the template argument of this + * class is a real type. If it is a + * complex type, then an exception is + * thrown. + */ + bool is_non_negative () const; + + /** + * Checks for equality of the two vectors. + */ + template + bool operator == (const Vector &v) const; + + /** + * Checks for inequality of the two vectors. + */ + template + bool operator != (const Vector &v) const; + + /** + * Perform the inner product of two vectors. + */ + template + Number operator * (const Vector &V) const; + + /** + * Computes the square of the l2 + * norm of the vector (i.e., the sum of the + * squares of all entries among all + * processors). + */ + real_type norm_sqr () const; + + /** + * Computes the mean value of all the entries + * in the vector. + */ + Number mean_value () const; + + /** + * Returns the l1 norm of the + * vector (i.e., the sum of the absolute + * values of all entries among all + * processors). + */ + real_type l1_norm () const; + + /** + * Returns the l2 norm of the + * vector (i.e., square root of the sum of the + * square of all entries among all + * processors). 
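
Taken together, the functions above define a two-direction exchange protocol: compress() ships contributions that were accumulated in ghost positions to their owning processors, update_ghost_values() afterwards refreshes the read-only ghost copies, and both operations have non-blocking _start()/_finish() variants that need distinct communication channels when several vectors are in flight. A sketch of the intended call sequence, again assuming parallel::distributed::Vector<double>:

#include <deal.II/lac/parallel_vector.h>

using namespace dealii;

void exchange_ghost_data (parallel::distributed::Vector<double> &v,
                          parallel::distributed::Vector<double> &w)
{
  // blocking variant: accumulate ghost contributions on the owners, then
  // make the ghost entries readable again
  v.compress (VectorOperation::add);
  v.update_ghost_values ();

  // non-blocking variant: start both transfers on distinct channels so
  // their MPI messages cannot be confused, overlap with local work, and
  // only then wait for completion
  v.compress_start (0);
  w.compress_start (1);
  // ... independent local computations could be placed here ...
  v.compress_finish ();            // default add_ghost_data = true, i.e. "add"
  w.compress_finish ();

  v.update_ghost_values_start (0);
  w.update_ghost_values_start (1);
  // ... more local work ...
  v.update_ghost_values_finish ();
  w.update_ghost_values_finish ();
}
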
+ */ + real_type l2_norm () const; + + /** + * Returns the lp norm with real @p + * p of the vector (i.e., the pth root of sum + * of the pth power of all entries among all + * processors). + */ + real_type lp_norm (const real_type p) const; + + /** + * Returns the maximum norm of the vector + * (i.e., maximum absolute value among all + * entries among all processors). + */ + real_type linfty_norm () const; + + /** + * Returns the global size of the vector, + * equal to the sum of the number of locally + * owned indices among all the processors. + */ + types::global_dof_index size () const; + + /** + * Returns the local size of the vector, i.e., + * the number of indices owned locally. + */ + unsigned int local_size() const; + + /** + * Returns the half-open interval that + * specifies the locally owned range of the + * vector. Note that local_size() == + * local_range().second - + * local_range().first. + */ + std::pair local_range () const; + + /** + * Returns true if the given global index is + * in the local range of this processor. + */ + bool in_local_range (const types::global_dof_index global_index) const; + + /** + * Returns the number of ghost elements + * present on the vector. + */ + unsigned int n_ghost_entries () const; + + /** + * Returns whether the given global index is a + * ghost index on the present + * processor. Returns false for indices that + * are owned locally and for indices not + * present at all. + */ + bool is_ghost_entry (const types::global_dof_index global_index) const; + + /** + * Make the @p Vector class a bit like + * the vector<> class of the C++ + * standard library by returning + * iterators to the start and end of the + * locally owned elements of this vector. + */ + iterator begin (); + + /** + * Return constant iterator to the start of + * the vector. + */ + const_iterator begin () const; + + /** + * Return an iterator pointing to the + * element past the end of the array of + * locally owned entries. + */ + iterator end (); + + /** + * Return a constant iterator pointing to + * the element past the end of the array + * of the locally owned entries. + */ + const_iterator end () const; + //@} + + + /** + * @name 2: Data-Access + */ + //@{ + + /** + * Read access to the data in the + * position corresponding to @p + * global_index. The index must be + * either in the local range of the + * vector or be specified as a ghost + * index at construction. + */ + Number operator () (const types::global_dof_index global_index) const; + + /** + * Read and write access to the data + * in the position corresponding to + * @p global_index. The index must be + * either in the local range of the + * vector or be specified as a ghost + * index at construction. + */ + Number &operator () (const types::global_dof_index global_index); + + /** + * Read access to the data in the + * position corresponding to @p + * global_index. The index must be + * either in the local range of the + * vector or be specified as a ghost + * index at construction. + * + * This function does the same thing + * as operator(). + */ + Number operator [] (const types::global_dof_index global_index) const; + + /** + * Read and write access to the data + * in the position corresponding to + * @p global_index. The index must be + * either in the local range of the + * vector or be specified as a ghost + * index at construction. + * + * This function does the same thing + * as operator(). 
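
The reduction operations just documented (norms, mean value, inner product) are collective and return the same value on every processor, whereas size(), local_size(), n_ghost_entries() and friends only describe the partitioning as seen locally. A small example of querying both kinds of information, under the same assumptions as above:

#include <deal.II/lac/parallel_vector.h>

#include <iostream>

using namespace dealii;

void report (const parallel::distributed::Vector<double> &v)
{
  // collective reductions over all processors
  const double l1   = v.l1_norm ();
  const double l2   = v.l2_norm ();
  const double linf = v.linfty_norm ();

  // purely local information about this processor's share of the vector
  std::cout << "owns " << v.local_size ()
            << " of "  << v.size () << " entries and stores "
            << v.n_ghost_entries () << " ghost entries; "
            << "|v|_1=" << l1 << ", |v|_2=" << l2
            << ", |v|_inf=" << linf << std::endl;
}
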
+ */ + Number &operator [] (const types::global_dof_index global_index); + + /** + * Read access to the data field specified by + * @p local_index. Locally owned indices can + * be accessed with indices + * [0,local_size), and ghost + * indices with indices + * [local_size,local_size+ + * n_ghost_entries]. + */ + Number local_element (const unsigned int local_index) const; + + /** + * Read and write access to the data field + * specified by @p local_index. Locally owned + * indices can be accessed with indices + * [0,local_size), and ghost + * indices with indices + * [local_size,local_size+n_ghosts]. + */ + Number &local_element (const unsigned int local_index); + //@} + + + /** + * @name 3: Modification of vectors + */ + //@{ + + /** + * Add the given vector to the present + * one. + */ + Vector &operator += (const Vector &V); + + /** + * Subtract the given vector from the + * present one. + */ + Vector &operator -= (const Vector &V); + + /** + * A collective add operation: + * This funnction adds a whole + * set of values stored in @p + * values to the vector + * components specified by @p + * indices. + */ + template + void add (const std::vector &indices, - const std::vector &values); ++ const std::vector &values); + + /** + * This is a second collective + * add operation. As a + * difference, this function + * takes a deal.II vector of + * values. + */ + template + void add (const std::vector &indices, + const ::dealii::Vector &values); + + /** + * Take an address where + * n_elements are stored + * contiguously and add them into + * the vector. Handles all cases + * which are not covered by the + * other two add() + * functions above. + */ + template + void add (const unsigned int n_elements, + const unsigned int *indices, - const OtherNumber *values); ++ const OtherNumber *values); + + /** + * Addition of @p s to all + * components. Note that @p s is a + * scalar and not a vector. + */ + void add (const Number s); + + /** + * Simple vector addition, equal to the + * operator +=. + */ + void add (const Vector &V); + + /** + * Simple addition of a multiple of a + * vector, i.e. *this += a*V. + */ + void add (const Number a, const Vector &V); + + /** + * Multiple addition of scaled vectors, + * i.e. *this += a*V+b*W. + */ + void add (const Number a, const Vector &V, + const Number b, const Vector &W); + + /** + * Scaling and simple vector addition, + * i.e. + * *this = s*(*this)+V. + */ + void sadd (const Number s, + const Vector &V); + + /** + * Scaling and simple addition, i.e. + * *this = s*(*this)+a*V. + */ + void sadd (const Number s, + const Number a, + const Vector &V); + + /** + * Scaling and multiple addition. + */ + void sadd (const Number s, + const Number a, + const Vector &V, + const Number b, + const Vector &W); + + /** + * Scaling and multiple addition. + * *this = s*(*this)+a*V + b*W + c*X. + */ + void sadd (const Number s, + const Number a, + const Vector &V, + const Number b, + const Vector &W, + const Number c, + const Vector &X); + + /** + * Scale each element of the + * vector by the given factor. + * + * This function is deprecated + * and will be removed in a + * future version. Use + * operator *= and + * operator /= instead. + */ + void scale (const Number factor); + + + /** + * Scale each element of the + * vector by a constant + * value. + */ + Vector &operator *= (const Number factor); + + /** + * Scale each element of the + * vector by the inverse of the + * given value. 
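
Element access comes in two flavors: operator() and operator[] take global indices and are valid only for locally owned or ghost entries, while local_element() indexes the contiguous local storage directly; the add()/sadd() family then provides the usual vector updates. A short sketch under the same assumptions as above (the index passed in is assumed to be locally owned, and both vectors are assumed to share the same parallel layout):

#include <deal.II/lac/parallel_vector.h>

using namespace dealii;

void local_updates (parallel::distributed::Vector<double>       &x,
                    const parallel::distributed::Vector<double> &y,
                    const types::global_dof_index                owned_index)
{
  // global-index access: valid because owned_index is locally owned
  x (owned_index) += 2.0;

  // local-index access: [0, local_size()) addresses the owned entries
  for (unsigned int i = 0; i < x.local_size (); ++i)
    x.local_element (i) *= 0.5;

  // x = 3*x + 2*y
  x.sadd (3.0, 2.0, y);

  // x += 0.25 * y
  x.add (0.25, y);
}
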
+ */ + Vector &operator /= (const Number factor); + + /** + * Scale each element of this + * vector by the corresponding + * element in the argument. This + * function is mostly meant to + * simulate multiplication (and + * immediate re-assignment) by a + * diagonal scaling matrix. + */ + void scale (const Vector &scaling_factors); + + /** + * Scale each element of this + * vector by the corresponding + * element in the argument. This + * function is mostly meant to + * simulate multiplication (and + * immediate re-assignment) by a + * diagonal scaling matrix. + */ + template + void scale (const Vector &scaling_factors); + + /** + * Assignment *this = a*u. + */ + void equ (const Number a, const Vector &u); + + /** + * Assignment *this = a*u. + */ + template + void equ (const Number a, const Vector &u); + + /** + * Assignment *this = a*u + b*v. + */ + void equ (const Number a, const Vector &u, + const Number b, const Vector &v); + + /** + * Assignment *this = a*u + b*v + b*w. + */ + void equ (const Number a, const Vector &u, + const Number b, const Vector &v, + const Number c, const Vector &w); + + /** + * Compute the elementwise ratio of the + * two given vectors, that is let + * this[i] = a[i]/b[i]. This is + * useful for example if you want to + * compute the cellwise ratio of true to + * estimated error. + * + * This vector is appropriately + * scaled to hold the result. + * + * If any of the b[i] is + * zero, the result is + * undefined. No attempt is made + * to catch such situations. + */ + void ratio (const Vector &a, + const Vector &b); + //@} + + + /** + * @name 4: Mixed stuff + */ + //@{ + /** + * Checks whether the given + * partitioner is compatible with the + * partitioner used for this + * vector. Two partitioners are + * compatible if the have the same + * local size and the same ghost + * indices. They do not necessarily + * need to be the same data + * field. This is a local operation + * only, i.e., if only some + * processors decide that the + * partitioning is not compatible, + * only these processors will return + * @p false, whereas the other + * processors will return @p true. + */ + bool + partitioners_are_compatible (const Utilities::MPI::Partitioner &part) const; + + + /** + * Prints the vector to the output stream @p + * out. + */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Returns the memory consumption of this + * class in bytes. + */ + std::size_t memory_consumption () const; + //@} + + private: + /** + * Shared pointer to store the parallel + * partitioning information. This information + * can be shared between several vectors that + * have the same partitioning. + */ + std_cxx1x::shared_ptr partitioner; + + /** + * The size that is currently allocated in the + * val array. + */ + unsigned int allocated_size; + + /** + * Pointer to the array of + * local elements of this vector. + */ + Number *val; + + /** + * Temporary storage that holds the data that + * is sent to this processor in @p compress() + * or sent from this processor in @p + * update_ghost_values. + */ + mutable Number *import_data; + + /** + * Provide this class with all functionality + * of ::dealii::Vector by creating a + * VectorView object. + */ + VectorView vector_view; #ifdef DEAL_II_COMPILER_SUPPORTS_MPI - /** - * A vector that collects all requests from @p - * compress() operations. 
This class uses - * persistent MPI communicators, i.e., the - * communication channels are stored during - * successive calls to a given function. This - * reduces the overhead involved with setting - * up the MPI machinery, but it does not - * remove the need for a receive operation to - * be posted before the data can actually be - * sent. - */ - std::vector compress_requests; - - /** - * A vector that collects all requests from @p - * update_ghost_values() operations. This class uses - * persistent MPI communicators. - */ - mutable std::vector update_ghost_values_requests; + /** + * A vector that collects all requests from @p + * compress() operations. This class uses + * persistent MPI communicators, i.e., the + * communication channels are stored during + * successive calls to a given function. This + * reduces the overhead involved with setting + * up the MPI machinery, but it does not + * remove the need for a receive operation to + * be posted before the data can actually be + * sent. + */ + std::vector compress_requests; + + /** + * A vector that collects all requests from @p + * update_ghost_values() operations. This class uses + * persistent MPI communicators. + */ + mutable std::vector update_ghost_values_requests; #endif - /** - * A lock that makes sure that - * the @p compress and @p - * update_ghost_values functions - * give reasonable results also - * when used with several - * threads. - */ - mutable Threads::ThreadMutex mutex; - - /** - * A helper function that clears the - * compress_requests and update_ghost_values_requests - * field. Used in reinit functions. - */ - void clear_mpi_requests (); - - /** - * A helper function that is used to resize - * the val array. - */ - void resize_val (const unsigned int new_allocated_size); - - /* - * Make all other vector types - * friends. - */ - template friend class Vector; + /** + * A lock that makes sure that + * the @p compress and @p + * update_ghost_values functions + * give reasonable results also + * when used with several + * threads. + */ + mutable Threads::ThreadMutex mutex; + + /** + * A helper function that clears the + * compress_requests and update_ghost_values_requests + * field. Used in reinit functions. + */ + void clear_mpi_requests (); + + /** + * A helper function that is used to resize + * the val array. + */ + void resize_val (const unsigned int new_allocated_size); + + /* + * Make all other vector types + * friends. + */ + template friend class Vector; }; - /*@}*/ + /*@}*/ - /*----------------------- Inline functions ----------------------------------*/ + /*----------------------- Inline functions ----------------------------------*/ #ifndef DOXYGEN diff --cc deal.II/include/deal.II/lac/petsc_block_sparse_matrix.h index 704c2809db,35d21d1951..c703fcd1cf --- a/deal.II/include/deal.II/lac/petsc_block_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/petsc_block_sparse_matrix.h @@@ -32,262 -32,262 +32,262 @@@ DEAL_II_NAMESPACE_OPE namespace PETScWrappers { - /*! @addtogroup PETScWrappers - *@{ - */ - - /** - * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This - * class implements the functions that are specific to the PETSc SparseMatrix - * base objects for a blocked sparse matrix, and leaves the actual work - * relaying most of the calls to the individual blocks to the functions - * implemented in the base class. See there also for a description of when - * this class is useful. 
- * - * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do - * not have external objects for the sparsity patterns. Thus, one does not - * determine the size of the individual blocks of a block matrix of this type - * by attaching a block sparsity pattern, but by calling reinit() to set the - * number of blocks and then by setting the size of each block separately. In - * order to fix the data structures of the block matrix, it is then necessary - * to let it know that we have changed the sizes of the underlying - * matrices. For this, one has to call the collect_sizes() function, for much - * the same reason as is documented with the BlockSparsityPattern class. - * - * @ingroup Matrix1 - * @see @ref GlossBlockLA "Block (linear algebra)" - * @author Wolfgang Bangerth, 2004 - */ + /*! @addtogroup PETScWrappers + *@{ + */ + + /** + * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This + * class implements the functions that are specific to the PETSc SparseMatrix + * base objects for a blocked sparse matrix, and leaves the actual work + * relaying most of the calls to the individual blocks to the functions + * implemented in the base class. See there also for a description of when + * this class is useful. + * + * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do + * not have external objects for the sparsity patterns. Thus, one does not + * determine the size of the individual blocks of a block matrix of this type + * by attaching a block sparsity pattern, but by calling reinit() to set the + * number of blocks and then by setting the size of each block separately. In + * order to fix the data structures of the block matrix, it is then necessary + * to let it know that we have changed the sizes of the underlying + * matrices. For this, one has to call the collect_sizes() function, for much + * the same reason as is documented with the BlockSparsityPattern class. + * + * @ingroup Matrix1 + * @see @ref GlossBlockLA "Block (linear algebra)" + * @author Wolfgang Bangerth, 2004 + */ class BlockSparseMatrix : public BlockMatrixBase { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockMatrixBase BaseClass; - - /** - * Typedef the type of the underlying - * matrix. - */ - typedef BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. - */ - typedef BaseClass::value_type value_type; - typedef BaseClass::pointer pointer; - typedef BaseClass::const_pointer const_pointer; - typedef BaseClass::reference reference; - typedef BaseClass::const_reference const_reference; - typedef BaseClass::size_type size_type; - typedef BaseClass::iterator iterator; - typedef BaseClass::const_iterator const_iterator; - - /** - * Constructor; initializes the - * matrix to be empty, without - * any structure, i.e. the - * matrix is not usable at - * all. This constructor is - * therefore only useful for - * matrices which are members of - * a class. All other matrices - * should be created at a point - * in the data flow where all - * necessary information is - * available. - * - * You have to initialize the - * matrix before usage with - * reinit(BlockSparsityPattern). The - * number of blocks per row and - * column are then determined by - * that function. - */ - BlockSparseMatrix (); - - /** - * Destructor. - */ - ~BlockSparseMatrix (); - - /** - * Pseudo copy operator only copying - * empty objects. The sizes of the block - * matrices need to be the same. 
- */ - BlockSparseMatrix & - operator = (const BlockSparseMatrix &); - - /** - * This operator assigns a scalar to a - * matrix. Since this does usually not - * make much sense (should we set all - * matrix entries to this value? Only - * the nonzero entries of the sparsity - * pattern?), this operation is only - * allowed if the actual value to be - * assigned is zero. This operator only - * exists to allow for the obvious - * notation matrix=0, which - * sets all elements of the matrix to - * zero, but keep the sparsity pattern - * previously used. - */ - BlockSparseMatrix & - operator = (const double d); - - /** - * Resize the matrix, by setting - * the number of block rows and - * columns. This deletes all - * blocks and replaces them by - * unitialized ones, i.e. ones - * for which also the sizes are - * not yet set. You have to do - * that by calling the @p reinit - * functions of the blocks - * themselves. Do not forget to - * call collect_sizes() after - * that on this object. - * - * The reason that you have to - * set sizes of the blocks - * yourself is that the sizes may - * be varying, the maximum number - * of elements per row may be - * varying, etc. It is simpler - * not to reproduce the interface - * of the @p SparsityPattern - * class here but rather let the - * user call whatever function - * she desires. - */ - void reinit (const unsigned int n_block_rows, - const unsigned int n_block_columns); - - /** - * This function collects the - * sizes of the sub-objects and - * stores them in internal - * arrays, in order to be able to - * relay global indices into the - * matrix to indices into the - * subobjects. You *must* call - * this function each time after - * you have changed the size of - * the sub-objects. - */ - void collect_sizes (); - - /** - * Matrix-vector multiplication: - * let $dst = M*src$ with $M$ - * being this matrix. - */ - void vmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - */ - void vmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - void vmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - */ - void vmult (Vector &dst, - const Vector &src) const; - - /** - * Matrix-vector multiplication: - * let $dst = M^T*src$ with $M$ - * being this matrix. This - * function does the same as - * vmult() but takes the - * transposed matrix. - */ - void Tvmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - void Tvmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - */ - void Tvmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. 
- */ - void Tvmult (Vector &dst, - const Vector &src) const; - - /** - * Make the clear() function in the - * base class visible, though it is - * protected. - */ - using BlockMatrixBase::clear; - - /** @addtogroup Exceptions - * @{ - */ - - /** - * Exception - */ - DeclException4 (ExcIncompatibleRowNumbers, - int, int, int, int, - << "The blocks [" << arg1 << ',' << arg2 << "] and [" - << arg3 << ',' << arg4 << "] have differing row numbers."); - /** - * Exception - */ - DeclException4 (ExcIncompatibleColNumbers, - int, int, int, int, - << "The blocks [" << arg1 << ',' << arg2 << "] and [" - << arg3 << ',' << arg4 << "] have differing column numbers."); - ///@} + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ + typedef BlockMatrixBase BaseClass; + + /** + * Typedef the type of the underlying + * matrix. + */ + typedef BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. + */ + typedef BaseClass::value_type value_type; + typedef BaseClass::pointer pointer; + typedef BaseClass::const_pointer const_pointer; + typedef BaseClass::reference reference; + typedef BaseClass::const_reference const_reference; + typedef BaseClass::size_type size_type; + typedef BaseClass::iterator iterator; + typedef BaseClass::const_iterator const_iterator; + + /** + * Constructor; initializes the + * matrix to be empty, without + * any structure, i.e. the + * matrix is not usable at + * all. This constructor is + * therefore only useful for + * matrices which are members of + * a class. All other matrices + * should be created at a point + * in the data flow where all + * necessary information is + * available. + * + * You have to initialize the + * matrix before usage with + * reinit(BlockSparsityPattern). The + * number of blocks per row and + * column are then determined by + * that function. + */ + BlockSparseMatrix (); + + /** + * Destructor. + */ + ~BlockSparseMatrix (); + + /** + * Pseudo copy operator only copying + * empty objects. The sizes of the block + * matrices need to be the same. + */ + BlockSparseMatrix & + operator = (const BlockSparseMatrix &); + + /** + * This operator assigns a scalar to a + * matrix. Since this does usually not + * make much sense (should we set all + * matrix entries to this value? Only + * the nonzero entries of the sparsity + * pattern?), this operation is only + * allowed if the actual value to be + * assigned is zero. This operator only + * exists to allow for the obvious + * notation matrix=0, which + * sets all elements of the matrix to + * zero, but keep the sparsity pattern + * previously used. + */ + BlockSparseMatrix & + operator = (const double d); + + /** + * Resize the matrix, by setting + * the number of block rows and + * columns. This deletes all + * blocks and replaces them by + * unitialized ones, i.e. ones + * for which also the sizes are + * not yet set. You have to do + * that by calling the @p reinit + * functions of the blocks + * themselves. Do not forget to + * call collect_sizes() after + * that on this object. + * + * The reason that you have to + * set sizes of the blocks + * yourself is that the sizes may + * be varying, the maximum number + * of elements per row may be + * varying, etc. It is simpler + * not to reproduce the interface + * of the @p SparsityPattern + * class here but rather let the + * user call whatever function + * she desires. 
+ */ + void reinit (const unsigned int n_block_rows, + const unsigned int n_block_columns); + + /** + * This function collects the + * sizes of the sub-objects and + * stores them in internal + * arrays, in order to be able to + * relay global indices into the + * matrix to indices into the + * subobjects. You *must* call + * this function each time after + * you have changed the size of + * the sub-objects. + */ + void collect_sizes (); + + /** + * Matrix-vector multiplication: + * let $dst = M*src$ with $M$ + * being this matrix. + */ + void vmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ + void vmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ + void vmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + void vmult (Vector &dst, + const Vector &src) const; + + /** + * Matrix-vector multiplication: + * let $dst = M^T*src$ with $M$ + * being this matrix. This + * function does the same as + * vmult() but takes the + * transposed matrix. + */ + void Tvmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ - void Tvmult (BlockVector &dst, ++ void Tvmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ + void Tvmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + void Tvmult (Vector &dst, + const Vector &src) const; + + /** + * Make the clear() function in the + * base class visible, though it is + * protected. + */ + using BlockMatrixBase::clear; + + /** @addtogroup Exceptions + * @{ + */ + + /** + * Exception + */ + DeclException4 (ExcIncompatibleRowNumbers, + int, int, int, int, + << "The blocks [" << arg1 << ',' << arg2 << "] and [" + << arg3 << ',' << arg4 << "] have differing row numbers."); + /** + * Exception + */ + DeclException4 (ExcIncompatibleColNumbers, + int, int, int, int, + << "The blocks [" << arg1 << ',' << arg2 << "] and [" + << arg3 << ',' << arg4 << "] have differing column numbers."); + ///@} }; @@@ -362,8 -362,8 +362,8 @@@ inline void - BlockSparseMatrix::Tvmult (BlockVector &dst, + BlockSparseMatrix::Tvmult (BlockVector &dst, - const Vector &src) const + const Vector &src) const { BaseClass::Tvmult_block_nonblock (dst, src); } diff --cc deal.II/include/deal.II/lac/petsc_block_vector.h index 73c3ecf561,07ea7dd923..8847069092 --- a/deal.II/include/deal.II/lac/petsc_block_vector.h +++ b/deal.II/include/deal.II/lac/petsc_block_vector.h @@@ -29,289 -29,289 +29,289 @@@ DEAL_II_NAMESPACE_OPE namespace PETScWrappers { - /*! @addtogroup PETScWrappers - *@{ - */ - - /** - * An implementation of block vectors based on the vector class implemented in - * PETScWrappers. 
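
As described above, a PETScWrappers::BlockSparseMatrix is set up in three steps: declare the block structure with reinit(), size each block individually, and then call collect_sizes() so that global indices can be relayed to the blocks. The following sketch shows that sequence for a PETSc-enabled deal.II build; the per-block reinit() signature used here, (rows, columns, max nonzeros per row), is assumed to be that of the serial PETScWrappers::SparseMatrix and should be checked against the installed version.

#include <deal.II/lac/petsc_block_sparse_matrix.h>
#include <deal.II/lac/petsc_block_vector.h>

using namespace dealii;

void block_matrix_demo ()
{
  PETScWrappers::BlockSparseMatrix A;
  A.reinit (2, 2);                        // 2x2 block structure, blocks still unsized

  for (unsigned int r = 0; r < 2; ++r)
    for (unsigned int c = 0; c < 2; ++c)
      A.block (r, c).reinit (10, 10, 5);  // each block 10x10, at most 5 nonzeros/row

  A.collect_sizes ();                     // mandatory after changing block sizes

  A.set (0, 0, 1.0);                      // global (row, column) indices
  A.set (15, 15, 1.0);                    // lands in block (1,1)
  A.compress ();                          // finalize PETSc assembly before use

  PETScWrappers::BlockVector x (2, 10), y (2, 10);
  x = 1.0;
  A.vmult (y, x);                         // y = A * x
}
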
While the base class provides for most of the interface, - * this class handles the actual allocation of vectors and provides functions - * that are specific to the underlying vector type. - * - * @ingroup Vectors - * @see @ref GlossBlockLA "Block (linear algebra)" - * @author Wolfgang Bangerth, 2004 - */ + /*! @addtogroup PETScWrappers + *@{ + */ + + /** + * An implementation of block vectors based on the vector class implemented in + * PETScWrappers. While the base class provides for most of the interface, + * this class handles the actual allocation of vectors and provides functions + * that are specific to the underlying vector type. + * + * @ingroup Vectors + * @see @ref GlossBlockLA "Block (linear algebra)" + * @author Wolfgang Bangerth, 2004 + */ class BlockVector : public BlockVectorBase { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockVectorBase BaseClass; - - /** - * Typedef the type of the underlying - * vector. - */ - typedef BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. - */ - typedef BaseClass::value_type value_type; - typedef BaseClass::pointer pointer; - typedef BaseClass::const_pointer const_pointer; - typedef BaseClass::reference reference; - typedef BaseClass::const_reference const_reference; - typedef BaseClass::size_type size_type; - typedef BaseClass::iterator iterator; - typedef BaseClass::const_iterator const_iterator; - - /** - * Constructor. There are three - * ways to use this - * constructor. First, without - * any arguments, it generates - * an object with no - * blocks. Given one argument, - * it initializes num_blocks - * blocks, but these blocks have - * size zero. The third variant - * finally initializes all - * blocks to the same size - * block_size. - * - * Confer the other constructor - * further down if you intend to - * use blocks of different - * sizes. - */ - explicit BlockVector (const unsigned int num_blocks = 0, - const unsigned int block_size = 0); - - /** - * Copy-Constructor. Dimension set to - * that of V, all components are copied - * from V - */ - BlockVector (const BlockVector &V); - - /** - * Copy-constructor: copy the values - * from a PETSc wrapper parallel block - * vector class. - * - * - * Note that due to the communication - * model of MPI, @em all processes have - * to actually perform this operation, - * even if they do not use the - * result. It is not sufficient if only - * one processor tries to copy the - * elements from the other processors - * over to its own process space. - */ - explicit BlockVector (const MPI::BlockVector &v); - - /** - * Constructor. Set the number of - * blocks to n.size() and - * initialize each block with - * n[i] zero elements. - */ - BlockVector (const std::vector &n); - - /** - * Constructor. Set the number of - * blocks to - * n.size(). Initialize the - * vector with the elements - * pointed to by the range of - * iterators given as second and - * third argument. Apart from the - * first argument, this - * constructor is in complete - * analogy to the respective - * constructor of the - * std::vector class, but the - * first argument is needed in - * order to know how to subdivide - * the block vector into - * different blocks. - */ - template - BlockVector (const std::vector &n, - const InputIterator first, - const InputIterator end); - - /** - * Destructor. Clears memory - */ - ~BlockVector (); - - /** - * Copy operator: fill all components of - * the vector with the given scalar - * value. 
- */ - BlockVector & operator = (const value_type s); - - /** - * Copy operator for arguments of the - * same type. - */ - BlockVector & - operator= (const BlockVector &V); - - /** - * Copy all the elements of the - * parallel block vector @p v into this - * local vector. Note that due to the - * communication model of MPI, @em all - * processes have to actually perform - * this operation, even if they do not - * use the result. It is not sufficient - * if only one processor tries to copy - * the elements from the other - * processors over to its own process - * space. - */ - BlockVector & - operator = (const MPI::BlockVector &v); - - /** - * Reinitialize the BlockVector to - * contain num_blocks blocks of - * size block_size each. - * - * If fast==false, the vector - * is filled with zeros. - */ - void reinit (const unsigned int num_blocks, - const unsigned int block_size, - const bool fast = false); - - /** - * Reinitialize the BlockVector such - * that it contains - * block_sizes.size() - * blocks. Each block is reinitialized - * to dimension - * block_sizes[i]. - * - * If the number of blocks is the - * same as before this function - * was called, all vectors remain - * the same and reinit() is - * called for each vector. - * - * If fast==false, the vector - * is filled with zeros. - * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() on one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. - */ - void reinit (const std::vector &N, - const bool fast=false); - - /** - * Change the dimension to that - * of the vector V. The same - * applies as for the other - * reinit() function. - * - * The elements of V are not - * copied, i.e. this function is - * the same as calling reinit - * (V.size(), fast). - * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() of one of the - * blocks, then subsequent - * actions of this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. - */ - void reinit (const BlockVector &V, - const bool fast=false); - - /** - * Swap the contents of this - * vector and the other vector - * v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. - * - * Limitation: right now this - * function only works if both - * vectors have the same number - * of blocks. If needed, the - * numbers of blocks should be - * exchanged, too. - * - * This function is analog to the - * the swap() function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. - */ - void swap (BlockVector &v); - - /** - * Print to a stream. 
- */ - void print (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** @addtogroup Exceptions - * @{ */ - - /** - * Exception - */ - DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); - ///@} + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ + typedef BlockVectorBase BaseClass; + + /** + * Typedef the type of the underlying + * vector. + */ + typedef BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. + */ + typedef BaseClass::value_type value_type; + typedef BaseClass::pointer pointer; + typedef BaseClass::const_pointer const_pointer; + typedef BaseClass::reference reference; + typedef BaseClass::const_reference const_reference; + typedef BaseClass::size_type size_type; + typedef BaseClass::iterator iterator; + typedef BaseClass::const_iterator const_iterator; + + /** + * Constructor. There are three + * ways to use this + * constructor. First, without + * any arguments, it generates + * an object with no + * blocks. Given one argument, + * it initializes num_blocks + * blocks, but these blocks have + * size zero. The third variant + * finally initializes all + * blocks to the same size + * block_size. + * + * Confer the other constructor + * further down if you intend to + * use blocks of different + * sizes. + */ + explicit BlockVector (const unsigned int num_blocks = 0, + const unsigned int block_size = 0); + + /** + * Copy-Constructor. Dimension set to + * that of V, all components are copied + * from V + */ - BlockVector (const BlockVector &V); ++ BlockVector (const BlockVector &V); + + /** + * Copy-constructor: copy the values + * from a PETSc wrapper parallel block + * vector class. + * + * + * Note that due to the communication + * model of MPI, @em all processes have + * to actually perform this operation, + * even if they do not use the + * result. It is not sufficient if only + * one processor tries to copy the + * elements from the other processors + * over to its own process space. + */ + explicit BlockVector (const MPI::BlockVector &v); + + /** + * Constructor. Set the number of + * blocks to n.size() and + * initialize each block with + * n[i] zero elements. + */ + BlockVector (const std::vector &n); + + /** + * Constructor. Set the number of + * blocks to + * n.size(). Initialize the + * vector with the elements + * pointed to by the range of + * iterators given as second and + * third argument. Apart from the + * first argument, this + * constructor is in complete + * analogy to the respective + * constructor of the + * std::vector class, but the + * first argument is needed in + * order to know how to subdivide + * the block vector into + * different blocks. + */ + template + BlockVector (const std::vector &n, + const InputIterator first, + const InputIterator end); + + /** + * Destructor. Clears memory + */ + ~BlockVector (); + + /** + * Copy operator: fill all components of + * the vector with the given scalar + * value. + */ + BlockVector &operator = (const value_type s); + + /** + * Copy operator for arguments of the + * same type. + */ + BlockVector & + operator= (const BlockVector &V); + + /** + * Copy all the elements of the + * parallel block vector @p v into this + * local vector. Note that due to the + * communication model of MPI, @em all + * processes have to actually perform + * this operation, even if they do not + * use the result. 
It is not sufficient + * if only one processor tries to copy + * the elements from the other + * processors over to its own process + * space. + */ + BlockVector & + operator = (const MPI::BlockVector &v); + + /** + * Reinitialize the BlockVector to + * contain num_blocks blocks of + * size block_size each. + * + * If fast==false, the vector + * is filled with zeros. + */ + void reinit (const unsigned int num_blocks, + const unsigned int block_size, + const bool fast = false); + + /** + * Reinitialize the BlockVector such + * that it contains + * block_sizes.size() + * blocks. Each block is reinitialized + * to dimension + * block_sizes[i]. + * + * If the number of blocks is the + * same as before this function + * was called, all vectors remain + * the same and reinit() is + * called for each vector. + * + * If fast==false, the vector + * is filled with zeros. + * + * Note that you must call this + * (or the other reinit() + * functions) function, rather + * than calling the reinit() + * functions of an individual + * block, to allow the block + * vector to update its caches of + * vector sizes. If you call + * reinit() on one of the + * blocks, then subsequent + * actions on this object may + * yield unpredictable results + * since they may be routed to + * the wrong block. + */ + void reinit (const std::vector &N, + const bool fast=false); + + /** + * Change the dimension to that + * of the vector V. The same + * applies as for the other + * reinit() function. + * + * The elements of V are not + * copied, i.e. this function is + * the same as calling reinit + * (V.size(), fast). + * + * Note that you must call this + * (or the other reinit() + * functions) function, rather + * than calling the reinit() + * functions of an individual + * block, to allow the block + * vector to update its caches of + * vector sizes. If you call + * reinit() of one of the + * blocks, then subsequent + * actions of this object may + * yield unpredictable results + * since they may be routed to + * the wrong block. + */ + void reinit (const BlockVector &V, + const bool fast=false); + + /** + * Swap the contents of this + * vector and the other vector + * v. One could do this + * operation with a temporary + * variable and copying over the + * data elements, but this + * function is significantly more + * efficient since it only swaps + * the pointers to the data of + * the two vectors and therefore + * does not need to allocate + * temporary storage and move + * data around. + * + * Limitation: right now this + * function only works if both + * vectors have the same number + * of blocks. If needed, the + * numbers of blocks should be + * exchanged, too. + * + * This function is analog to the + * the swap() function of all C++ + * standard containers. Also, + * there is a global function + * swap(u,v) that simply calls + * u.swap(v), again in analogy + * to standard functions. + */ + void swap (BlockVector &v); + + /** + * Print to a stream. 
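As a quick illustration of the BlockVector interface documented above, here is a minimal usage sketch. It assumes this is the serial PETScWrappers::BlockVector from <deal.II/lac/petsc_block_vector.h> and that PETSc has already been initialized; neither the header name nor the namespace appears in this excerpt.

    #include <deal.II/lac/petsc_block_vector.h>
    #include <iostream>

    using namespace dealii;

    void block_vector_demo ()
    {
      // three blocks of ten entries each, all initialized to zero
      PETScWrappers::BlockVector v (3, 10);

      v = 1.0;                           // fill every component with the scalar 1.0

      // resize to two zero-initialized blocks of twenty entries each
      v.reinit (2, 20, /*fast=*/ false);

      PETScWrappers::BlockVector w (v);  // copy-constructed with the same block layout
      w.swap (v);                        // cheap: only the data pointers are exchanged

      // print with 6 digits, scientific notation, all entries across one line
      w.print (std::cout, 6, true, true);
    }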
+ */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** @addtogroup Exceptions + * @{ */ + + /** + * Exception + */ + DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); + ///@} }; - /*@}*/ + /*@}*/ - /*----------------------- Inline functions ----------------------------------*/ + /*----------------------- Inline functions ----------------------------------*/ diff --cc deal.II/include/deal.II/lac/petsc_matrix_base.h index 03d6f676ba,a4bfe4d4c5..634ac7baf0 --- a/deal.II/include/deal.II/lac/petsc_matrix_base.h +++ b/deal.II/include/deal.II/lac/petsc_matrix_base.h @@@ -252,656 -252,656 +252,656 @@@ namespace PETScWrapper } - /** - * Base class for all matrix classes that are implemented on top of the PETSc - * matrix types. Since in PETSc all matrix types (i.e. sequential and - * parallel, sparse, blocked, etc.) are built by filling the contents of an - * abstract object that is only referenced through a pointer of a type that is - * independent of the actual matrix type, we can implement almost all - * functionality of matrices in this base class. Derived classes will then only - * have to provide the functionality to create one or the other kind of - * matrix. - * - * The interface of this class is modeled after the existing - * SparseMatrix class in deal.II. It has almost the same member - * functions, and is often exchangable. However, since PETSc only supports a - * single scalar type (either double, float, or a complex data type), it is - * not templated, and only works with whatever your PETSc installation has - * defined the data type PetscScalar to. - * - * Note that PETSc only guarantees that operations do what you expect if the - * functions @p MatAssemblyBegin and @p MatAssemblyEnd have been called - * after matrix assembly. Therefore, you need to call - * SparseMatrix::compress() before you actually use the matrix. This also - * calls @p MatCompress that compresses the storage format for sparse - * matrices by discarding unused elements. PETSc allows to continue with - * assembling the matrix after calls to these functions, but since there are - * no more free entries available after that any more, it is better to only - * call SparseMatrix::compress() once at the end of the assembly stage and - * before the matrix is actively used. - * - * @ingroup PETScWrappers - * @ingroup Matrix1 - * @author Wolfgang Bangerth, 2004 - */ + /** + * Base class for all matrix classes that are implemented on top of the PETSc + * matrix types. Since in PETSc all matrix types (i.e. sequential and + * parallel, sparse, blocked, etc.) are built by filling the contents of an + * abstract object that is only referenced through a pointer of a type that is + * independent of the actual matrix type, we can implement almost all + * functionality of matrices in this base class. Derived classes will then only + * have to provide the functionality to create one or the other kind of + * matrix. + * + * The interface of this class is modeled after the existing + * SparseMatrix class in deal.II. It has almost the same member + * functions, and is often exchangable. However, since PETSc only supports a + * single scalar type (either double, float, or a complex data type), it is + * not templated, and only works with whatever your PETSc installation has + * defined the data type PetscScalar to. 
+ * + * Note that PETSc only guarantees that operations do what you expect if the + * functions @p MatAssemblyBegin and @p MatAssemblyEnd have been called + * after matrix assembly. Therefore, you need to call + * SparseMatrix::compress() before you actually use the matrix. This also + * calls @p MatCompress that compresses the storage format for sparse + * matrices by discarding unused elements. PETSc allows to continue with + * assembling the matrix after calls to these functions, but since there are + * no more free entries available after that any more, it is better to only + * call SparseMatrix::compress() once at the end of the assembly stage and + * before the matrix is actively used. + * + * @ingroup PETScWrappers + * @ingroup Matrix1 + * @author Wolfgang Bangerth, 2004 + */ class MatrixBase : public Subscriptor { - public: - /** - * Declare a typedef for the iterator - * class. - */ - typedef MatrixIterators::const_iterator const_iterator; - - /** - * Declare a typedef in analogy to all - * the other container classes. - */ - typedef PetscScalar value_type; - - /** - * Default constructor. - */ - MatrixBase (); - - /** - * Destructor. Made virtual so that one - * can use pointers to this class. - */ - virtual ~MatrixBase (); - - /** - * This operator assigns a scalar to a - * matrix. Since this does usually not - * make much sense (should we set all - * matrix entries to this value? Only - * the nonzero entries of the sparsity - * pattern?), this operation is only - * allowed if the actual value to be - * assigned is zero. This operator only - * exists to allow for the obvious - * notation matrix=0, which - * sets all elements of the matrix to - * zero, but keeps the sparsity pattern - * previously used. - */ - MatrixBase & - operator = (const value_type d); - /** - * Release all memory and return - * to a state just like after - * having called the default - * constructor. - */ - void clear (); - - /** - * Set the element (i,j) to @p - * value. - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds a new entry to the - * matrix if it didn't exist before, - * very much in contrast to the - * SparseMatrix class which throws an - * error if the entry does not exist. - * If value is not a finite - * number an exception is thrown. - */ - void set (const unsigned int i, - const unsigned int j, - const PetscScalar value); - - /** - * Set all elements given in a - * FullMatrix into the sparse - * matrix locations given by - * indices. In other words, - * this function writes the elements - * in full_matrix into the - * calling matrix, using the - * local-to-global indexing specified - * by indices for both the - * rows and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds some new entries to - * the matrix if they didn't exist - * before, very much in contrast to - * the SparseMatrix class which - * throws an error if the entry does - * not exist. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. 
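To connect these declarations to actual use, here is a small sketch of the element-wise interface. It works on any object of a class derived from MatrixBase whose size and sparsity layout have already been set up; the VectorOperation::insert value passed to compress() is an assumption alongside the VectorOperation::unknown default shown further down.

    #include <deal.II/lac/petsc_matrix_base.h>

    using namespace dealii;

    // `matrix' may be any sequential or parallel PETSc matrix derived from MatrixBase
    void fill_diagonal (PETScWrappers::MatrixBase &matrix)
    {
      for (unsigned int i=0; i<matrix.m(); ++i)
        matrix.set (i, i, 2.0);          // creates the entry if it did not exist before

      // make the matrix usable outside the assembly phase
      matrix.compress (VectorOperation::insert);
    }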
- */ - void set (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - void set (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds some new entries to - * the matrix if they didn't exist - * before, very much in contrast to - * the SparseMatrix class which - * throws an error if the entry does - * not exist. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. - */ - void set (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = false); - - /** - * Set several elements to values - * given by values in a - * given row in columns given by - * col_indices into the sparse - * matrix. - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds some new entries to - * the matrix if they didn't exist - * before, very much in contrast to - * the SparseMatrix class which - * throws an error if the entry does - * not exist. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. - */ - void set (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const PetscScalar *values, - const bool elide_zero_values = false); - - /** - * Add @p value to the element - * (i,j). - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds a new entry to the - * matrix if it didn't exist before, - * very much in contrast to the - * SparseMatrix class which throws an - * error if the entry does not exist. - * If value is not a finite - * number an exception is thrown. - */ - void add (const unsigned int i, - const unsigned int j, - const PetscScalar value); - - /** - * Add all elements given in a - * FullMatrix into sparse - * matrix locations given by - * indices. In other words, - * this function adds the elements in - * full_matrix to the - * respective entries in calling - * matrix, using the local-to-global - * indexing specified by - * indices for both the rows - * and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds some new entries to - * the matrix if they didn't exist - * before, very much in contrast to - * the SparseMatrix class which - * throws an error if the entry does - * not exist. 
- * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - void add (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - void add (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds some new entries to - * the matrix if they didn't exist - * before, very much in contrast to - * the SparseMatrix class which - * throws an error if the entry does - * not exist. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - void add (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = true); - - /** - * Add an array of values given by - * values in the given - * global matrix row at columns - * specified by col_indices in the - * sparse matrix. - * - * If the present object (from a - * derived class of this one) happens - * to be a sparse matrix, then this - * function adds some new entries to - * the matrix if they didn't exist - * before, very much in contrast to - * the SparseMatrix class which - * throws an error if the entry does - * not exist. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - void add (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const PetscScalar *values, - const bool elide_zero_values = true, - const bool col_indices_are_sorted = false); - - /** - * Remove all elements from - * this row by setting - * them to zero. The function - * does not modify the number - * of allocated nonzero - * entries, it only sets some - * entries to zero. It may drop - * them from the sparsity - * pattern, though (but retains - * the allocated memory in case - * new entries are again added - * later). - * - * This operation is used in - * eliminating constraints (e.g. due to - * hanging nodes) and makes sure that - * we can write this modification to - * the matrix without having to read - * entries (such as the locations of - * non-zero elements) from it -- - * without this operation, removing - * constraints on parallel matrices is - * a rather complicated procedure. - * - * The second parameter can be used to - * set the diagonal entry of this row - * to a value different from zero. The - * default is to set it to zero. 
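The following sketch shows how clear_row() and clear_rows() are typically used when eliminating constrained rows. The row indices are made up for illustration, and the matrix is assumed to have been assembled and compressed beforehand.

    #include <deal.II/lac/petsc_matrix_base.h>
    #include <vector>

    using namespace dealii;

    void eliminate_constraints (PETScWrappers::MatrixBase &matrix)
    {
      matrix.clear_row (7);                       // zero out row 7, diagonal entry set to zero

      std::vector<unsigned int> constrained_rows;
      constrained_rows.push_back (0);
      constrained_rows.push_back (42);
      matrix.clear_rows (constrained_rows, 1.0);  // zero these rows, put 1.0 on each diagonal
    }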
- */ - void clear_row (const unsigned int row, - const PetscScalar new_diag_value = 0); - - /** - * Same as clear_row(), except that it - * works on a number of rows at once. - * - * The second parameter can be used to - * set the diagonal entries of all - * cleared rows to something different - * from zero. Note that all of these - * diagonal entries get the same value - * -- if you want different values for - * the diagonal entries, you have to - * set them by hand. - */ - void clear_rows (const std::vector &rows, - const PetscScalar new_diag_value = 0); - - /** - * PETSc matrices store their own - * sparsity patterns. So, in analogy to - * our own SparsityPattern class, - * this function compresses the - * sparsity pattern and allows the - * resulting matrix to be used in all - * other operations where before only - * assembly functions were - * allowed. This function must - * therefore be called once you have - * assembled the matrix. - * - * See @ref GlossCompress "Compressing distributed objects" - * for more information. - * more information. - */ - void compress (::dealii::VectorOperation::values operation - =::dealii::VectorOperation::unknown); - /** - * Return the value of the entry - * (i,j). This may be an - * expensive operation and you should - * always take care where to call this - * function. In contrast to the - * respective function in the - * @p MatrixBase class, we don't - * throw an exception if the respective - * entry doesn't exist in the sparsity - * pattern of this class, since PETSc - * does not transmit this information. - * - * This function is therefore exactly - * equivalent to the el() function. - */ - PetscScalar operator () (const unsigned int i, - const unsigned int j) const; - - /** - * Return the value of the matrix entry - * (i,j). If this entry does not - * exist in the sparsity pattern, then - * zero is returned. While this may be - * convenient in some cases, note that - * it is simple to write algorithms - * that are slow compared to an optimal - * solution, since the sparsity of the - * matrix is not used. - */ - PetscScalar el (const unsigned int i, - const unsigned int j) const; - - /** - * Return the main diagonal - * element in the ith - * row. This function throws an - * error if the matrix is not - * quadratic. - * - * Since we do not have direct access - * to the underlying data structure, - * this function is no faster than the - * elementwise access using the el() - * function. However, we provide this - * function for compatibility with the - * SparseMatrix class. - */ - PetscScalar diag_element (const unsigned int i) const; - - /** - * Return the number of rows in this - * matrix. - */ - unsigned int m () const; - - /** - * Return the number of columns in this - * matrix. - */ - unsigned int n () const; - - /** - * Return the local dimension of the - * matrix, i.e. the number of rows - * stored on the present MPI - * process. For sequential matrices, - * this number is the same as m(), - * but for parallel matrices it may be - * smaller. - * - * To figure out which elements - * exactly are stored locally, - * use local_range(). - */ - unsigned int local_size () const; - - /** - * Return a pair of indices - * indicating which rows of - * this matrix are stored - * locally. The first number is - * the index of the first - * row stored, the second - * the index of the one past - * the last one that is stored - * locally. 
If this is a - * sequential matrix, then the - * result will be the pair - * (0,m()), otherwise it will be - * a pair (i,i+n), where - * n=local_size(). - */ - std::pair - local_range () const; - - /** - * Return whether @p index is - * in the local range or not, - * see also local_range(). - */ - bool in_local_range (const unsigned int index) const; - - /** - * Return a reference to the MPI - * communicator object in use with this - * matrix. This function has to be - * implemented in derived classes. - */ - virtual const MPI_Comm & get_mpi_communicator () const = 0; - - /** - * Return the number of nonzero - * elements of this - * matrix. Actually, it returns - * the number of entries in the - * sparsity pattern; if any of - * the entries should happen to - * be zero, it is counted anyway. - */ - unsigned int n_nonzero_elements () const; - - /** - * Number of entries in a specific row. - */ - unsigned int row_length (const unsigned int row) const; - - /** - * Return the l1-norm of the matrix, that is - * $|M|_1=max_{all columns j}\sum_{all - * rows i} |M_ij|$, - * (max. sum of columns). - * This is the - * natural matrix norm that is compatible - * to the l1-norm for vectors, i.e. - * $|Mv|_1\leq |M|_1 |v|_1$. - * (cf. Haemmerlin-Hoffmann: - * Numerische Mathematik) - */ - PetscReal l1_norm () const; - - /** - * Return the linfty-norm of the - * matrix, that is - * $|M|_infty=max_{all rows i}\sum_{all - * columns j} |M_ij|$, - * (max. sum of rows). - * This is the - * natural matrix norm that is compatible - * to the linfty-norm of vectors, i.e. - * $|Mv|_infty \leq |M|_infty |v|_infty$. - * (cf. Haemmerlin-Hoffmann: - * Numerische Mathematik) - */ - PetscReal linfty_norm () const; - - /** - * Return the frobenius norm of the - * matrix, i.e. the square root of the - * sum of squares of all entries in the - * matrix. - */ - PetscReal frobenius_norm () const; - - - /** - * Return the square of the norm - * of the vector $v$ with respect - * to the norm induced by this - * matrix, - * i.e. $\left(v,Mv\right)$. This - * is useful, e.g. in the finite - * element context, where the - * $L_2$ norm of a function - * equals the matrix norm with - * respect to the mass matrix of - * the vector representing the - * nodal values of the finite - * element function. - * - * Obviously, the matrix needs to - * be quadratic for this operation. - * - * The implementation of this function - * is not as efficient as the one in - * the @p MatrixBase class used in - * deal.II (i.e. the original one, not - * the PETSc wrapper class) since PETSc - * doesn't support this operation and - * needs a temporary vector. - * - * Note that if the current object - * represents a parallel distributed - * matrix (of type - * PETScWrappers::MPI::SparseMatrix), - * then the given vector has to be - * a distributed vector as - * well. Conversely, if the matrix is - * not distributed, then neither - * may the vector be. - */ - PetscScalar matrix_norm_square (const VectorBase &v) const; - - - /** - * Compute the matrix scalar - * product $\left(u,Mv\right)$. - * - * The implementation of this function - * is not as efficient as the one in - * the @p MatrixBase class used in - * deal.II (i.e. the original one, not - * the PETSc wrapper class) since PETSc - * doesn't support this operation and - * needs a temporary vector. - * - * Note that if the current object - * represents a parallel distributed - * matrix (of type - * PETScWrappers::MPI::SparseMatrix), - * then both vectors have to be - * distributed vectors as - * well. 
Conversely, if the matrix is - * not distributed, then neither of the - * vectors may be. - */ - PetscScalar matrix_scalar_product (const VectorBase &u, - const VectorBase &v) const; + public: + /** + * Declare a typedef for the iterator + * class. + */ + typedef MatrixIterators::const_iterator const_iterator; + + /** + * Declare a typedef in analogy to all + * the other container classes. + */ + typedef PetscScalar value_type; + + /** + * Default constructor. + */ + MatrixBase (); + + /** + * Destructor. Made virtual so that one + * can use pointers to this class. + */ + virtual ~MatrixBase (); + + /** + * This operator assigns a scalar to a + * matrix. Since this does usually not + * make much sense (should we set all + * matrix entries to this value? Only + * the nonzero entries of the sparsity + * pattern?), this operation is only + * allowed if the actual value to be + * assigned is zero. This operator only + * exists to allow for the obvious + * notation matrix=0, which + * sets all elements of the matrix to + * zero, but keeps the sparsity pattern + * previously used. + */ + MatrixBase & + operator = (const value_type d); + /** + * Release all memory and return + * to a state just like after + * having called the default + * constructor. + */ + void clear (); + + /** + * Set the element (i,j) to @p + * value. + * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds a new entry to the + * matrix if it didn't exist before, + * very much in contrast to the + * SparseMatrix class which throws an + * error if the entry does not exist. + * If value is not a finite + * number an exception is thrown. + */ + void set (const unsigned int i, + const unsigned int j, + const PetscScalar value); + + /** + * Set all elements given in a + * FullMatrix into the sparse + * matrix locations given by + * indices. In other words, + * this function writes the elements + * in full_matrix into the + * calling matrix, using the + * local-to-global indexing specified + * by indices for both the + * rows and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds some new entries to + * the matrix if they didn't exist + * before, very much in contrast to + * the SparseMatrix class which + * throws an error if the entry does + * not exist. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ + void set (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. + */ + void set (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. 
+ * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds some new entries to + * the matrix if they didn't exist + * before, very much in contrast to + * the SparseMatrix class which + * throws an error if the entry does + * not exist. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ + void set (const unsigned int row, + const std::vector &col_indices, - const std::vector &values, ++ const std::vector &values, + const bool elide_zero_values = false); + + /** + * Set several elements to values + * given by values in a + * given row in columns given by + * col_indices into the sparse + * matrix. + * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds some new entries to + * the matrix if they didn't exist + * before, very much in contrast to + * the SparseMatrix class which + * throws an error if the entry does + * not exist. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ + void set (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, - const PetscScalar *values, ++ const PetscScalar *values, + const bool elide_zero_values = false); + + /** + * Add @p value to the element + * (i,j). + * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds a new entry to the + * matrix if it didn't exist before, + * very much in contrast to the + * SparseMatrix class which throws an + * error if the entry does not exist. + * If value is not a finite + * number an exception is thrown. + */ + void add (const unsigned int i, + const unsigned int j, + const PetscScalar value); + + /** + * Add all elements given in a + * FullMatrix into sparse + * matrix locations given by + * indices. In other words, + * this function adds the elements in + * full_matrix to the + * respective entries in calling + * matrix, using the local-to-global + * indexing specified by + * indices for both the rows + * and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds some new entries to + * the matrix if they didn't exist + * before, very much in contrast to + * the SparseMatrix class which + * throws an error if the entry does + * not exist. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. 
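In finite element assembly, the add() variants above are what carries a local cell matrix into the global PETSc matrix. A sketch, assuming the usual deal.II FullMatrix<PetscScalar> type and a vector of global dof indices supplied by the caller:

    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/petsc_matrix_base.h>

    using namespace dealii;

    void copy_local_to_global (PETScWrappers::MatrixBase       &matrix,
                               const FullMatrix<PetscScalar>   &cell_matrix,
                               const std::vector<unsigned int> &local_dof_indices)
    {
      // scatter the quadratic local matrix into the rows and columns given by
      // local_dof_indices; zero entries are skipped (elide_zero_values defaults to true)
      matrix.add (local_dof_indices, cell_matrix);
    }

    // ...and once the loop over all cells is finished:
    //      matrix.compress (VectorOperation::add);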
+ */ + void add (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. + */ + void add (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. + * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds some new entries to + * the matrix if they didn't exist + * before, very much in contrast to + * the SparseMatrix class which + * throws an error if the entry does + * not exist. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + void add (const unsigned int row, + const std::vector &col_indices, - const std::vector &values, ++ const std::vector &values, + const bool elide_zero_values = true); + + /** + * Add an array of values given by + * values in the given + * global matrix row at columns + * specified by col_indices in the + * sparse matrix. + * + * If the present object (from a + * derived class of this one) happens + * to be a sparse matrix, then this + * function adds some new entries to + * the matrix if they didn't exist + * before, very much in contrast to + * the SparseMatrix class which + * throws an error if the entry does + * not exist. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + void add (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, - const PetscScalar *values, ++ const PetscScalar *values, + const bool elide_zero_values = true, + const bool col_indices_are_sorted = false); + + /** + * Remove all elements from + * this row by setting + * them to zero. The function + * does not modify the number + * of allocated nonzero + * entries, it only sets some + * entries to zero. It may drop + * them from the sparsity + * pattern, though (but retains + * the allocated memory in case + * new entries are again added + * later). + * + * This operation is used in + * eliminating constraints (e.g. due to + * hanging nodes) and makes sure that + * we can write this modification to + * the matrix without having to read + * entries (such as the locations of + * non-zero elements) from it -- + * without this operation, removing + * constraints on parallel matrices is + * a rather complicated procedure. + * + * The second parameter can be used to + * set the diagonal entry of this row + * to a value different from zero. The + * default is to set it to zero. + */ + void clear_row (const unsigned int row, + const PetscScalar new_diag_value = 0); + + /** + * Same as clear_row(), except that it + * works on a number of rows at once. 
+ * + * The second parameter can be used to + * set the diagonal entries of all + * cleared rows to something different + * from zero. Note that all of these + * diagonal entries get the same value + * -- if you want different values for + * the diagonal entries, you have to + * set them by hand. + */ + void clear_rows (const std::vector &rows, + const PetscScalar new_diag_value = 0); + + /** + * PETSc matrices store their own + * sparsity patterns. So, in analogy to + * our own SparsityPattern class, + * this function compresses the + * sparsity pattern and allows the + * resulting matrix to be used in all + * other operations where before only + * assembly functions were + * allowed. This function must + * therefore be called once you have + * assembled the matrix. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + * more information. + */ + void compress (::dealii::VectorOperation::values operation + =::dealii::VectorOperation::unknown); + /** + * Return the value of the entry + * (i,j). This may be an + * expensive operation and you should + * always take care where to call this + * function. In contrast to the + * respective function in the + * @p MatrixBase class, we don't + * throw an exception if the respective + * entry doesn't exist in the sparsity + * pattern of this class, since PETSc + * does not transmit this information. + * + * This function is therefore exactly + * equivalent to the el() function. + */ + PetscScalar operator () (const unsigned int i, + const unsigned int j) const; + + /** + * Return the value of the matrix entry + * (i,j). If this entry does not + * exist in the sparsity pattern, then + * zero is returned. While this may be + * convenient in some cases, note that + * it is simple to write algorithms + * that are slow compared to an optimal + * solution, since the sparsity of the + * matrix is not used. + */ + PetscScalar el (const unsigned int i, + const unsigned int j) const; + + /** + * Return the main diagonal + * element in the ith + * row. This function throws an + * error if the matrix is not + * quadratic. + * + * Since we do not have direct access + * to the underlying data structure, + * this function is no faster than the + * elementwise access using the el() + * function. However, we provide this + * function for compatibility with the + * SparseMatrix class. + */ + PetscScalar diag_element (const unsigned int i) const; + + /** + * Return the number of rows in this + * matrix. + */ + unsigned int m () const; + + /** + * Return the number of columns in this + * matrix. + */ + unsigned int n () const; + + /** + * Return the local dimension of the + * matrix, i.e. the number of rows + * stored on the present MPI + * process. For sequential matrices, + * this number is the same as m(), + * but for parallel matrices it may be + * smaller. + * + * To figure out which elements + * exactly are stored locally, + * use local_range(). + */ + unsigned int local_size () const; + + /** + * Return a pair of indices + * indicating which rows of + * this matrix are stored + * locally. The first number is + * the index of the first + * row stored, the second + * the index of the one past + * the last one that is stored + * locally. If this is a + * sequential matrix, then the + * result will be the pair + * (0,m()), otherwise it will be + * a pair (i,i+n), where + * n=local_size(). + */ + std::pair + local_range () const; + + /** + * Return whether @p index is + * in the local range or not, + * see also local_range(). 
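A short sketch of the inquiry functions just listed; it simply counts how many entries the calling MPI process stores locally.

    #include <deal.II/lac/petsc_matrix_base.h>

    using namespace dealii;

    unsigned int count_local_entries (const PETScWrappers::MatrixBase &matrix)
    {
      // [range.first, range.second) are the rows stored on this process;
      // every index in that interval satisfies in_local_range()
      const std::pair<unsigned int, unsigned int> range = matrix.local_range ();

      unsigned int entries = 0;
      for (unsigned int row = range.first; row < range.second; ++row)
        entries += matrix.row_length (row);

      return entries;   // for a sequential matrix this equals n_nonzero_elements()
    }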
+ */ + bool in_local_range (const unsigned int index) const; + + /** + * Return a reference to the MPI + * communicator object in use with this + * matrix. This function has to be + * implemented in derived classes. + */ + virtual const MPI_Comm &get_mpi_communicator () const = 0; + + /** + * Return the number of nonzero + * elements of this + * matrix. Actually, it returns + * the number of entries in the + * sparsity pattern; if any of + * the entries should happen to + * be zero, it is counted anyway. + */ + unsigned int n_nonzero_elements () const; + + /** + * Number of entries in a specific row. + */ + unsigned int row_length (const unsigned int row) const; + + /** + * Return the l1-norm of the matrix, that is + * $|M|_1=max_{all columns j}\sum_{all + * rows i} |M_ij|$, + * (max. sum of columns). + * This is the + * natural matrix norm that is compatible + * to the l1-norm for vectors, i.e. + * $|Mv|_1\leq |M|_1 |v|_1$. + * (cf. Haemmerlin-Hoffmann: + * Numerische Mathematik) + */ + PetscReal l1_norm () const; + + /** + * Return the linfty-norm of the + * matrix, that is + * $|M|_infty=max_{all rows i}\sum_{all + * columns j} |M_ij|$, + * (max. sum of rows). + * This is the + * natural matrix norm that is compatible + * to the linfty-norm of vectors, i.e. + * $|Mv|_infty \leq |M|_infty |v|_infty$. + * (cf. Haemmerlin-Hoffmann: + * Numerische Mathematik) + */ + PetscReal linfty_norm () const; + + /** + * Return the frobenius norm of the + * matrix, i.e. the square root of the + * sum of squares of all entries in the + * matrix. + */ + PetscReal frobenius_norm () const; + + + /** + * Return the square of the norm + * of the vector $v$ with respect + * to the norm induced by this + * matrix, + * i.e. $\left(v,Mv\right)$. This + * is useful, e.g. in the finite + * element context, where the + * $L_2$ norm of a function + * equals the matrix norm with + * respect to the mass matrix of + * the vector representing the + * nodal values of the finite + * element function. + * + * Obviously, the matrix needs to + * be quadratic for this operation. + * + * The implementation of this function + * is not as efficient as the one in + * the @p MatrixBase class used in + * deal.II (i.e. the original one, not + * the PETSc wrapper class) since PETSc + * doesn't support this operation and + * needs a temporary vector. + * + * Note that if the current object + * represents a parallel distributed + * matrix (of type + * PETScWrappers::MPI::SparseMatrix), + * then the given vector has to be + * a distributed vector as + * well. Conversely, if the matrix is + * not distributed, then neither + * may the vector be. + */ + PetscScalar matrix_norm_square (const VectorBase &v) const; + + + /** + * Compute the matrix scalar + * product $\left(u,Mv\right)$. + * + * The implementation of this function + * is not as efficient as the one in + * the @p MatrixBase class used in + * deal.II (i.e. the original one, not + * the PETSc wrapper class) since PETSc + * doesn't support this operation and + * needs a temporary vector. + * + * Note that if the current object + * represents a parallel distributed + * matrix (of type + * PETScWrappers::MPI::SparseMatrix), + * then both vectors have to be + * distributed vectors as + * well. Conversely, if the matrix is + * not distributed, then neither of the + * vectors may be. 
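Finally, the norm and scalar-product members can be used as in the following sketch; u and v must be vectors of the matching sequential or distributed type, as the notes above explain.

    #include <deal.II/lac/petsc_matrix_base.h>
    #include <deal.II/lac/petsc_vector_base.h>
    #include <iostream>

    using namespace dealii;

    void print_matrix_norms (const PETScWrappers::MatrixBase &matrix,
                             const PETScWrappers::VectorBase &u,
                             const PETScWrappers::VectorBase &v)
    {
      const PetscReal   l1  = matrix.l1_norm ();                    // max column sum
      const PetscReal   li  = matrix.linfty_norm ();                // max row sum
      const PetscReal   fro = matrix.frobenius_norm ();

      const PetscScalar vMv = matrix.matrix_norm_square (v);        // (v, Mv)
      const PetscScalar uMv = matrix.matrix_scalar_product (u, v);  // (u, Mv)

      std::cout << l1 << ' ' << li << ' ' << fro << ' '
                << vMv << ' ' << uMv << std::endl;
    }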
+ */ + PetscScalar matrix_scalar_product (const VectorBase &u, + const VectorBase &v) const; #if DEAL_II_PETSC_VERSION_GTE(3,1,0) diff --cc deal.II/include/deal.II/lac/petsc_matrix_free.h index 3bb9e6fd57,8c99333fa1..2eec3536b7 --- a/deal.II/include/deal.II/lac/petsc_matrix_free.h +++ b/deal.II/include/deal.II/lac/petsc_matrix_free.h @@@ -30,329 -30,329 +30,329 @@@ DEAL_II_NAMESPACE_OPE namespace PETScWrappers { - /** - * Implementation of a parallel matrix class based on PETSc MatShell matrix-type. - * This base class implements only the interface to the PETSc matrix object, - * while all the functionality is contained in the matrix-vector - * multiplication which must be reimplmented in derived classes. - * - * This interface is an addition to the dealii::MatrixFree class to realize - * user-defined matrix-classes together with PETSc solvers and functionalities. - * See also the documentation of dealii::MatrixFree class and step-37 and step-48. - * - * Similar to other matrix classes in namespaces PETScWrappers and PETScWrappers::MPI, - * the MatrxiFree class provides the usual matrix-vector multiplication - * vmult(VectorBase &dst, const VectorBase &src) - * which is pure virtual and must be reimplemented in derived classes. - * Besides the usual interface, this class has a matrix-vector multiplication - * vmult(Vec &dst, const Vec &src) - * taking PETSc Vec objects, which will be called by - * matrix_free_mult(Mat A, Vec src, Vec dst) - * registered as matrix-vector multiplication of this PETSc matrix object. - * The default implementation of the vmult function in the base class translates - * the given PETSc Vec* vectors into a deal.II vector, calls - * the usual vmult function with the usual interface and converts - * the result back to PETSc Vec*. This could be made much more efficient - * in derived classes without allocating new memory. - * - * @ingroup PETScWrappers - * @ingroup Matrix1 - * @author Wolfgang Bangerth, Martin Steigemann, 2012 - */ + /** + * Implementation of a parallel matrix class based on PETSc MatShell matrix-type. + * This base class implements only the interface to the PETSc matrix object, + * while all the functionality is contained in the matrix-vector + * multiplication which must be reimplmented in derived classes. + * + * This interface is an addition to the dealii::MatrixFree class to realize + * user-defined matrix-classes together with PETSc solvers and functionalities. + * See also the documentation of dealii::MatrixFree class and step-37 and step-48. + * + * Similar to other matrix classes in namespaces PETScWrappers and PETScWrappers::MPI, + * the MatrxiFree class provides the usual matrix-vector multiplication + * vmult(VectorBase &dst, const VectorBase &src) + * which is pure virtual and must be reimplemented in derived classes. + * Besides the usual interface, this class has a matrix-vector multiplication + * vmult(Vec &dst, const Vec &src) + * taking PETSc Vec objects, which will be called by + * matrix_free_mult(Mat A, Vec src, Vec dst) + * registered as matrix-vector multiplication of this PETSc matrix object. + * The default implementation of the vmult function in the base class translates + * the given PETSc Vec* vectors into a deal.II vector, calls + * the usual vmult function with the usual interface and converts + * the result back to PETSc Vec*. This could be made much more efficient + * in derived classes without allocating new memory. 
+ * + * @ingroup PETScWrappers + * @ingroup Matrix1 + * @author Wolfgang Bangerth, Martin Steigemann, 2012 + */ class MatrixFree : public MatrixBase { - public: - - /** - * Default constructor. Create an - * empty matrix object. - */ - MatrixFree (); - - /** - * Create a matrix object of - * dimensions @p m times @p n - * with communication happening - * over the provided @p communicator. - * - * For the meaning of the @p local_rows - * and @p local_columns parameters, - * see the PETScWrappers::MPI::SparseMatrix - * class documentation. - * - * As other PETSc matrices, also the - * the matrix-free object needs to - * have a size and to perform matrix - * vector multiplications efficiently - * in parallel also @p local_rows - * and @p local_columns. But in contrast - * to PETSc::SparseMatrix classes a - * PETSc matrix-free object does not need - * any estimation of non_zero entries - * and has no option is_symmetric. - */ - MatrixFree (const MPI_Comm &communicator, - const unsigned int m, - const unsigned int n, - const unsigned int local_rows, - const unsigned int local_columns); - - /** - * Create a matrix object of - * dimensions @p m times @p n - * with communication happening - * over the provided @p communicator. - * - * As other PETSc matrices, also the - * the matrix-free object needs to - * have a size and to perform matrix - * vector multiplications efficiently - * in parallel also @p local_rows - * and @p local_columns. But in contrast - * to PETSc::SparseMatrix classes a - * PETSc matrix-free object does not need - * any estimation of non_zero entries - * and has no option is_symmetric. - */ - MatrixFree (const MPI_Comm &communicator, - const unsigned int m, - const unsigned int n, - const std::vector &local_rows_per_process, - const std::vector &local_columns_per_process, - const unsigned int this_process); - - /** - * Constructor for the serial case: - * Same function as - * MatrixFree(), see above, - * with communicator = MPI_COMM_WORLD. - */ - MatrixFree (const unsigned int m, - const unsigned int n, - const unsigned int local_rows, - const unsigned int local_columns); - - /** - * Constructor for the serial case: - * Same function as - * MatrixFree(), see above, - * with communicator = MPI_COMM_WORLD. - */ - MatrixFree (const unsigned int m, - const unsigned int n, - const std::vector &local_rows_per_process, - const std::vector &local_columns_per_process, - const unsigned int this_process); - - /** - * Throw away the present matrix and - * generate one that has the same - * properties as if it were created by - * the constructor of this class with - * the same argument list as the - * present function. - */ - void reinit (const MPI_Comm &communicator, - const unsigned int m, - const unsigned int n, - const unsigned int local_rows, - const unsigned int local_columns); - - /** - * Throw away the present matrix and - * generate one that has the same - * properties as if it were created by - * the constructor of this class with - * the same argument list as the - * present function. - */ - void reinit (const MPI_Comm &communicator, - const unsigned int m, - const unsigned int n, - const std::vector &local_rows_per_process, - const std::vector &local_columns_per_process, - const unsigned int this_process); - - /** - * Calls the @p reinit() function - * above with communicator = MPI_COMM_WORLD. 
- */ - void reinit (const unsigned int m, - const unsigned int n, - const unsigned int local_rows, - const unsigned int local_columns); - - /** - * Calls the @p reinit() function - * above with communicator = MPI_COMM_WORLD. - */ - void reinit (const unsigned int m, - const unsigned int n, - const std::vector &local_rows_per_process, - const std::vector &local_columns_per_process, - const unsigned int this_process); - - /** - * Release all memory and return - * to a state just like after - * having called the default - * constructor. - */ - void clear (); - - /** - * Return a reference to the MPI - * communicator object in use with - * this matrix. - */ - const MPI_Comm & get_mpi_communicator () const; - - /** - * Matrix-vector multiplication: - * let dst = M*src with - * M being this matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that if the current object - * represents a parallel distributed - * matrix (of type - * PETScWrappers::MPI::SparseMatrix), - * then both vectors have to be - * distributed vectors as - * well. Conversely, if the matrix is - * not distributed, then neither of the - * vectors may be. - */ - virtual - void vmult (VectorBase &dst, - const VectorBase &src) const = 0; - - /** - * Matrix-vector multiplication: let - * dst = MT*src with - * M being this matrix. This - * function does the same as @p vmult() - * but takes the transposed matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that if the current object - * represents a parallel distributed - * matrix then both vectors have to be - * distributed vectors as - * well. Conversely, if the matrix is - * not distributed, then neither of the - * vectors may be. - */ - virtual - void Tvmult (VectorBase &dst, - const VectorBase &src) const = 0; - - /** - * Adding Matrix-vector - * multiplication. Add - * M*src on dst - * with M being this - * matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that if the current object - * represents a parallel distributed - * matrix then both vectors have to be - * distributed vectors as - * well. Conversely, if the matrix is - * not distributed, then neither of the - * vectors may be. - */ - virtual - void vmult_add (VectorBase &dst, - const VectorBase &src) const = 0; - - /** - * Adding Matrix-vector - * multiplication. Add - * MT*src to - * dst with M being - * this matrix. This function - * does the same as @p vmult_add() - * but takes the transposed - * matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that if the current object - * represents a parallel distributed - * matrix then both vectors have to be - * distributed vectors as - * well. Conversely, if the matrix is - * not distributed, then neither of the - * vectors may be. - */ - virtual - void Tvmult_add (VectorBase &dst, - const VectorBase &src) const = 0; - - /** - * The matrix-vector multiplication - * called by @p matrix_free_mult(). - * This function can be reimplemented - * in derived classes for efficiency. The default - * implementation copies the given vectors - * into PETScWrappers::*::Vector - * and calls vmult(VectorBase &dst, const VectorBase &src) - * which is purely virtual and must be reimplemented - * in derived classes. - */ - virtual - void vmult (Vec &dst, const Vec &src) const; - - private: - - /** - * Copy of the communicator object to - * be used for this parallel matrix-free object. 
- */ - MPI_Comm communicator; - - /** - * Callback-function registered - * as the matrix-vector multiplication - * of this matrix-free object - * called by PETSc routines. - * This function must be static and - * takes a PETSc matrix @p A, - * and vectors @p src and @p dst, - * where dst = A*src - * - * Source and destination must - * not be the same vector. - * - * This function calls - * vmult(Vec &dst, const Vec &src) - * which should be reimplemented in - * derived classes. - */ - static int matrix_free_mult (Mat A, Vec src, Vec dst); - - /** - * Do the actual work for the - * respective @p reinit() function and - * the matching constructor, - * i.e. create a matrix object. Getting rid - * of the previous matrix is left to - * the caller. - */ - void do_reinit (const unsigned int m, - const unsigned int n, - const unsigned int local_rows, - const unsigned int local_columns); - }; + public: + + /** + * Default constructor. Create an + * empty matrix object. + */ + MatrixFree (); + + /** + * Create a matrix object of + * dimensions @p m times @p n + * with communication happening + * over the provided @p communicator. + * + * For the meaning of the @p local_rows + * and @p local_columns parameters, + * see the PETScWrappers::MPI::SparseMatrix + * class documentation. + * + * As other PETSc matrices, also the + * the matrix-free object needs to + * have a size and to perform matrix + * vector multiplications efficiently + * in parallel also @p local_rows + * and @p local_columns. But in contrast + * to PETSc::SparseMatrix classes a + * PETSc matrix-free object does not need + * any estimation of non_zero entries + * and has no option is_symmetric. + */ + MatrixFree (const MPI_Comm &communicator, + const unsigned int m, + const unsigned int n, + const unsigned int local_rows, + const unsigned int local_columns); + + /** + * Create a matrix object of + * dimensions @p m times @p n + * with communication happening + * over the provided @p communicator. + * + * As other PETSc matrices, also the + * the matrix-free object needs to + * have a size and to perform matrix + * vector multiplications efficiently + * in parallel also @p local_rows + * and @p local_columns. But in contrast + * to PETSc::SparseMatrix classes a + * PETSc matrix-free object does not need + * any estimation of non_zero entries + * and has no option is_symmetric. + */ + MatrixFree (const MPI_Comm &communicator, + const unsigned int m, + const unsigned int n, + const std::vector &local_rows_per_process, + const std::vector &local_columns_per_process, + const unsigned int this_process); + + /** + * Constructor for the serial case: + * Same function as + * MatrixFree(), see above, + * with communicator = MPI_COMM_WORLD. + */ + MatrixFree (const unsigned int m, + const unsigned int n, + const unsigned int local_rows, + const unsigned int local_columns); + + /** + * Constructor for the serial case: + * Same function as + * MatrixFree(), see above, + * with communicator = MPI_COMM_WORLD. + */ + MatrixFree (const unsigned int m, + const unsigned int n, + const std::vector &local_rows_per_process, + const std::vector &local_columns_per_process, + const unsigned int this_process); + + /** + * Throw away the present matrix and + * generate one that has the same + * properties as if it were created by + * the constructor of this class with + * the same argument list as the + * present function. 
+ */ + void reinit (const MPI_Comm &communicator, + const unsigned int m, + const unsigned int n, + const unsigned int local_rows, + const unsigned int local_columns); + + /** + * Throw away the present matrix and + * generate one that has the same + * properties as if it were created by + * the constructor of this class with + * the same argument list as the + * present function. + */ + void reinit (const MPI_Comm &communicator, + const unsigned int m, + const unsigned int n, + const std::vector &local_rows_per_process, + const std::vector &local_columns_per_process, + const unsigned int this_process); + + /** + * Calls the @p reinit() function + * above with communicator = MPI_COMM_WORLD. + */ + void reinit (const unsigned int m, + const unsigned int n, + const unsigned int local_rows, + const unsigned int local_columns); + + /** + * Calls the @p reinit() function + * above with communicator = MPI_COMM_WORLD. + */ + void reinit (const unsigned int m, + const unsigned int n, + const std::vector &local_rows_per_process, + const std::vector &local_columns_per_process, + const unsigned int this_process); + + /** + * Release all memory and return + * to a state just like after + * having called the default + * constructor. + */ + void clear (); + + /** + * Return a reference to the MPI + * communicator object in use with + * this matrix. + */ + const MPI_Comm &get_mpi_communicator () const; + + /** + * Matrix-vector multiplication: + * let dst = M*src with + * M being this matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that if the current object + * represents a parallel distributed + * matrix (of type + * PETScWrappers::MPI::SparseMatrix), + * then both vectors have to be + * distributed vectors as + * well. Conversely, if the matrix is + * not distributed, then neither of the + * vectors may be. + */ + virtual + void vmult (VectorBase &dst, + const VectorBase &src) const = 0; + + /** + * Matrix-vector multiplication: let + * dst = MT*src with + * M being this matrix. This + * function does the same as @p vmult() + * but takes the transposed matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that if the current object + * represents a parallel distributed + * matrix then both vectors have to be + * distributed vectors as + * well. Conversely, if the matrix is + * not distributed, then neither of the + * vectors may be. + */ + virtual + void Tvmult (VectorBase &dst, + const VectorBase &src) const = 0; + + /** + * Adding Matrix-vector + * multiplication. Add + * M*src on dst + * with M being this + * matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that if the current object + * represents a parallel distributed + * matrix then both vectors have to be + * distributed vectors as + * well. Conversely, if the matrix is + * not distributed, then neither of the + * vectors may be. + */ + virtual + void vmult_add (VectorBase &dst, + const VectorBase &src) const = 0; + + /** + * Adding Matrix-vector + * multiplication. Add + * MT*src to + * dst with M being + * this matrix. This function + * does the same as @p vmult_add() + * but takes the transposed + * matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that if the current object + * represents a parallel distributed + * matrix then both vectors have to be + * distributed vectors as + * well. Conversely, if the matrix is + * not distributed, then neither of the + * vectors may be. 
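To show how this interface is meant to be used, here is a sketch of a user-defined matrix-free operator. The class name and the trivial operator (dst = 2*src) are invented for illustration; the equ() and add() calls are assumed from the usual VectorBase interface, which is not part of this excerpt. An object of such a class can then be handed to the PETSc solver wrappers like any other matrix.

    #include <deal.II/lac/petsc_matrix_free.h>

    using namespace dealii;

    class ScaledIdentity : public PETScWrappers::MatrixFree
    {
    public:
      ScaledIdentity (const MPI_Comm    &communicator,
                      const unsigned int m,
                      const unsigned int n,
                      const unsigned int local_rows,
                      const unsigned int local_columns)
        :
        PETScWrappers::MatrixFree (communicator, m, n, local_rows, local_columns)
      {}

      virtual void vmult (PETScWrappers::VectorBase       &dst,
                          const PETScWrappers::VectorBase &src) const
      {
        dst.equ (2.0, src);            // dst = 2*src, a stand-in for a real operator
      }

      virtual void Tvmult (PETScWrappers::VectorBase       &dst,
                           const PETScWrappers::VectorBase &src) const
      {
        vmult (dst, src);              // the operator is symmetric in this sketch
      }

      virtual void vmult_add (PETScWrappers::VectorBase       &dst,
                              const PETScWrappers::VectorBase &src) const
      {
        dst.add (2.0, src);            // dst += 2*src
      }

      virtual void Tvmult_add (PETScWrappers::VectorBase       &dst,
                               const PETScWrappers::VectorBase &src) const
      {
        vmult_add (dst, src);
      }
    };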
+ */ + virtual + void Tvmult_add (VectorBase &dst, + const VectorBase &src) const = 0; + + /** + * The matrix-vector multiplication + * called by @p matrix_free_mult(). + * This function can be reimplemented + * in derived classes for efficiency. The default + * implementation copies the given vectors + * into PETScWrappers::*::Vector + * and calls vmult(VectorBase &dst, const VectorBase &src) + * which is purely virtual and must be reimplemented + * in derived classes. + */ + virtual - void vmult (Vec &dst, const Vec &src) const; ++ void vmult (Vec &dst, const Vec &src) const; + + private: + + /** + * Copy of the communicator object to + * be used for this parallel matrix-free object. + */ + MPI_Comm communicator; + + /** + * Callback-function registered + * as the matrix-vector multiplication + * of this matrix-free object + * called by PETSc routines. + * This function must be static and + * takes a PETSc matrix @p A, + * and vectors @p src and @p dst, + * where dst = A*src + * + * Source and destination must + * not be the same vector. + * + * This function calls + * vmult(Vec &dst, const Vec &src) + * which should be reimplemented in + * derived classes. + */ + static int matrix_free_mult (Mat A, Vec src, Vec dst); + + /** + * Do the actual work for the + * respective @p reinit() function and + * the matching constructor, + * i.e. create a matrix object. Getting rid + * of the previous matrix is left to + * the caller. + */ + void do_reinit (const unsigned int m, + const unsigned int n, + const unsigned int local_rows, + const unsigned int local_columns); + }; diff --cc deal.II/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h index 3ccaa6d9c1,92bb6151c4..0274dd372b --- a/deal.II/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h @@@ -33,251 -33,251 +33,251 @@@ namespace PETScWrapper namespace MPI { - /*! @addtogroup PETScWrappers - *@{ - */ - - /** - * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. This - * class implements the functions that are specific to the PETSc SparseMatrix - * base objects for a blocked sparse matrix, and leaves the actual work - * relaying most of the calls to the individual blocks to the functions - * implemented in the base class. See there also for a description of when - * this class is useful. - * - * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do - * not have external objects for the sparsity patterns. Thus, one does not - * determine the size of the individual blocks of a block matrix of this type - * by attaching a block sparsity pattern, but by calling reinit() to set the - * number of blocks and then by setting the size of each block separately. In - * order to fix the data structures of the block matrix, it is then necessary - * to let it know that we have changed the sizes of the underlying - * matrices. For this, one has to call the collect_sizes() function, for much - * the same reason as is documented with the BlockSparsityPattern class. - * - * @ingroup Matrix1 - * @see @ref GlossBlockLA "Block (linear algebra)" - * @author Wolfgang Bangerth, 2004 - */ + /*! @addtogroup PETScWrappers + *@{ + */ + + /** + * Blocked sparse matrix based on the PETScWrappers::SparseMatrix class. 
This + * class implements the functions that are specific to the PETSc SparseMatrix + * base objects for a blocked sparse matrix, and leaves the actual work + * relaying most of the calls to the individual blocks to the functions + * implemented in the base class. See there also for a description of when + * this class is useful. + * + * In contrast to the deal.II-type SparseMatrix class, the PETSc matrices do + * not have external objects for the sparsity patterns. Thus, one does not + * determine the size of the individual blocks of a block matrix of this type + * by attaching a block sparsity pattern, but by calling reinit() to set the + * number of blocks and then by setting the size of each block separately. In + * order to fix the data structures of the block matrix, it is then necessary + * to let it know that we have changed the sizes of the underlying + * matrices. For this, one has to call the collect_sizes() function, for much + * the same reason as is documented with the BlockSparsityPattern class. + * + * @ingroup Matrix1 + * @see @ref GlossBlockLA "Block (linear algebra)" + * @author Wolfgang Bangerth, 2004 + */ class BlockSparseMatrix : public BlockMatrixBase { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockMatrixBase BaseClass; - - /** - * Typedef the type of the underlying - * matrix. - */ - typedef BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. - */ - typedef BaseClass::value_type value_type; - typedef BaseClass::pointer pointer; - typedef BaseClass::const_pointer const_pointer; - typedef BaseClass::reference reference; - typedef BaseClass::const_reference const_reference; - typedef BaseClass::size_type size_type; - typedef BaseClass::iterator iterator; - typedef BaseClass::const_iterator const_iterator; - - /** - * Constructor; initializes the - * matrix to be empty, without - * any structure, i.e. the - * matrix is not usable at - * all. This constructor is - * therefore only useful for - * matrices which are members of - * a class. All other matrices - * should be created at a point - * in the data flow where all - * necessary information is - * available. - * - * You have to initialize the - * matrix before usage with - * reinit(BlockSparsityPattern). The - * number of blocks per row and - * column are then determined by - * that function. - */ - BlockSparseMatrix (); - - /** - * Destructor. - */ - ~BlockSparseMatrix (); - - /** - * Pseudo copy operator only copying - * empty objects. The sizes of the - * block matrices need to be the - * same. - */ - BlockSparseMatrix & - operator = (const BlockSparseMatrix &); - - /** - * This operator assigns a scalar to - * a matrix. Since this does usually - * not make much sense (should we set - * all matrix entries to this value? - * Only the nonzero entries of the - * sparsity pattern?), this operation - * is only allowed if the actual - * value to be assigned is zero. This - * operator only exists to allow for - * the obvious notation - * matrix=0, which sets all - * elements of the matrix to zero, - * but keep the sparsity pattern - * previously used. - */ - BlockSparseMatrix & - operator = (const double d); - - /** - * Resize the matrix, by setting - * the number of block rows and - * columns. This deletes all - * blocks and replaces them by - * unitialized ones, i.e. ones - * for which also the sizes are - * not yet set. You have to do - * that by calling the @p reinit - * functions of the blocks - * themselves. 
Do not forget to - * call collect_sizes() after - * that on this object. - * - * The reason that you have to - * set sizes of the blocks - * yourself is that the sizes may - * be varying, the maximum number - * of elements per row may be - * varying, etc. It is simpler - * not to reproduce the interface - * of the SparsityPattern - * class here but rather let the - * user call whatever function - * she desires. - */ - void reinit (const unsigned int n_block_rows, - const unsigned int n_block_columns); - - /** - * Matrix-vector multiplication: - * let $dst = M*src$ with $M$ - * being this matrix. - */ - void vmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - */ - void vmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - void vmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - */ - void vmult (Vector &dst, - const Vector &src) const; - - /** - * Matrix-vector multiplication: - * let $dst = M^T*src$ with $M$ - * being this matrix. This - * function does the same as - * vmult() but takes the - * transposed matrix. - */ - void Tvmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - void Tvmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - */ - void Tvmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - */ - void Tvmult (Vector &dst, - const Vector &src) const; - - /** - * This function collects the - * sizes of the sub-objects and - * stores them in internal - * arrays, in order to be able to - * relay global indices into the - * matrix to indices into the - * subobjects. You *must* call - * this function each time after - * you have changed the size of - * the sub-objects. - */ - void collect_sizes (); - - /** - * Return a reference to the MPI - * communicator object in use with - * this matrix. - */ - const MPI_Comm & get_mpi_communicator () const; - - /** - * Make the clear() function in the - * base class visible, though it is - * protected. - */ - using BlockMatrixBase::clear; + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ + typedef BlockMatrixBase BaseClass; + + /** + * Typedef the type of the underlying + * matrix. + */ + typedef BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. 
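// Illustration only (not part of this patch): setting the block sizes "by
// hand" as described above, rather than through a block sparsity pattern.
// All sizes, the communicator and the per-row entry estimate are placeholders.
#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>
using namespace dealii;

void setup_2x2_block_matrix ()
{
  const MPI_Comm     communicator    = MPI_COMM_WORLD;
  const unsigned int block_size[2]   = {100, 50};  // global size of each block
  const unsigned int local_size[2]   = {100, 50};  // all rows local: 1 process
  const unsigned int entries_per_row = 10;         // rough sparsity estimate

  PETScWrappers::MPI::BlockSparseMatrix matrix;

  // First fix the number of block rows and columns ...
  matrix.reinit (2, 2);

  // ... then size every block individually ...
  for (unsigned int r=0; r<2; ++r)
    for (unsigned int c=0; c<2; ++c)
      matrix.block(r,c).reinit (communicator,
                                block_size[r], block_size[c],
                                local_size[r], local_size[c],
                                entries_per_row);

  // ... and finally tell the block matrix that its blocks changed size:
  matrix.collect_sizes ();
}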
+ */ + typedef BaseClass::value_type value_type; + typedef BaseClass::pointer pointer; + typedef BaseClass::const_pointer const_pointer; + typedef BaseClass::reference reference; + typedef BaseClass::const_reference const_reference; + typedef BaseClass::size_type size_type; + typedef BaseClass::iterator iterator; + typedef BaseClass::const_iterator const_iterator; + + /** + * Constructor; initializes the + * matrix to be empty, without + * any structure, i.e. the + * matrix is not usable at + * all. This constructor is + * therefore only useful for + * matrices which are members of + * a class. All other matrices + * should be created at a point + * in the data flow where all + * necessary information is + * available. + * + * You have to initialize the + * matrix before usage with + * reinit(BlockSparsityPattern). The + * number of blocks per row and + * column are then determined by + * that function. + */ + BlockSparseMatrix (); + + /** + * Destructor. + */ + ~BlockSparseMatrix (); + + /** + * Pseudo copy operator only copying + * empty objects. The sizes of the + * block matrices need to be the + * same. + */ + BlockSparseMatrix & + operator = (const BlockSparseMatrix &); + + /** + * This operator assigns a scalar to + * a matrix. Since this does usually + * not make much sense (should we set + * all matrix entries to this value? + * Only the nonzero entries of the + * sparsity pattern?), this operation + * is only allowed if the actual + * value to be assigned is zero. This + * operator only exists to allow for + * the obvious notation + * matrix=0, which sets all + * elements of the matrix to zero, + * but keep the sparsity pattern + * previously used. + */ + BlockSparseMatrix & + operator = (const double d); + + /** + * Resize the matrix, by setting + * the number of block rows and + * columns. This deletes all + * blocks and replaces them by + * unitialized ones, i.e. ones + * for which also the sizes are + * not yet set. You have to do + * that by calling the @p reinit + * functions of the blocks + * themselves. Do not forget to + * call collect_sizes() after + * that on this object. + * + * The reason that you have to + * set sizes of the blocks + * yourself is that the sizes may + * be varying, the maximum number + * of elements per row may be + * varying, etc. It is simpler + * not to reproduce the interface + * of the SparsityPattern + * class here but rather let the + * user call whatever function + * she desires. + */ + void reinit (const unsigned int n_block_rows, + const unsigned int n_block_columns); + + /** + * Matrix-vector multiplication: + * let $dst = M*src$ with $M$ + * being this matrix. + */ + void vmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ + void vmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ + void vmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + void vmult (Vector &dst, + const Vector &src) const; + + /** + * Matrix-vector multiplication: + * let $dst = M^T*src$ with $M$ + * being this matrix. This + * function does the same as + * vmult() but takes the + * transposed matrix. 
+ */ + void Tvmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ - void Tvmult (BlockVector &dst, ++ void Tvmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ + void Tvmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + void Tvmult (Vector &dst, + const Vector &src) const; + + /** + * This function collects the + * sizes of the sub-objects and + * stores them in internal + * arrays, in order to be able to + * relay global indices into the + * matrix to indices into the + * subobjects. You *must* call + * this function each time after + * you have changed the size of + * the sub-objects. + */ + void collect_sizes (); + + /** + * Return a reference to the MPI + * communicator object in use with + * this matrix. + */ + const MPI_Comm &get_mpi_communicator () const; + + /** + * Make the clear() function in the + * base class visible, though it is + * protected. + */ + using BlockMatrixBase::clear; }; diff --cc deal.II/include/deal.II/lac/petsc_parallel_block_vector.h index fb1365a5fc,e17ce5b484..050e5ea43c --- a/deal.II/include/deal.II/lac/petsc_parallel_block_vector.h +++ b/deal.II/include/deal.II/lac/petsc_parallel_block_vector.h @@@ -33,298 -33,298 +33,298 @@@ namespace PETScWrapper namespace MPI { - /*! @addtogroup PETScWrappers - *@{ - */ - - /** - * An implementation of block vectors based on the parallel vector class - * implemented in PETScWrappers. While the base class provides for most of the - * interface, this class handles the actual allocation of vectors and provides - * functions that are specific to the underlying vector type. - * - * The model of distribution of data is such that each of the blocks is - * distributed across all MPI processes named in the MPI communicator. I.e. we - * don't just distribute the whole vector, but each component. In the - * constructors and reinit() functions, one therefore not only has to specify - * the sizes of the individual blocks, but also the number of elements of each - * of these blocks to be stored on the local process. - * - * @ingroup Vectors - * @see @ref GlossBlockLA "Block (linear algebra)" - * @author Wolfgang Bangerth, 2004 - */ + /*! @addtogroup PETScWrappers + *@{ + */ + + /** + * An implementation of block vectors based on the parallel vector class + * implemented in PETScWrappers. While the base class provides for most of the + * interface, this class handles the actual allocation of vectors and provides + * functions that are specific to the underlying vector type. + * + * The model of distribution of data is such that each of the blocks is + * distributed across all MPI processes named in the MPI communicator. I.e. we + * don't just distribute the whole vector, but each component. In the + * constructors and reinit() functions, one therefore not only has to specify + * the sizes of the individual blocks, but also the number of elements of each + * of these blocks to be stored on the local process. 
+ * + * @ingroup Vectors + * @see @ref GlossBlockLA "Block (linear algebra)" + * @author Wolfgang Bangerth, 2004 + */ class BlockVector : public BlockVectorBase { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockVectorBase BaseClass; - - /** - * Typedef the type of the underlying - * vector. - */ - typedef BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. - */ - typedef BaseClass::value_type value_type; - typedef BaseClass::pointer pointer; - typedef BaseClass::const_pointer const_pointer; - typedef BaseClass::reference reference; - typedef BaseClass::const_reference const_reference; - typedef BaseClass::size_type size_type; - typedef BaseClass::iterator iterator; - typedef BaseClass::const_iterator const_iterator; - - /** - * Default constructor. Generate an - * empty vector without any blocks. - */ - BlockVector (); - - /** - * Constructor. Generate a block - * vector with @p n_blocks blocks, - * each of which is a parallel - * vector across @p communicator - * with @p block_size elements of - * which @p local_size elements are - * stored on the present process. - */ - explicit BlockVector (const unsigned int n_blocks, - const MPI_Comm &communicator, - const unsigned int block_size, - const unsigned int local_size); - - /** - * Copy-Constructor. Set all the - * properties of the parallel vector - * to those of the given argument and - * copy the elements. - */ - BlockVector (const BlockVector &V); - - /** - * Constructor. Set the number of - * blocks to - * block_sizes.size() and - * initialize each block with - * block_sizes[i] zero - * elements. The individual blocks - * are distributed across the given - * communicator, and each store - * local_elements[i] - * elements on the present process. - */ - BlockVector (const std::vector &block_sizes, - const MPI_Comm &communicator, - const std::vector &local_elements); - - /** - * Destructor. Clears memory - */ - ~BlockVector (); - - /** - * Copy operator: fill all components - * of the vector that are locally - * stored with the given scalar value. - */ - BlockVector & operator = (const value_type s); - - /** - * Copy operator for arguments of the - * same type. - */ - BlockVector & - operator= (const BlockVector &V); - - /** - * Copy the given sequential - * (non-distributed) block vector - * into the present parallel block - * vector. It is assumed that they - * have the same size, and this - * operation does not change the - * partitioning of the parallel - * vectors by which its elements are - * distributed across several MPI - * processes. What this operation - * therefore does is to copy that - * chunk of the given vector @p v - * that corresponds to elements of - * the target vector that are stored - * locally, and copies them, for each - * of the individual blocks of this - * object. Elements that are not - * stored locally are not touched. - * - * This being a parallel vector, you - * must make sure that @em all - * processes call this function at - * the same time. It is not possible - * to change the local part of a - * parallel vector on only one - * process, independent of what other - * processes do, with this function. - */ - BlockVector & - operator = (const PETScWrappers::BlockVector &v); - - /** - * Reinitialize the BlockVector to - * contain @p n_blocks of size @p - * block_size, each of which stores - * @p local_size elements - * locally. 
The @p communicator - * argument denotes which MPI channel - * each of these blocks shall - * communicate. - * - * If fast==false, the vector - * is filled with zeros. - */ - void reinit (const unsigned int n_blocks, - const MPI_Comm &communicator, - const unsigned int block_size, - const unsigned int local_size, - const bool fast = false); - - /** - * Reinitialize the BlockVector such - * that it contains - * block_sizes.size() - * blocks. Each block is - * reinitialized to dimension - * block_sizes[i]. Each of - * them stores - * local_sizes[i] elements - * on the present process. - * - * If the number of blocks is the - * same as before this function - * was called, all vectors remain - * the same and reinit() is - * called for each vector. - * - * If fast==false, the vector - * is filled with zeros. - * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() of one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. - */ - void reinit (const std::vector &block_sizes, - const MPI_Comm &communicator, - const std::vector &local_sizes, - const bool fast=false); - - /** - * Change the dimension to that - * of the vector V. The same - * applies as for the other - * reinit() function. - * - * The elements of V are not - * copied, i.e. this function is - * the same as calling reinit - * (V.size(), fast). - * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() on one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. - */ - void reinit (const BlockVector &V, - const bool fast=false); - - /** - * Return a reference to the MPI - * communicator object in use with - * this vector. - */ - const MPI_Comm & get_mpi_communicator () const; - - /** - * Swap the contents of this - * vector and the other vector - * v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. - * - * Limitation: right now this - * function only works if both - * vectors have the same number - * of blocks. If needed, the - * numbers of blocks should be - * exchanged, too. - * - * This function is analog to the - * the swap() function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. - */ - void swap (BlockVector &v); - - /** - * Print to a stream. - */ - void print (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * Exception - */ - DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); - /** - * Exception - */ - DeclException0 (ExcNonMatchingBlockVectors); + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. 
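// Illustration only (not part of this patch): creating and filling a parallel
// block vector with the constructors documented above. The sizes are made up
// and chosen so that, on one process, every element is stored locally.
#include <deal.II/lac/petsc_parallel_block_vector.h>
#include <vector>
using namespace dealii;

void make_block_vector ()
{
  // Two blocks, each a distributed vector of global size 100, all of whose
  // 100 elements live on the present process.
  PETScWrappers::MPI::BlockVector solution (2, MPI_COMM_WORLD, 100, 100);

  solution = 0.;                     // set all locally stored elements
  solution.block(1)(42) = 3.14;      // write one element of the second block
  solution.compress ();              // finish element-by-element writes

  // The same layout via the vector-of-sizes constructor:
  const std::vector<unsigned int> block_sizes (2, 100);
  const std::vector<unsigned int> local_sizes (2, 100);
  PETScWrappers::MPI::BlockVector tmp (block_sizes, MPI_COMM_WORLD, local_sizes);
}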
+ */ + typedef BlockVectorBase BaseClass; + + /** + * Typedef the type of the underlying + * vector. + */ + typedef BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. + */ + typedef BaseClass::value_type value_type; + typedef BaseClass::pointer pointer; + typedef BaseClass::const_pointer const_pointer; + typedef BaseClass::reference reference; + typedef BaseClass::const_reference const_reference; + typedef BaseClass::size_type size_type; + typedef BaseClass::iterator iterator; + typedef BaseClass::const_iterator const_iterator; + + /** + * Default constructor. Generate an + * empty vector without any blocks. + */ + BlockVector (); + + /** + * Constructor. Generate a block + * vector with @p n_blocks blocks, + * each of which is a parallel + * vector across @p communicator + * with @p block_size elements of + * which @p local_size elements are + * stored on the present process. + */ + explicit BlockVector (const unsigned int n_blocks, + const MPI_Comm &communicator, + const unsigned int block_size, + const unsigned int local_size); + + /** + * Copy-Constructor. Set all the + * properties of the parallel vector + * to those of the given argument and + * copy the elements. + */ - BlockVector (const BlockVector &V); ++ BlockVector (const BlockVector &V); + + /** + * Constructor. Set the number of + * blocks to + * block_sizes.size() and + * initialize each block with + * block_sizes[i] zero + * elements. The individual blocks + * are distributed across the given + * communicator, and each store + * local_elements[i] + * elements on the present process. + */ + BlockVector (const std::vector &block_sizes, + const MPI_Comm &communicator, + const std::vector &local_elements); + + /** + * Destructor. Clears memory + */ + ~BlockVector (); + + /** + * Copy operator: fill all components + * of the vector that are locally + * stored with the given scalar value. + */ + BlockVector &operator = (const value_type s); + + /** + * Copy operator for arguments of the + * same type. + */ + BlockVector & + operator= (const BlockVector &V); + + /** + * Copy the given sequential + * (non-distributed) block vector + * into the present parallel block + * vector. It is assumed that they + * have the same size, and this + * operation does not change the + * partitioning of the parallel + * vectors by which its elements are + * distributed across several MPI + * processes. What this operation + * therefore does is to copy that + * chunk of the given vector @p v + * that corresponds to elements of + * the target vector that are stored + * locally, and copies them, for each + * of the individual blocks of this + * object. Elements that are not + * stored locally are not touched. + * + * This being a parallel vector, you + * must make sure that @em all + * processes call this function at + * the same time. It is not possible + * to change the local part of a + * parallel vector on only one + * process, independent of what other + * processes do, with this function. + */ + BlockVector & + operator = (const PETScWrappers::BlockVector &v); + + /** + * Reinitialize the BlockVector to + * contain @p n_blocks of size @p + * block_size, each of which stores + * @p local_size elements + * locally. The @p communicator + * argument denotes which MPI channel + * each of these blocks shall + * communicate. + * + * If fast==false, the vector + * is filled with zeros. 
+ */ + void reinit (const unsigned int n_blocks, + const MPI_Comm &communicator, + const unsigned int block_size, + const unsigned int local_size, + const bool fast = false); + + /** + * Reinitialize the BlockVector such + * that it contains + * block_sizes.size() + * blocks. Each block is + * reinitialized to dimension + * block_sizes[i]. Each of + * them stores + * local_sizes[i] elements + * on the present process. + * + * If the number of blocks is the + * same as before this function + * was called, all vectors remain + * the same and reinit() is + * called for each vector. + * + * If fast==false, the vector + * is filled with zeros. + * + * Note that you must call this + * (or the other reinit() + * functions) function, rather + * than calling the reinit() + * functions of an individual + * block, to allow the block + * vector to update its caches of + * vector sizes. If you call + * reinit() of one of the + * blocks, then subsequent + * actions on this object may + * yield unpredictable results + * since they may be routed to + * the wrong block. + */ + void reinit (const std::vector &block_sizes, + const MPI_Comm &communicator, + const std::vector &local_sizes, + const bool fast=false); + + /** + * Change the dimension to that + * of the vector V. The same + * applies as for the other + * reinit() function. + * + * The elements of V are not + * copied, i.e. this function is + * the same as calling reinit + * (V.size(), fast). + * + * Note that you must call this + * (or the other reinit() + * functions) function, rather + * than calling the reinit() + * functions of an individual + * block, to allow the block + * vector to update its caches of + * vector sizes. If you call + * reinit() on one of the + * blocks, then subsequent + * actions on this object may + * yield unpredictable results + * since they may be routed to + * the wrong block. + */ + void reinit (const BlockVector &V, + const bool fast=false); + + /** + * Return a reference to the MPI + * communicator object in use with + * this vector. + */ + const MPI_Comm &get_mpi_communicator () const; + + /** + * Swap the contents of this + * vector and the other vector + * v. One could do this + * operation with a temporary + * variable and copying over the + * data elements, but this + * function is significantly more + * efficient since it only swaps + * the pointers to the data of + * the two vectors and therefore + * does not need to allocate + * temporary storage and move + * data around. + * + * Limitation: right now this + * function only works if both + * vectors have the same number + * of blocks. If needed, the + * numbers of blocks should be + * exchanged, too. + * + * This function is analog to the + * the swap() function of all C++ + * standard containers. Also, + * there is a global function + * swap(u,v) that simply calls + * u.swap(v), again in analogy + * to standard functions. + */ + void swap (BlockVector &v); + + /** + * Print to a stream. 
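// Illustration only (not part of this patch): the resizing caveat from the
// reinit() documentation above. Sizes and the communicator are placeholders.
#include <deal.II/lac/petsc_parallel_block_vector.h>
using namespace dealii;

void resize_block_vector ()
{
  PETScWrappers::MPI::BlockVector v (2, MPI_COMM_WORLD, 100, 100);

  // Resize through the block vector itself, so that it can update its
  // cached block sizes:
  v.reinit (2, MPI_COMM_WORLD, 200, 200);

  // Do NOT resize an individual block instead, e.g.
  //   v.block(0).reinit (MPI_COMM_WORLD, 200, 200);
  // the enclosing block vector would not notice, and later accesses may be
  // routed to the wrong block.
}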
+ */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Exception + */ + DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); + /** + * Exception + */ + DeclException0 (ExcNonMatchingBlockVectors); }; - /*@}*/ + /*@}*/ - /*----------------------- Inline functions ----------------------------------*/ + /*----------------------- Inline functions ----------------------------------*/ inline diff --cc deal.II/include/deal.II/lac/petsc_solver.h index 4636db9c08,e9fae6171c..fb9e933c46 --- a/deal.II/include/deal.II/lac/petsc_solver.h +++ b/deal.II/include/deal.II/lac/petsc_solver.h @@@ -34,257 -34,257 +34,257 @@@ namespace PETScWrapper class PreconditionerBase; - /** - * Base class for solver classes using the PETSc solvers. Since solvers in - * PETSc are selected based on flags passed to a generic solver object, - * basically all the actual solver calls happen in this class, and derived - * classes simply set the right flags to select one solver or another, or to - * set certain parameters for individual solvers. - * - * Optionally, the user can create a solver derived from the - * SolverBase class and can set the default arguments necessary to - * solve the linear system of equations with SolverControl. These - * default options can be overridden by specifying command line - * arguments of the form @p -ksp_*. For example, - * @p -ksp_monitor_true_residual prints out true residual norm - * (unpreconditioned) at each iteration and @p -ksp_view provides - * information about the linear solver and the preconditioner used in - * the current context. The type of the solver can also be changed - * during runtime by specifying @p -ksp_type {richardson, cg, gmres, - * fgmres, ..} to dynamically test the optimal solver along with a - * suitable preconditioner set using @p -pc_type {jacobi, bjacobi, - * ilu, lu, ..}. There are several other command line options - * available to modify the behavior of the PETSc linear solver and can - * be obtained from the documentation and manual - * pages. - * - * @note Repeated calls to solve() on a solver object with a Preconditioner - * must be used with care. The preconditioner is initialized in the first call - * to solve() and subsequent calls reuse the solver and preconditioner - * object. This is done for performance reasons. The solver and preconditioner - * can be reset by calling reset(). - * - * One of the gotchas of PETSc is that -- in particular in MPI mode -- it - * often does not produce very helpful error messages. In order to save - * other users some time in searching a hard to track down error, here is - * one situation and the error message one gets there: - * when you don't specify an MPI communicator to your solver's constructor. In - * this case, you will get an error of the following form from each of your - * parallel processes: - * @verbatim - * [1]PETSC ERROR: PCSetVector() line 1173 in src/ksp/pc/interface/precon.c - * [1]PETSC ERROR: Arguments must have same communicators! - * [1]PETSC ERROR: Different communicators in the two objects: Argument # 1 and 2! - * [1]PETSC ERROR: KSPSetUp() line 195 in src/ksp/ksp/interface/itfunc.c - * @endverbatim - * - * This error, on which one can spend a very long time figuring out - * what exactly goes wrong, results from not specifying an MPI - * communicator. Note that the communicator @em must match that of the - * matrix and all vectors in the linear system which we want to - * solve. 
Aggravating the situation is the fact that the default - * argument to the solver classes, @p PETSC_COMM_SELF, is the - * appropriate argument for the sequential case (which is why it is - * the default argument), so this error only shows up in parallel - * mode. - * - * @ingroup PETScWrappers - * @author Wolfgang Bangerth, 2004 - */ + /** + * Base class for solver classes using the PETSc solvers. Since solvers in + * PETSc are selected based on flags passed to a generic solver object, + * basically all the actual solver calls happen in this class, and derived + * classes simply set the right flags to select one solver or another, or to + * set certain parameters for individual solvers. + * + * Optionally, the user can create a solver derived from the + * SolverBase class and can set the default arguments necessary to + * solve the linear system of equations with SolverControl. These + * default options can be overridden by specifying command line + * arguments of the form @p -ksp_*. For example, + * @p -ksp_monitor_true_residual prints out true residual norm + * (unpreconditioned) at each iteration and @p -ksp_view provides + * information about the linear solver and the preconditioner used in + * the current context. The type of the solver can also be changed + * during runtime by specifying @p -ksp_type {richardson, cg, gmres, + * fgmres, ..} to dynamically test the optimal solver along with a + * suitable preconditioner set using @p -pc_type {jacobi, bjacobi, + * ilu, lu, ..}. There are several other command line options + * available to modify the behavior of the PETSc linear solver and can + * be obtained from the documentation and manual + * pages. + * + * @note Repeated calls to solve() on a solver object with a Preconditioner + * must be used with care. The preconditioner is initialized in the first call + * to solve() and subsequent calls reuse the solver and preconditioner + * object. This is done for performance reasons. The solver and preconditioner + * can be reset by calling reset(). + * + * One of the gotchas of PETSc is that -- in particular in MPI mode -- it + * often does not produce very helpful error messages. In order to save + * other users some time in searching a hard to track down error, here is + * one situation and the error message one gets there: + * when you don't specify an MPI communicator to your solver's constructor. In + * this case, you will get an error of the following form from each of your + * parallel processes: + * @verbatim + * [1]PETSC ERROR: PCSetVector() line 1173 in src/ksp/pc/interface/precon.c + * [1]PETSC ERROR: Arguments must have same communicators! + * [1]PETSC ERROR: Different communicators in the two objects: Argument # 1 and 2! + * [1]PETSC ERROR: KSPSetUp() line 195 in src/ksp/ksp/interface/itfunc.c + * @endverbatim + * + * This error, on which one can spend a very long time figuring out + * what exactly goes wrong, results from not specifying an MPI + * communicator. Note that the communicator @em must match that of the + * matrix and all vectors in the linear system which we want to + * solve. Aggravating the situation is the fact that the default + * argument to the solver classes, @p PETSC_COMM_SELF, is the + * appropriate argument for the sequential case (which is why it is + * the default argument), so this error only shows up in parallel + * mode. + * + * @ingroup PETScWrappers + * @author Wolfgang Bangerth, 2004 + */ class SolverBase { - public: - /** - * Constructor. 
Takes the solver - * control object and the MPI - * communicator over which parallel - * computations are to happen. - * - * Note that the communicator used here - * must match the communicator used in - * the system matrix, solution, and - * right hand side object of the solve - * to be done with this - * solver. Otherwise, PETSc will - * generate hard to track down errors, - * see the documentation of the - * SolverBase class. - */ - SolverBase (SolverControl &cn, - const MPI_Comm &mpi_communicator); - - /** - * Destructor. - */ - virtual ~SolverBase (); - - /** - * Solve the linear system - * Ax=b. Depending on the - * information provided by derived - * classes and the object passed as a - * preconditioner, one of the linear - * solvers and preconditioners of PETSc - * is chosen. Repeated calls to - * solve() do not reconstruct the - * preconditioner for performance - * reasons. See class Documentation. - */ - void - solve (const MatrixBase &A, - VectorBase &x, - const VectorBase &b, - const PreconditionerBase &preconditioner); - - - /** - * Resets the contained preconditioner - * and solver object. See class - * description for more details. - */ - virtual void reset(); - - - /** - * Sets a prefix name for the solver - * object. Useful when customizing the - * PETSc KSP object with command-line - * options. - */ - void set_prefix(const std::string &prefix); - - - /** - * Access to object that controls - * convergence. - */ - SolverControl & control() const; - - /** - * Exception - */ - DeclException1 (ExcPETScError, - int, - << "An error with error number " << arg1 - << " occurred while calling a PETSc function"); - - protected: - - /** - * Reference to the object that - * controls convergence of the - * iterative solver. In fact, for these - * PETSc wrappers, PETSc does so - * itself, but we copy the data from - * this object before starting the - * solution process, and copy the data - * back into it afterwards. - */ - SolverControl &solver_control; - - /** - * Copy of the MPI communicator object - * to be used for the solver. - */ - const MPI_Comm mpi_communicator; - - /** - * Function that takes a Krylov - * Subspace Solver context object, and - * sets the type of solver that is - * requested by the derived class. - */ - virtual void set_solver_type (KSP &ksp) const = 0; - - /** - * Solver prefix name to qualify options - * specific to the PETSc KSP object in the - * current context. - * Note: A hyphen (-) must NOT be given - * at the beginning of the prefix name. - * The first character of all runtime - * options is AUTOMATICALLY the hyphen. - */ - std::string prefix_name; - - private: - /** - * A function that is used in PETSc as - * a callback to check on - * convergence. It takes the - * information provided from PETSc and - * checks it against deal.II's own - * SolverControl objects to see if - * convergence has been reached. - */ - static + public: + /** + * Constructor. Takes the solver + * control object and the MPI + * communicator over which parallel + * computations are to happen. + * + * Note that the communicator used here + * must match the communicator used in + * the system matrix, solution, and + * right hand side object of the solve + * to be done with this + * solver. Otherwise, PETSc will + * generate hard to track down errors, + * see the documentation of the + * SolverBase class. + */ - SolverBase (SolverControl &cn, ++ SolverBase (SolverControl &cn, + const MPI_Comm &mpi_communicator); + + /** + * Destructor. 
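// Illustration only (not part of this patch): the typical call sequence with
// one of the concrete solver classes derived from SolverBase (here SolverCG).
// Matrix, vectors and communicator are assumed to exist elsewhere and to share
// the same MPI communicator, as the documentation above requires.
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>
using namespace dealii;

void solve_system (const MPI_Comm                         &mpi_communicator,
                   const PETScWrappers::MPI::SparseMatrix &system_matrix,
                   PETScWrappers::MPI::Vector             &solution,
                   const PETScWrappers::MPI::Vector       &system_rhs)
{
  SolverControl solver_control (1000, 1e-10);

  // The communicator must match that of the matrix and the vectors.
  PETScWrappers::SolverCG                solver (solver_control, mpi_communicator);
  PETScWrappers::PreconditionBlockJacobi preconditioner (system_matrix);

  // Optional: qualify run-time options for this solver only. The prefix
  // itself carries no leading hyphen, so the options become e.g.
  // "-stokes_ksp_type gmres -stokes_ksp_monitor".
  solver.set_prefix ("stokes_");

  solver.solve (system_matrix, solution, system_rhs, preconditioner);

  // Switching to a different preconditioner later requires solver.reset().
}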
+ */ + virtual ~SolverBase (); + + /** + * Solve the linear system + * Ax=b. Depending on the + * information provided by derived + * classes and the object passed as a + * preconditioner, one of the linear + * solvers and preconditioners of PETSc + * is chosen. Repeated calls to + * solve() do not reconstruct the + * preconditioner for performance + * reasons. See class Documentation. + */ + void + solve (const MatrixBase &A, + VectorBase &x, + const VectorBase &b, + const PreconditionerBase &preconditioner); + + + /** + * Resets the contained preconditioner + * and solver object. See class + * description for more details. + */ + virtual void reset(); + + + /** + * Sets a prefix name for the solver + * object. Useful when customizing the + * PETSc KSP object with command-line + * options. + */ + void set_prefix(const std::string &prefix); + + + /** + * Access to object that controls + * convergence. + */ + SolverControl &control() const; + + /** + * Exception + */ + DeclException1 (ExcPETScError, + int, + << "An error with error number " << arg1 + << " occurred while calling a PETSc function"); + + protected: + + /** + * Reference to the object that + * controls convergence of the + * iterative solver. In fact, for these + * PETSc wrappers, PETSc does so + * itself, but we copy the data from + * this object before starting the + * solution process, and copy the data + * back into it afterwards. + */ + SolverControl &solver_control; + + /** + * Copy of the MPI communicator object + * to be used for the solver. + */ + const MPI_Comm mpi_communicator; + + /** + * Function that takes a Krylov + * Subspace Solver context object, and + * sets the type of solver that is + * requested by the derived class. + */ + virtual void set_solver_type (KSP &ksp) const = 0; + + /** + * Solver prefix name to qualify options + * specific to the PETSc KSP object in the + * current context. + * Note: A hyphen (-) must NOT be given + * at the beginning of the prefix name. + * The first character of all runtime + * options is AUTOMATICALLY the hyphen. + */ + std::string prefix_name; + + private: + /** + * A function that is used in PETSc as + * a callback to check on + * convergence. It takes the + * information provided from PETSc and + * checks it against deal.II's own + * SolverControl objects to see if + * convergence has been reached. + */ + static #ifdef PETSC_USE_64BIT_INDICES - PetscErrorCode + PetscErrorCode #else - int + int #endif - convergence_test (KSP ksp, + convergence_test (KSP ksp, #ifdef PETSC_USE_64BIT_INDICES - const PetscInt iteration, + const PetscInt iteration, #else - const int iteration, + const int iteration, #endif - const PetscReal residual_norm, - KSPConvergedReason *reason, - void *solver_control); - - /** - * A structure that contains the PETSc - * solver and preconditioner - * objects. This object is preserved - * between subsequent calls to the - * solver if the same preconditioner is - * used as in the previous solver - * step. This may save some computation - * time, if setting up a preconditioner - * is expensive, such as in the case of - * an ILU for example. - * - * The actual declaration of this class - * is complicated by the fact that - * PETSc changed its solver interface - * completely and incompatibly between - * versions 2.1.6 and 2.2.0 :-( - * - * Objects of this type are explicitly - * created, but are destroyed when the - * surrounding solver object goes out - * of scope, or when we assign a new - * value to the pointer to this - * object. 
The respective *Destroy - * functions are therefore written into - * the destructor of this object, even - * though the object does not have a - * constructor. - */ - struct SolverData - { - /** - * Destructor - */ - ~SolverData (); - - /** - * Objects for Krylov subspace - * solvers and preconditioners. - */ - KSP ksp; - PC pc; - }; - - /** - * Pointer to an object that stores the - * solver context. This is recreated in - * the main solver routine if - * necessary. - */ - std_cxx1x::shared_ptr solver_data; + const PetscReal residual_norm, + KSPConvergedReason *reason, + void *solver_control); + + /** + * A structure that contains the PETSc + * solver and preconditioner + * objects. This object is preserved + * between subsequent calls to the + * solver if the same preconditioner is + * used as in the previous solver + * step. This may save some computation + * time, if setting up a preconditioner + * is expensive, such as in the case of + * an ILU for example. + * + * The actual declaration of this class + * is complicated by the fact that + * PETSc changed its solver interface + * completely and incompatibly between + * versions 2.1.6 and 2.2.0 :-( + * + * Objects of this type are explicitly + * created, but are destroyed when the + * surrounding solver object goes out + * of scope, or when we assign a new + * value to the pointer to this + * object. The respective *Destroy + * functions are therefore written into + * the destructor of this object, even + * though the object does not have a + * constructor. + */ + struct SolverData + { + /** + * Destructor + */ + ~SolverData (); + + /** + * Objects for Krylov subspace + * solvers and preconditioners. + */ + KSP ksp; + PC pc; + }; + + /** + * Pointer to an object that stores the + * solver context. This is recreated in + * the main solver routine if + * necessary. + */ + std_cxx1x::shared_ptr solver_data; }; diff --cc deal.II/include/deal.II/lac/petsc_vector_base.h index c897e523fe,eb265a8648..25c36dc628 --- a/deal.II/include/deal.II/lac/petsc_vector_base.h +++ b/deal.II/include/deal.II/lac/petsc_vector_base.h @@@ -42,824 -42,824 +42,824 @@@ template class Vector */ namespace PETScWrappers { - // forward declaration + // forward declaration class VectorBase; - /** - * @cond internal - */ + /** + * @cond internal + */ - /** - * A namespace for internal implementation details of the PETScWrapper - * members. - * @ingroup PETScWrappers - */ + /** + * A namespace for internal implementation details of the PETScWrapper + * members. + * @ingroup PETScWrappers + */ namespace internal { - /** - * Since access to PETSc vectors only - * goes through functions, rather than by - * obtaining a reference to a vector - * element, we need a wrapper class that - * acts as if it was a reference, and - * basically redirects all accesses (read - * and write) to member functions of this - * class. - * - * This class implements such a wrapper: - * it is initialized with a vector and an - * element within it, and has a - * conversion operator to extract the - * scalar value of this element. It also - * has a variety of assignment operator - * for writing to this one element. - * @ingroup PETScWrappers - */ + /** + * Since access to PETSc vectors only + * goes through functions, rather than by + * obtaining a reference to a vector + * element, we need a wrapper class that + * acts as if it was a reference, and + * basically redirects all accesses (read + * and write) to member functions of this + * class. 
+ * + * This class implements such a wrapper: + * it is initialized with a vector and an + * element within it, and has a + * conversion operator to extract the + * scalar value of this element. It also + * has a variety of assignment operator + * for writing to this one element. + * @ingroup PETScWrappers + */ class VectorReference { - private: - /** - * Constructor. It is made private so - * as to only allow the actual vector - * class to create it. - */ - VectorReference (const VectorBase &vector, - const unsigned int index); - - public: - /** - * This looks like a copy operator, - * but does something different than - * usual. In particular, it does not - * copy the member variables of this - * reference. Rather, it handles the - * situation where we have two - * vectors @p v and @p w, and assign - * elements like in - * v(i)=w(i). Here, both - * left and right hand side of the - * assignment have data type - * VectorReference, but what we - * really mean is to assign the - * vector elements represented by the - * two references. This operator - * implements this operation. Note - * also that this allows us to make - * the assignment operator const. - */ - const VectorReference & operator = (const VectorReference &r) const; - - /** - * The same function as above, but - * for non-const reference - * objects. The function is needed - * since the compiler might otherwise - * automatically generate a copy - * operator for non-const objects. - */ - VectorReference & operator = (const VectorReference &r); - - /** - * Set the referenced element of the - * vector to s. - */ - const VectorReference & operator = (const PetscScalar &s) const; - - /** - * Add s to the referenced - * element of the vector. - */ - const VectorReference & operator += (const PetscScalar &s) const; - - /** - * Subtract s from the - * referenced element of the vector. - */ - const VectorReference & operator -= (const PetscScalar &s) const; - - /** - * Multiply the referenced element of - * the vector by s. - */ - const VectorReference & operator *= (const PetscScalar &s) const; - - /** - * Divide the referenced element of - * the vector by s. - */ - const VectorReference & operator /= (const PetscScalar &s) const; - - /** - * Convert the reference to an actual - * value, i.e. return the value of - * the referenced element of the - * vector. - */ - operator PetscScalar () const; - - /** - * Exception - */ - DeclException1 (ExcPETScError, - int, - << "An error with error number " << arg1 - << " occurred while calling a PETSc function"); - /** - * Exception - */ - DeclException3 (ExcAccessToNonlocalElement, - int, int, int, - << "You tried to access element " << arg1 - << " of a distributed vector, but only elements " - << arg2 << " through " << arg3 - << " are stored locally and can be accessed."); - /** - * Exception. - */ - DeclException2 (ExcWrongMode, - int, int, - << "You tried to do a " - << (arg1 == 1 ? - "'set'" : - (arg1 == 2 ? - "'add'" : "???")) - << " operation but the vector is currently in " - << (arg2 == 1 ? - "'set'" : - (arg2 == 2 ? - "'add'" : "???")) - << " mode. You first have to call 'compress()'."); - - private: - /** - * Point to the vector we are - * referencing. - */ - const VectorBase &vector; - - /** - * Index of the referenced element of - * the vector. - */ - const unsigned int index; - - /** - * Make the vector class a friend, so - * that it can create objects of the - * present type. - */ - friend class ::dealii::PETScWrappers::VectorBase; + private: + /** + * Constructor. 
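// Illustration only (not part of this patch): the element access that the
// VectorReference wrapper above makes possible. Sizes and values are made up;
// everything is stored locally, so all reads are allowed.
#include <deal.II/lac/petsc_parallel_vector.h>
using namespace dealii;

void element_access ()
{
  PETScWrappers::MPI::Vector v (MPI_COMM_WORLD, 10, 10);
  PETScWrappers::MPI::Vector w (MPI_COMM_WORLD, 10, 10);

  w(3) = 2.0;                  // a 'set' operation through a VectorReference
  w.compress ();               // flush PETSc's buffers before reading w

  v(3) = w(3);                 // copies the element value, not the reference
  v.compress ();

  v(3) += 1.0;                 // a separate 'add' phase; mixing set and add
  v.compress ();               // without compress() triggers ExcWrongMode

  const PetscScalar x = v(3);  // read access via operator PetscScalar()
  (void)x;
}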
It is made private so + * as to only allow the actual vector + * class to create it. + */ - VectorReference (const VectorBase &vector, ++ VectorReference (const VectorBase &vector, + const unsigned int index); + + public: + /** + * This looks like a copy operator, + * but does something different than + * usual. In particular, it does not + * copy the member variables of this + * reference. Rather, it handles the + * situation where we have two + * vectors @p v and @p w, and assign + * elements like in + * v(i)=w(i). Here, both + * left and right hand side of the + * assignment have data type + * VectorReference, but what we + * really mean is to assign the + * vector elements represented by the + * two references. This operator + * implements this operation. Note + * also that this allows us to make + * the assignment operator const. + */ + const VectorReference &operator = (const VectorReference &r) const; + + /** + * The same function as above, but + * for non-const reference + * objects. The function is needed + * since the compiler might otherwise + * automatically generate a copy + * operator for non-const objects. + */ + VectorReference &operator = (const VectorReference &r); + + /** + * Set the referenced element of the + * vector to s. + */ + const VectorReference &operator = (const PetscScalar &s) const; + + /** + * Add s to the referenced + * element of the vector. + */ + const VectorReference &operator += (const PetscScalar &s) const; + + /** + * Subtract s from the + * referenced element of the vector. + */ + const VectorReference &operator -= (const PetscScalar &s) const; + + /** + * Multiply the referenced element of + * the vector by s. + */ + const VectorReference &operator *= (const PetscScalar &s) const; + + /** + * Divide the referenced element of + * the vector by s. + */ + const VectorReference &operator /= (const PetscScalar &s) const; + + /** + * Convert the reference to an actual + * value, i.e. return the value of + * the referenced element of the + * vector. + */ + operator PetscScalar () const; + + /** + * Exception + */ + DeclException1 (ExcPETScError, + int, + << "An error with error number " << arg1 + << " occurred while calling a PETSc function"); + /** + * Exception + */ + DeclException3 (ExcAccessToNonlocalElement, + int, int, int, + << "You tried to access element " << arg1 + << " of a distributed vector, but only elements " + << arg2 << " through " << arg3 + << " are stored locally and can be accessed."); + /** + * Exception. + */ + DeclException2 (ExcWrongMode, + int, int, + << "You tried to do a " + << (arg1 == 1 ? + "'set'" : + (arg1 == 2 ? + "'add'" : "???")) + << " operation but the vector is currently in " + << (arg2 == 1 ? + "'set'" : + (arg2 == 2 ? + "'add'" : "???")) + << " mode. You first have to call 'compress()'."); + + private: + /** + * Point to the vector we are + * referencing. + */ + const VectorBase &vector; + + /** + * Index of the referenced element of + * the vector. + */ + const unsigned int index; + + /** + * Make the vector class a friend, so + * that it can create objects of the + * present type. + */ + friend class ::dealii::PETScWrappers::VectorBase; }; } - /** - * @endcond - */ - - - /** - * Base class for all vector classes that are implemented on top of the PETSc - * vector types. Since in PETSc all vector types (i.e. 
sequential and parallel - * ones) are built by filling the contents of an abstract object that is only - * referenced through a pointer of a type that is independent of the actual - * vector type, we can implement almost all functionality of vectors in this - * base class. Derived classes will then only have to provide the - * functionality to create one or the other kind of vector. - * - * The interface of this class is modeled after the existing Vector - * class in deal.II. It has almost the same member functions, and is often - * exchangable. However, since PETSc only supports a single scalar type - * (either double, float, or a complex data type), it is not templated, and - * only works with whatever your PETSc installation has defined the data type - * @p PetscScalar to. - * - * Note that PETSc only guarantees that operations do what you expect if the - * functions @p VecAssemblyBegin and @p VecAssemblyEnd have been called - * after vector assembly. Therefore, you need to call Vector::compress() - * before you actually use the vector. - * - * @ingroup PETScWrappers - * @author Wolfgang Bangerth, 2004 - */ + /** + * @endcond + */ + + + /** + * Base class for all vector classes that are implemented on top of the PETSc + * vector types. Since in PETSc all vector types (i.e. sequential and parallel + * ones) are built by filling the contents of an abstract object that is only + * referenced through a pointer of a type that is independent of the actual + * vector type, we can implement almost all functionality of vectors in this + * base class. Derived classes will then only have to provide the + * functionality to create one or the other kind of vector. + * + * The interface of this class is modeled after the existing Vector + * class in deal.II. It has almost the same member functions, and is often + * exchangable. However, since PETSc only supports a single scalar type + * (either double, float, or a complex data type), it is not templated, and + * only works with whatever your PETSc installation has defined the data type + * @p PetscScalar to. + * + * Note that PETSc only guarantees that operations do what you expect if the + * functions @p VecAssemblyBegin and @p VecAssemblyEnd have been called + * after vector assembly. Therefore, you need to call Vector::compress() + * before you actually use the vector. + * + * @ingroup PETScWrappers + * @author Wolfgang Bangerth, 2004 + */ class VectorBase : public Subscriptor { - public: - /** - * Declare some of the standard types - * used in all containers. These types - * parallel those in the C++ - * standard libraries vector<...> - * class. - */ - typedef PetscScalar value_type; - typedef PetscReal real_type; - typedef std::size_t size_type; - typedef internal::VectorReference reference; - typedef const internal::VectorReference const_reference; - - /** - * Default constructor. It doesn't do - * anything, derived classes will have - * to initialize the data. - */ - VectorBase (); - - /** - * Copy constructor. Sets the dimension - * to that of the given vector, and - * copies all elements. - */ - VectorBase (const VectorBase &v); - - /** - * Initialize a Vector from a PETSc Vec - * object. Note that we do not copy the - * vector and we do not attain - * ownership, so we do not destroy the - * PETSc object in the destructor. - */ - explicit VectorBase (const Vec & v); - - /** - * Destructor - */ - virtual ~VectorBase (); - - /** - * Compress the underlying - * representation of the PETSc object, - * i.e. 
flush the buffers of the vector - * object if it has any. This function - * is necessary after writing into a - * vector element-by-element and before - * anything else can be done on it. - * - * See @ref GlossCompress "Compressing distributed objects" - * for more information. - */ - void compress (::dealii::VectorOperation::values operation - =::dealii::VectorOperation::unknown); - - /** - * Set all components of the vector to - * the given number @p s. Simply pass - * this down to the individual block - * objects, but we still need to declare - * this function to make the example - * given in the discussion about making - * the constructor explicit work. - * - * - * Since the semantics of assigning a - * scalar to a vector are not - * immediately clear, this operator - * should really only be used if you - * want to set the entire vector to - * zero. This allows the intuitive - * notation v=0. Assigning - * other values is deprecated and may - * be disallowed in the future. - */ - VectorBase & operator = (const PetscScalar s); - - /** - * Test for equality. This function - * assumes that the present vector and - * the one to compare with have the same - * size already, since comparing vectors - * of different sizes makes not much - * sense anyway. - */ - bool operator == (const VectorBase &v) const; - - /** - * Test for inequality. This function - * assumes that the present vector and - * the one to compare with have the same - * size already, since comparing vectors - * of different sizes makes not much - * sense anyway. - */ - bool operator != (const VectorBase &v) const; - - /** - * Return the global dimension of the - * vector. - */ - unsigned int size () const; - - /** - * Return the local dimension of the - * vector, i.e. the number of elements - * stored on the present MPI - * process. For sequential vectors, - * this number is the same as size(), - * but for parallel vectors it may be - * smaller. - * - * To figure out which elements - * exactly are stored locally, - * use local_range(). - */ - unsigned int local_size () const; - - /** - * Return a pair of indices - * indicating which elements of - * this vector are stored - * locally. The first number is - * the index of the first - * element stored, the second - * the index of the one past - * the last one that is stored - * locally. If this is a - * sequential vector, then the - * result will be the pair - * (0,N), otherwise it will be - * a pair (i,i+n), where - * n=local_size(). - */ - std::pair - local_range () const; - - /** - * Return whether @p index is - * in the local range or not, - * see also local_range(). - */ - bool in_local_range (const unsigned int index) const; - - /** - * Return if the vector contains ghost - * elements. - */ - bool has_ghost_elements() const; - - /** - * Provide access to a given element, - * both read and write. - */ - reference - operator () (const unsigned int index); - - /** - * Provide read-only access to an - * element. - */ - PetscScalar - operator () (const unsigned int index) const; - - /** - * Provide access to a given - * element, both read and write. - * - * Exactly the same as operator(). - */ - reference - operator [] (const unsigned int index); - - /** - * Provide read-only access to an - * element. This is equivalent to - * the el() command. - * - * Exactly the same as operator(). 
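// Usage sketch (illustrative only, not from the library sources): the
// element-write / compress() cycle described in the surrounding comments.
// Assumes the reference comes from a concrete PETSc vector class and that
// VectorOperation::insert is the matching flag for a 'set' phase.
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed

void fill_and_flush (dealii::PETScWrappers::VectorBase &v)
{
  v = 0;                                          // intuitive notation: zero out the vector
  for (unsigned int i = 0; i < v.size (); ++i)
    if (v.in_local_range (i))
      v (i) = 1.0 * i;                            // element-by-element writes ('set' mode)
  v.compress (dealii::VectorOperation::insert);   // flush PETSc buffers before further use
}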
- */ - PetscScalar - operator [] (const unsigned int index) const; - - /** - * A collective set operation: instead - * of setting individual elements of a - * vector, this function allows to set - * a whole set of elements at once. The - * indices of the elements to be set - * are stated in the first argument, - * the corresponding values in the - * second. - */ - void set (const std::vector &indices, - const std::vector &values); - - /** - * A collective add operation: This - * function adds a whole set of values - * stored in @p values to the vector - * components specified by @p indices. - */ - void add (const std::vector &indices, - const std::vector &values); - - /** - * This is a second collective - * add operation. As a - * difference, this function - * takes a deal.II vector of - * values. - */ - void add (const std::vector &indices, - const ::dealii::Vector &values); - - /** - * Take an address where - * n_elements are stored - * contiguously and add them into - * the vector. Handles all cases - * which are not covered by the - * other two add() - * functions above. - */ - void add (const unsigned int n_elements, - const unsigned int *indices, - const PetscScalar *values); - - /** - * Return the scalar product of two - * vectors. The vectors must have the - * same size. - */ - PetscScalar operator * (const VectorBase &vec) const; - - /** - * Return square of the $l_2$-norm. - */ - real_type norm_sqr () const; - - /** - * Mean value of the elements of - * this vector. - */ - PetscScalar mean_value () const; - - /** - * $l_1$-norm of the vector. - * The sum of the absolute values. - */ - real_type l1_norm () const; - - /** - * $l_2$-norm of the vector. The - * square root of the sum of the - * squares of the elements. - */ - real_type l2_norm () const; - - /** - * $l_p$-norm of the vector. The - * pth root of the sum of the pth - * powers of the absolute values - * of the elements. - */ - real_type lp_norm (const real_type p) const; - - /** - * Maximum absolute value of the - * elements. - */ - real_type linfty_norm () const; - - /** - * Normalize vector by dividing - * by the $l_2$-norm of the - * vector. Return vector norm - * before normalization. - */ - real_type normalize () const; - - /** - * Return vector component with - * the minimal magnitude. - */ - real_type min () const; - - /** - * Return vector component with - * the maximal magnitude. - */ - real_type max () const; - - - /** - * Replace every element in a - * vector with its absolute - * value. - */ - VectorBase & abs (); - - /** - * Conjugate a vector. - */ - VectorBase & conjugate (); - - /** - * A collective piecewise - * multiply operation on - * this vector - * with itself. TODO: The model - * for this function should be - * similer to add (). - */ - VectorBase & mult (); - - /** - * Same as above, but a - * collective piecewise - * multiply operation of - * this vector - * with v. - */ - VectorBase & mult (const VectorBase &v); - - /** - * Same as above, but a - * collective piecewise - * multiply operation of - * u with v. - */ - VectorBase & mult (const VectorBase &u, - const VectorBase &v); - - /** - * Return whether the vector contains - * only elements with value zero. This - * function is mainly for internal - * consistency checks and should - * seldom be used when not in debug - * mode since it uses quite some time. - */ - bool all_zero () const; - - /** - * Return @p true if the vector has no - * negative entries, i.e. all entries - * are zero or positive. 
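// Illustrative sketch (not from the library sources) of the norm and reduction
// members declared in this block; every call appears verbatim in this header.
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed

void report_norms (const dealii::PETScWrappers::VectorBase &v)
{
  const PetscReal   l1   = v.l1_norm ();       // sum of absolute values
  const PetscReal   l2   = v.l2_norm ();       // sqrt of the sum of squares
  const PetscReal   linf = v.linfty_norm ();   // largest absolute value
  const PetscScalar mean = v.mean_value ();    // arithmetic mean of the entries
  (void)l1;  (void)l2;  (void)linf;  (void)mean;
}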
This function - * is used, for example, to check - * whether refinement indicators are - * really all positive (or zero). - */ - bool is_non_negative () const; - - /** - * Multiply the entire vector by a - * fixed factor. - */ - VectorBase & operator *= (const PetscScalar factor); - - /** - * Divide the entire vector by a - * fixed factor. - */ - VectorBase & operator /= (const PetscScalar factor); - - /** - * Add the given vector to the present - * one. - */ - VectorBase & operator += (const VectorBase &V); - - /** - * Subtract the given vector from the - * present one. - */ - VectorBase & operator -= (const VectorBase &V); - - /** - * Addition of @p s to all - * components. Note that @p s is a - * scalar and not a vector. - */ - void add (const PetscScalar s); - - /** - * Simple vector addition, equal to the - * operator +=. - */ - void add (const VectorBase &V); - - /** - * Simple addition of a multiple of a - * vector, i.e. *this += a*V. - */ - void add (const PetscScalar a, const VectorBase &V); - - /** - * Multiple addition of scaled vectors, - * i.e. *this += a*V+b*W. - */ - void add (const PetscScalar a, const VectorBase &V, - const PetscScalar b, const VectorBase &W); - - /** - * Scaling and simple vector addition, - * i.e. - * *this = s*(*this)+V. - */ - void sadd (const PetscScalar s, - const VectorBase &V); - - /** - * Scaling and simple addition, i.e. - * *this = s*(*this)+a*V. - */ - void sadd (const PetscScalar s, - const PetscScalar a, - const VectorBase &V); - - /** - * Scaling and multiple addition. - */ - void sadd (const PetscScalar s, - const PetscScalar a, - const VectorBase &V, - const PetscScalar b, - const VectorBase &W); - - /** - * Scaling and multiple addition. - * *this = s*(*this)+a*V + b*W + c*X. - */ - void sadd (const PetscScalar s, - const PetscScalar a, - const VectorBase &V, - const PetscScalar b, - const VectorBase &W, - const PetscScalar c, - const VectorBase &X); - - /** - * Scale each element of this - * vector by the corresponding - * element in the argument. This - * function is mostly meant to - * simulate multiplication (and - * immediate re-assignment) by a - * diagonal scaling matrix. - */ - void scale (const VectorBase &scaling_factors); - - /** - * Assignment *this = a*V. - */ - void equ (const PetscScalar a, const VectorBase &V); - - /** - * Assignment *this = a*V + b*W. - */ - void equ (const PetscScalar a, const VectorBase &V, - const PetscScalar b, const VectorBase &W); - - /** - * Compute the elementwise ratio of the - * two given vectors, that is let - * this[i] = a[i]/b[i]. This is - * useful for example if you want to - * compute the cellwise ratio of true to - * estimated error. - * - * This vector is appropriately - * scaled to hold the result. - * - * If any of the b[i] is - * zero, the result is - * undefined. No attempt is made - * to catch such situations. - */ - void ratio (const VectorBase &a, - const VectorBase &b); - - /** - * Updates the ghost values of this - * vector. This is necessary after any - * modification before reading ghost - * values. - */ - void update_ghost_values() const; - - /** - * Print to a - * stream. @p precision denotes - * the desired precision with - * which values shall be printed, - * @p scientific whether - * scientific notation shall be - * used. If @p across is - * @p true then the vector is - * printed in a line, while if - * @p false then the elements - * are printed on a separate line - * each. 
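// Illustrative sketch (not from the library sources) of the BLAS-like updates
// declared in this block: *this += a*V and *this = s*(*this) + a*V.
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed

void axpy_like_updates (dealii::PETScWrappers::VectorBase &x,
                        const dealii::PETScWrappers::VectorBase &v,
                        const dealii::PETScWrappers::VectorBase &w)
{
  x.add  (2.0, v);        // x += 2*v
  x.sadd (0.5, 3.0, w);   // x  = 0.5*x + 3*w
  x.equ  (1.0, v);        // x  = 1*v
}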
- */ - void print (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * Swap the contents of this - * vector and the other vector - * @p v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. - * - * This function is analog to the - * the @p swap function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. - */ - void swap (VectorBase &v); - - /** - * Conversion operator to gain access - * to the underlying PETSc type. If you - * do this, you cut this class off some - * information it may need, so this - * conversion operator should only be - * used if you know what you do. In - * particular, it should only be used - * for read-only operations into the - * vector. - */ - operator const Vec & () const; - - /** - * Estimate for the memory - * consumption (not implemented - * for this class). - */ - std::size_t memory_consumption () const; - - protected: - /** - * A generic vector object in - * PETSc. The actual type, a sequential - * vector, is set in the constructor. - */ - Vec vector; - - /** - * Denotes if this vector has ghost - * indices associated with it. This - * means that at least one of the - * processes in a parallel programm has - * at least one ghost index. - */ - bool ghosted; - - /** - * This vector contains the global - * indices of the ghost values. The - * location in this vector denotes the - * local numbering, which is used in - * PETSc. - */ - IndexSet ghost_indices; - - /** - * Store whether the last action was a - * write or add operation. This - * variable is @p mutable so that the - * accessor classes can write to it, - * even though the vector object they - * refer to is constant. - */ - mutable ::dealii::VectorOperation::values last_action; - - /** - * Make the reference class a friend. - */ - friend class internal::VectorReference; - - /** - * Specifies if the vector is the owner - * of the PETSc Vec. This is true if it - * got created by this class and - * determines if it gets destructed in - * the destructor. - */ - bool attained_ownership; - - /** - * Collective set or add - * operation: This function is - * invoked by the collective @p - * set and @p add with the - * @p add_values flag set to the - * corresponding value. - */ - void do_set_add_operation (const unsigned int n_elements, - const unsigned int *indices, - const PetscScalar *values, - const bool add_values); + public: + /** + * Declare some of the standard types + * used in all containers. These types + * parallel those in the C++ + * standard libraries vector<...> + * class. + */ + typedef PetscScalar value_type; + typedef PetscReal real_type; + typedef std::size_t size_type; + typedef internal::VectorReference reference; + typedef const internal::VectorReference const_reference; + + /** + * Default constructor. It doesn't do + * anything, derived classes will have + * to initialize the data. + */ + VectorBase (); + + /** + * Copy constructor. Sets the dimension + * to that of the given vector, and + * copies all elements. + */ + VectorBase (const VectorBase &v); + + /** + * Initialize a Vector from a PETSc Vec + * object. 
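// Illustrative sketch (not from the library sources) of print() and the cheap
// swap() idiom documented nearby; swap() only exchanges the underlying PETSc
// pointers and moves no element data.
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed
#include <iostream>

void show_and_swap (dealii::PETScWrappers::VectorBase &u,
                    dealii::PETScWrappers::VectorBase &v)
{
  u.print (std::cout, /*precision=*/6, /*scientific=*/true, /*across=*/true);
  u.swap (v);   // afterwards u refers to v's data and vice versa
}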
Note that we do not copy the + * vector and we do not attain + * ownership, so we do not destroy the + * PETSc object in the destructor. + */ + explicit VectorBase (const Vec &v); + + /** + * Destructor + */ + virtual ~VectorBase (); + + /** + * Compress the underlying + * representation of the PETSc object, + * i.e. flush the buffers of the vector + * object if it has any. This function + * is necessary after writing into a + * vector element-by-element and before + * anything else can be done on it. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + */ + void compress (::dealii::VectorOperation::values operation + =::dealii::VectorOperation::unknown); + + /** + * Set all components of the vector to + * the given number @p s. Simply pass + * this down to the individual block + * objects, but we still need to declare + * this function to make the example + * given in the discussion about making + * the constructor explicit work. + * + * + * Since the semantics of assigning a + * scalar to a vector are not + * immediately clear, this operator + * should really only be used if you + * want to set the entire vector to + * zero. This allows the intuitive + * notation v=0. Assigning + * other values is deprecated and may + * be disallowed in the future. + */ + VectorBase &operator = (const PetscScalar s); + + /** + * Test for equality. This function + * assumes that the present vector and + * the one to compare with have the same + * size already, since comparing vectors + * of different sizes makes not much + * sense anyway. + */ + bool operator == (const VectorBase &v) const; + + /** + * Test for inequality. This function + * assumes that the present vector and + * the one to compare with have the same + * size already, since comparing vectors + * of different sizes makes not much + * sense anyway. + */ + bool operator != (const VectorBase &v) const; + + /** + * Return the global dimension of the + * vector. + */ + unsigned int size () const; + + /** + * Return the local dimension of the + * vector, i.e. the number of elements + * stored on the present MPI + * process. For sequential vectors, + * this number is the same as size(), + * but for parallel vectors it may be + * smaller. + * + * To figure out which elements + * exactly are stored locally, + * use local_range(). + */ + unsigned int local_size () const; + + /** + * Return a pair of indices + * indicating which elements of + * this vector are stored + * locally. The first number is + * the index of the first + * element stored, the second + * the index of the one past + * the last one that is stored + * locally. If this is a + * sequential vector, then the + * result will be the pair + * (0,N), otherwise it will be + * a pair (i,i+n), where + * n=local_size(). + */ + std::pair + local_range () const; + + /** + * Return whether @p index is + * in the local range or not, + * see also local_range(). + */ + bool in_local_range (const unsigned int index) const; + + /** + * Return if the vector contains ghost + * elements. + */ + bool has_ghost_elements() const; + + /** + * Provide access to a given element, + * both read and write. + */ + reference + operator () (const unsigned int index); + + /** + * Provide read-only access to an + * element. + */ + PetscScalar + operator () (const unsigned int index) const; + + /** + * Provide access to a given + * element, both read and write. + * + * Exactly the same as operator(). 
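// Illustrative sketch (not from the library sources) of the locality queries
// declared in this block; useful before touching individual elements of a
// vector that may be distributed across MPI processes.
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed
#include <iostream>
#include <utility>

void print_local_info (const dealii::PETScWrappers::VectorBase &v)
{
  const std::pair<unsigned int, unsigned int> range = v.local_range ();
  std::cout << "global size "    << v.size ()
            << ", locally owned " << v.local_size ()
            << " elements in ["   << range.first << ',' << range.second << ")"
            << std::endl;

  if (v.in_local_range (0))
    {
      const PetscScalar first = v (0);   // safe: element 0 is stored on this process
      (void)first;
    }
}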
+ */ + reference + operator [] (const unsigned int index); + + /** + * Provide read-only access to an + * element. This is equivalent to + * the el() command. + * + * Exactly the same as operator(). + */ + PetscScalar + operator [] (const unsigned int index) const; + + /** + * A collective set operation: instead + * of setting individual elements of a + * vector, this function allows to set + * a whole set of elements at once. The + * indices of the elements to be set + * are stated in the first argument, + * the corresponding values in the + * second. + */ + void set (const std::vector &indices, - const std::vector &values); ++ const std::vector &values); + + /** + * A collective add operation: This + * function adds a whole set of values + * stored in @p values to the vector + * components specified by @p indices. + */ + void add (const std::vector &indices, - const std::vector &values); ++ const std::vector &values); + + /** + * This is a second collective + * add operation. As a + * difference, this function + * takes a deal.II vector of + * values. + */ + void add (const std::vector &indices, + const ::dealii::Vector &values); + + /** + * Take an address where + * n_elements are stored + * contiguously and add them into + * the vector. Handles all cases + * which are not covered by the + * other two add() + * functions above. + */ + void add (const unsigned int n_elements, + const unsigned int *indices, - const PetscScalar *values); ++ const PetscScalar *values); + + /** + * Return the scalar product of two + * vectors. The vectors must have the + * same size. + */ + PetscScalar operator * (const VectorBase &vec) const; + + /** + * Return square of the $l_2$-norm. + */ + real_type norm_sqr () const; + + /** + * Mean value of the elements of + * this vector. + */ + PetscScalar mean_value () const; + + /** + * $l_1$-norm of the vector. + * The sum of the absolute values. + */ + real_type l1_norm () const; + + /** + * $l_2$-norm of the vector. The + * square root of the sum of the + * squares of the elements. + */ + real_type l2_norm () const; + + /** + * $l_p$-norm of the vector. The + * pth root of the sum of the pth + * powers of the absolute values + * of the elements. + */ + real_type lp_norm (const real_type p) const; + + /** + * Maximum absolute value of the + * elements. + */ + real_type linfty_norm () const; + + /** + * Normalize vector by dividing + * by the $l_2$-norm of the + * vector. Return vector norm + * before normalization. + */ + real_type normalize () const; + + /** + * Return vector component with + * the minimal magnitude. + */ + real_type min () const; + + /** + * Return vector component with + * the maximal magnitude. + */ + real_type max () const; + + + /** + * Replace every element in a + * vector with its absolute + * value. + */ + VectorBase &abs (); + + /** + * Conjugate a vector. + */ + VectorBase &conjugate (); + + /** + * A collective piecewise + * multiply operation on + * this vector + * with itself. TODO: The model + * for this function should be + * similer to add (). + */ + VectorBase &mult (); + + /** + * Same as above, but a + * collective piecewise + * multiply operation of + * this vector + * with v. + */ + VectorBase &mult (const VectorBase &v); + + /** + * Same as above, but a + * collective piecewise + * multiply operation of + * u with v. + */ + VectorBase &mult (const VectorBase &u, + const VectorBase &v); + + /** + * Return whether the vector contains + * only elements with value zero. 
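// Illustrative sketch (not from the library sources) of the collective set()
// declared in this block; per the documented signatures the indices are
// unsigned int and the values PetscScalar, and a compress() call has to follow.
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed
#include <vector>

void set_two_entries (dealii::PETScWrappers::VectorBase &v)
{
  std::vector<unsigned int> indices (2);
  std::vector<PetscScalar>  values  (2);
  indices[0] = 0;  values[0] = 1.0;
  indices[1] = 2;  values[1] = 4.0;   // assumes v.size() > 2

  v.set (indices, values);
  v.compress (dealii::VectorOperation::insert);   // flag name assumed
}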
This + * function is mainly for internal + * consistency checks and should + * seldom be used when not in debug + * mode since it uses quite some time. + */ + bool all_zero () const; + + /** + * Return @p true if the vector has no + * negative entries, i.e. all entries + * are zero or positive. This function + * is used, for example, to check + * whether refinement indicators are + * really all positive (or zero). + */ + bool is_non_negative () const; + + /** + * Multiply the entire vector by a + * fixed factor. + */ + VectorBase &operator *= (const PetscScalar factor); + + /** + * Divide the entire vector by a + * fixed factor. + */ + VectorBase &operator /= (const PetscScalar factor); + + /** + * Add the given vector to the present + * one. + */ + VectorBase &operator += (const VectorBase &V); + + /** + * Subtract the given vector from the + * present one. + */ + VectorBase &operator -= (const VectorBase &V); + + /** + * Addition of @p s to all + * components. Note that @p s is a + * scalar and not a vector. + */ + void add (const PetscScalar s); + + /** + * Simple vector addition, equal to the + * operator +=. + */ + void add (const VectorBase &V); + + /** + * Simple addition of a multiple of a + * vector, i.e. *this += a*V. + */ + void add (const PetscScalar a, const VectorBase &V); + + /** + * Multiple addition of scaled vectors, + * i.e. *this += a*V+b*W. + */ + void add (const PetscScalar a, const VectorBase &V, + const PetscScalar b, const VectorBase &W); + + /** + * Scaling and simple vector addition, + * i.e. + * *this = s*(*this)+V. + */ + void sadd (const PetscScalar s, + const VectorBase &V); + + /** + * Scaling and simple addition, i.e. + * *this = s*(*this)+a*V. + */ + void sadd (const PetscScalar s, + const PetscScalar a, + const VectorBase &V); + + /** + * Scaling and multiple addition. + */ + void sadd (const PetscScalar s, + const PetscScalar a, + const VectorBase &V, + const PetscScalar b, + const VectorBase &W); + + /** + * Scaling and multiple addition. + * *this = s*(*this)+a*V + b*W + c*X. + */ + void sadd (const PetscScalar s, + const PetscScalar a, + const VectorBase &V, + const PetscScalar b, + const VectorBase &W, + const PetscScalar c, + const VectorBase &X); + + /** + * Scale each element of this + * vector by the corresponding + * element in the argument. This + * function is mostly meant to + * simulate multiplication (and + * immediate re-assignment) by a + * diagonal scaling matrix. + */ + void scale (const VectorBase &scaling_factors); + + /** + * Assignment *this = a*V. + */ + void equ (const PetscScalar a, const VectorBase &V); + + /** + * Assignment *this = a*V + b*W. + */ + void equ (const PetscScalar a, const VectorBase &V, + const PetscScalar b, const VectorBase &W); + + /** + * Compute the elementwise ratio of the + * two given vectors, that is let + * this[i] = a[i]/b[i]. This is + * useful for example if you want to + * compute the cellwise ratio of true to + * estimated error. + * + * This vector is appropriately + * scaled to hold the result. + * + * If any of the b[i] is + * zero, the result is + * undefined. No attempt is made + * to catch such situations. + */ + void ratio (const VectorBase &a, + const VectorBase &b); + + /** + * Updates the ghost values of this + * vector. This is necessary after any + * modification before reading ghost + * values. + */ + void update_ghost_values() const; + + /** + * Print to a + * stream. 
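// Illustrative sketch (not from the library sources) of the componentwise
// operations declared in this block: scale() multiplies entry by entry and
// ratio() forms this[i] = a[i]/b[i].
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed

void componentwise_ops (dealii::PETScWrappers::VectorBase &result,
                        const dealii::PETScWrappers::VectorBase &a,
                        const dealii::PETScWrappers::VectorBase &b)
{
  result.ratio (a, b);   // result[i] = a[i]/b[i]; no b[i] may be zero
  result.scale (a);      // result[i] *= a[i]
  result *= 2.0;         // uniform scaling of every entry
}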
@p precision denotes + * the desired precision with + * which values shall be printed, + * @p scientific whether + * scientific notation shall be + * used. If @p across is + * @p true then the vector is + * printed in a line, while if + * @p false then the elements + * are printed on a separate line + * each. + */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Swap the contents of this + * vector and the other vector + * @p v. One could do this + * operation with a temporary + * variable and copying over the + * data elements, but this + * function is significantly more + * efficient since it only swaps + * the pointers to the data of + * the two vectors and therefore + * does not need to allocate + * temporary storage and move + * data around. + * + * This function is analog to the + * the @p swap function of all C++ + * standard containers. Also, + * there is a global function + * swap(u,v) that simply calls + * u.swap(v), again in analogy + * to standard functions. + */ + void swap (VectorBase &v); + + /** + * Conversion operator to gain access + * to the underlying PETSc type. If you + * do this, you cut this class off some + * information it may need, so this + * conversion operator should only be + * used if you know what you do. In + * particular, it should only be used + * for read-only operations into the + * vector. + */ + operator const Vec &() const; + + /** + * Estimate for the memory + * consumption (not implemented + * for this class). + */ + std::size_t memory_consumption () const; + + protected: + /** + * A generic vector object in + * PETSc. The actual type, a sequential + * vector, is set in the constructor. + */ + Vec vector; + + /** + * Denotes if this vector has ghost + * indices associated with it. This + * means that at least one of the + * processes in a parallel programm has + * at least one ghost index. + */ + bool ghosted; + + /** + * This vector contains the global + * indices of the ghost values. The + * location in this vector denotes the + * local numbering, which is used in + * PETSc. + */ + IndexSet ghost_indices; + + /** + * Store whether the last action was a + * write or add operation. This + * variable is @p mutable so that the + * accessor classes can write to it, + * even though the vector object they + * refer to is constant. + */ + mutable ::dealii::VectorOperation::values last_action; + + /** + * Make the reference class a friend. + */ + friend class internal::VectorReference; + + /** + * Specifies if the vector is the owner + * of the PETSc Vec. This is true if it + * got created by this class and + * determines if it gets destructed in + * the destructor. + */ + bool attained_ownership; + + /** + * Collective set or add + * operation: This function is + * invoked by the collective @p + * set and @p add with the + * @p add_values flag set to the + * corresponding value. 
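// Illustrative sketch (not from the library sources) of why last_action is
// tracked: 'set' and 'add' accesses may not be interleaved without an
// intervening compress(), otherwise the ExcWrongMode exception documented at
// the top of this file is raised. The VectorOperation flag names are assumed.
#include <deal.II/lac/petsc_vector_base.h>   // header path assumed

void set_then_add (dealii::PETScWrappers::VectorBase &v)
{
  v (0) = 1.0;                                    // 'set' mode (element 0 assumed local)
  v.compress (dealii::VectorOperation::insert);   // close the set phase
  v (0) += 1.0;                                   // switching to 'add' mode is now allowed
  v.compress (dealii::VectorOperation::add);
}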
+ */ + void do_set_add_operation (const unsigned int n_elements, + const unsigned int *indices, - const PetscScalar *values, ++ const PetscScalar *values, + const bool add_values); }; @@@ -886,11 -886,11 +886,11 @@@ namespace internal { inline - VectorReference::VectorReference (const VectorBase &vector, + VectorReference::VectorReference (const VectorBase &vector, const unsigned int index) - : - vector (vector), - index (index) + : + vector (vector), + index (index) {} diff --cc deal.II/include/deal.II/lac/sparse_matrix.h index 07b8ee7929,6cdf1e8f2a..f292e170e3 --- a/deal.II/include/deal.II/lac/sparse_matrix.h +++ b/deal.II/include/deal.II/lac/sparse_matrix.h @@@ -470,1312 -470,1290 +470,1290 @@@ namespace SparseMatrixIterator template class SparseMatrix : public virtual Subscriptor { - public: - /** - * Type of matrix entries. In analogy to - * the STL container classes. - */ - typedef number value_type; - - /** - * Declare a type that has holds - * real-valued numbers with the - * same precision as the template - * argument to this class. If the - * template argument of this - * class is a real data type, - * then real_type equals the - * template argument. If the - * template argument is a - * std::complex type then - * real_type equals the type - * underlying the complex - * numbers. - * - * This typedef is used to - * represent the return type of - * norms. - */ - typedef typename numbers::NumberTraits::real_type real_type; - - /** - * Typedef of an STL conforming iterator - * class walking over all the nonzero - * entries of this matrix. This iterator - * cannot change the values of the - * matrix. - */ - typedef - SparseMatrixIterators::Iterator - const_iterator; - - /** - * Typedef of an STL conforming iterator - * class walking over all the nonzero - * entries of this matrix. This iterator - * @em can change the values of the - * matrix, but of course can't change the - * sparsity pattern as this is fixed once - * a sparse matrix is attached to it. - */ - typedef - SparseMatrixIterators::Iterator - iterator; - - /** - * A structure that describes some of the - * traits of this class in terms of its - * run-time behavior. Some other classes - * (such as the block matrix classes) - * that take one or other of the matrix - * classes as its template parameters can - * tune their behavior based on the - * variables in this class. - */ - struct Traits - { - /** - * It is safe to elide additions of - * zeros to individual elements of - * this matrix. - */ - static const bool zero_addition_can_be_elided = true; - }; + public: + /** + * Type of matrix entries. In analogy to + * the STL container classes. + */ + typedef number value_type; + + /** + * Declare a type that has holds + * real-valued numbers with the + * same precision as the template + * argument to this class. If the + * template argument of this + * class is a real data type, + * then real_type equals the + * template argument. If the + * template argument is a + * std::complex type then + * real_type equals the type + * underlying the complex + * numbers. + * + * This typedef is used to + * represent the return type of + * norms. + */ + typedef typename numbers::NumberTraits::real_type real_type; + + /** + * Typedef of an STL conforming iterator + * class walking over all the nonzero + * entries of this matrix. This iterator + * cannot change the values of the + * matrix. 
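// Illustrative sketch (not from the library sources) for the const_iterator
// typedef documented in this block: walk all stored entries of a SparseMatrix
// without modifying them. The begin()/end() pair is assumed to be the one
// declared further down in this class.
#include <deal.II/lac/sparse_matrix.h>
#include <iostream>

template <typename number>
void print_nonzeros (const dealii::SparseMatrix<number> &A)
{
  typename dealii::SparseMatrix<number>::const_iterator p = A.begin ();
  for (; p != A.end (); ++p)
    std::cout << '(' << p->row () << ',' << p->column () << ") = "
              << p->value () << std::endl;
}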
+ */ + typedef + SparseMatrixIterators::Iterator + const_iterator; + + /** + * Typedef of an STL conforming iterator + * class walking over all the nonzero + * entries of this matrix. This iterator + * @em can change the values of the + * matrix, but of course can't change the + * sparsity pattern as this is fixed once + * a sparse matrix is attached to it. + */ + typedef + SparseMatrixIterators::Iterator + iterator; + + /** + * A structure that describes some of the + * traits of this class in terms of its + * run-time behavior. Some other classes + * (such as the block matrix classes) + * that take one or other of the matrix + * classes as its template parameters can + * tune their behavior based on the + * variables in this class. + */ + struct Traits + { + /** + * It is safe to elide additions of + * zeros to individual elements of + * this matrix. + */ + static const bool zero_addition_can_be_elided = true; + }; - /** - * @name Constructors and initalization - */ + /** + * @name Constructors and initalization + */ //@{ - /** - * Constructor; initializes the matrix to - * be empty, without any structure, i.e. - * the matrix is not usable at all. This - * constructor is therefore only useful - * for matrices which are members of a - * class. All other matrices should be - * created at a point in the data flow - * where all necessary information is - * available. - * - * You have to initialize - * the matrix before usage with - * reinit(const SparsityPattern&). - */ - SparseMatrix (); - - /** - * Copy constructor. This constructor is - * only allowed to be called if the matrix - * to be copied is empty. This is for the - * same reason as for the - * SparsityPattern, see there for the - * details. - * - * If you really want to copy a whole - * matrix, you can do so by using the - * copy_from() function. - */ - SparseMatrix (const SparseMatrix &); - - /** - * Constructor. Takes the given - * matrix sparsity structure to - * represent the sparsity pattern - * of this matrix. You can change - * the sparsity pattern later on - * by calling the reinit(const - * SparsityPattern&) function. - * - * You have to make sure that the - * lifetime of the sparsity - * structure is at least as long - * as that of this matrix or as - * long as reinit(const - * SparsityPattern&) is not - * called with a new sparsity - * pattern. - * - * The constructor is marked - * explicit so as to disallow - * that someone passes a sparsity - * pattern in place of a sparse - * matrix to some function, where - * an empty matrix would be - * generated then. - */ - explicit SparseMatrix (const SparsityPattern &sparsity); - - /** - * Copy constructor: initialize - * the matrix with the identity - * matrix. This constructor will - * throw an exception if the - * sizes of the sparsity pattern - * and the identity matrix do not - * coincide, or if the sparsity - * pattern does not provide for - * nonzero entries on the entire - * diagonal. - */ - SparseMatrix (const SparsityPattern &sparsity, - const IdentityMatrix &id); - - /** - * Destructor. Free all memory, but do not - * release the memory of the sparsity - * structure. - */ - virtual ~SparseMatrix (); - - /** - * Copy operator. Since copying - * entire sparse matrices is a - * very expensive operation, we - * disallow doing so except for - * the special case of empty - * matrices of size zero. 
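// Illustrative sketch (not from the library sources, SparsityPattern interface
// assumed): a SparseMatrix only stores a pointer to its SparsityPattern, so the
// pattern has to outlive the matrix, as the constructor documentation stresses.
#include <deal.II/lac/sparsity_pattern.h>   // header path assumed
#include <deal.II/lac/sparse_matrix.h>

void small_matrix_setup ()
{
  dealii::SparsityPattern pattern (3, 3, /*max_entries_per_row=*/3);
  for (unsigned int i = 0; i < 3; ++i)
    pattern.add (i, i);                       // allocate the diagonal
  pattern.compress ();

  dealii::SparseMatrix<double> A (pattern);   // explicit constructor from a pattern
  A.set (1, 1, 2.0);
}                                             // A is destroyed before pattern: OK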
This - * doesn't seem particularly - * useful, but is exactly what - * one needs if one wanted to - * have a - * std::vector@ - * @>: in that case, one - * can create a vector (which - * needs the ability to copy - * objects) of empty matrices - * that are then later filled - * with something useful. - */ - SparseMatrix& operator = (const SparseMatrix &); - - /** - * Copy operator: initialize - * the matrix with the identity - * matrix. This operator will - * throw an exception if the - * sizes of the sparsity pattern - * and the identity matrix do not - * coincide, or if the sparsity - * pattern does not provide for - * nonzero entries on the entire - * diagonal. - */ - SparseMatrix & - operator= (const IdentityMatrix &id); - - /** - * This operator assigns a scalar to - * a matrix. Since this does usually - * not make much sense (should we set - * all matrix entries to this value? - * Only the nonzero entries of the - * sparsity pattern?), this operation - * is only allowed if the actual - * value to be assigned is zero. This - * operator only exists to allow for - * the obvious notation - * matrix=0, which sets all - * elements of the matrix to zero, - * but keep the sparsity pattern - * previously used. - */ - SparseMatrix & operator = (const double d); - - /** - * Reinitialize the sparse matrix - * with the given sparsity - * pattern. The latter tells the - * matrix how many nonzero - * elements there need to be - * reserved. - * - * Regarding memory allocation, - * the same applies as said - * above. - * - * You have to make sure that the - * lifetime of the sparsity - * structure is at least as long - * as that of this matrix or as - * long as reinit(const - * SparsityPattern &) is not - * called with a new sparsity - * structure. - * - * The elements of the matrix are - * set to zero by this function. - */ - virtual void reinit (const SparsityPattern &sparsity); - - /** - * Release all memory and return - * to a state just like after - * having called the default - * constructor. It also forgets - * the sparsity pattern it was - * previously tied to. - */ - virtual void clear (); + /** + * Constructor; initializes the matrix to + * be empty, without any structure, i.e. + * the matrix is not usable at all. This + * constructor is therefore only useful + * for matrices which are members of a + * class. All other matrices should be + * created at a point in the data flow + * where all necessary information is + * available. + * + * You have to initialize + * the matrix before usage with + * reinit(const SparsityPattern&). + */ + SparseMatrix (); + + /** + * Copy constructor. This constructor is + * only allowed to be called if the matrix + * to be copied is empty. This is for the + * same reason as for the + * SparsityPattern, see there for the + * details. + * + * If you really want to copy a whole + * matrix, you can do so by using the + * copy_from() function. + */ + SparseMatrix (const SparseMatrix &); + + /** + * Constructor. Takes the given + * matrix sparsity structure to + * represent the sparsity pattern + * of this matrix. You can change + * the sparsity pattern later on + * by calling the reinit(const + * SparsityPattern&) function. + * + * You have to make sure that the + * lifetime of the sparsity + * structure is at least as long + * as that of this matrix or as + * long as reinit(const + * SparsityPattern&) is not + * called with a new sparsity + * pattern. 
+ * + * The constructor is marked + * explicit so as to disallow + * that someone passes a sparsity + * pattern in place of a sparse + * matrix to some function, where + * an empty matrix would be + * generated then. + */ + explicit SparseMatrix (const SparsityPattern &sparsity); + + /** + * Copy constructor: initialize + * the matrix with the identity + * matrix. This constructor will + * throw an exception if the + * sizes of the sparsity pattern + * and the identity matrix do not + * coincide, or if the sparsity + * pattern does not provide for + * nonzero entries on the entire + * diagonal. + */ + SparseMatrix (const SparsityPattern &sparsity, - const IdentityMatrix &id); ++ const IdentityMatrix &id); + + /** + * Destructor. Free all memory, but do not + * release the memory of the sparsity + * structure. + */ + virtual ~SparseMatrix (); + + /** + * Copy operator. Since copying + * entire sparse matrices is a + * very expensive operation, we + * disallow doing so except for + * the special case of empty + * matrices of size zero. This + * doesn't seem particularly + * useful, but is exactly what + * one needs if one wanted to + * have a + * std::vector@ + * @>: in that case, one + * can create a vector (which + * needs the ability to copy + * objects) of empty matrices + * that are then later filled + * with something useful. + */ + SparseMatrix &operator = (const SparseMatrix &); + + /** + * Copy operator: initialize + * the matrix with the identity + * matrix. This operator will + * throw an exception if the + * sizes of the sparsity pattern + * and the identity matrix do not + * coincide, or if the sparsity + * pattern does not provide for + * nonzero entries on the entire + * diagonal. + */ + SparseMatrix & - operator= (const IdentityMatrix &id); ++ operator= (const IdentityMatrix &id); + + /** + * This operator assigns a scalar to + * a matrix. Since this does usually + * not make much sense (should we set + * all matrix entries to this value? + * Only the nonzero entries of the + * sparsity pattern?), this operation + * is only allowed if the actual + * value to be assigned is zero. This + * operator only exists to allow for + * the obvious notation + * matrix=0, which sets all + * elements of the matrix to zero, + * but keep the sparsity pattern + * previously used. + */ + SparseMatrix &operator = (const double d); + + /** + * Reinitialize the sparse matrix + * with the given sparsity + * pattern. The latter tells the + * matrix how many nonzero + * elements there need to be + * reserved. + * + * Regarding memory allocation, + * the same applies as said + * above. + * + * You have to make sure that the + * lifetime of the sparsity + * structure is at least as long + * as that of this matrix or as + * long as reinit(const + * SparsityPattern &) is not + * called with a new sparsity + * structure. + * + * The elements of the matrix are + * set to zero by this function. + */ + virtual void reinit (const SparsityPattern &sparsity); + + /** + * Release all memory and return + * to a state just like after + * having called the default + * constructor. It also forgets + * the sparsity pattern it was + * previously tied to. + */ + virtual void clear (); //@} - /** - * @name Information on the matrix - */ + /** + * @name Information on the matrix + */ //@{ - /** - * Return whether the object is - * empty. It is empty if either - * both dimensions are zero or no - * SparsityPattern is - * associated. - */ - bool empty () const; - - /** - * Return the dimension of the - * image space. 
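// Illustrative sketch (not from the library sources) contrasting the two ways
// of "emptying" a matrix documented in this block: assigning 0 only zeroes the
// stored values and keeps the sparsity pattern, while clear() also detaches
// from the pattern and releases the memory.
#include <deal.II/lac/sparse_matrix.h>

template <typename number>
void reset_matrix (dealii::SparseMatrix<number> &A)
{
  A = 0;           // zero all stored entries, sparsity pattern untouched
  // A.clear();    // would additionally forget the pattern, like a fresh object
}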
To remember: the - * matrix is of dimension - * $m \times n$. - */ - unsigned int m () const; - - /** - * Return the dimension of the - * range space. To remember: the - * matrix is of dimension - * $m \times n$. - */ - unsigned int n () const; - - /** - * Return the number of entries - * in a specific row. - */ - unsigned int get_row_length (const unsigned int row) const; - - /** - * Return the number of nonzero - * elements of this - * matrix. Actually, it returns - * the number of entries in the - * sparsity pattern; if any of - * the entries should happen to - * be zero, it is counted anyway. - */ - unsigned int n_nonzero_elements () const; - - /** - * Return the number of actually - * nonzero elements of this matrix. It - * is possible to specify the parameter - * threshold in order to count - * only the elements that have absolute - * value greater than the threshold. - * - * Note, that this function does (in - * contrary to n_nonzero_elements()) - * not count all entries of the - * sparsity pattern but only the ones - * that are nonzero (or whose absolute - * value is greater than threshold). - */ - unsigned int n_actually_nonzero_elements (const double threshold = 0.) const; - - /** - * Return a (constant) reference - * to the underlying sparsity - * pattern of this matrix. - * - * Though the return value is - * declared const, you - * should be aware that it may - * change if you call any - * nonconstant function of - * objects which operate on it. - */ - const SparsityPattern & get_sparsity_pattern () const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. See - * MemoryConsumption. - */ - std::size_t memory_consumption () const; + /** + * Return whether the object is + * empty. It is empty if either + * both dimensions are zero or no + * SparsityPattern is + * associated. + */ + bool empty () const; + + /** + * Return the dimension of the + * image space. To remember: the + * matrix is of dimension + * $m \times n$. + */ + unsigned int m () const; + + /** + * Return the dimension of the + * range space. To remember: the + * matrix is of dimension + * $m \times n$. + */ + unsigned int n () const; + + /** + * Return the number of entries + * in a specific row. + */ + unsigned int get_row_length (const unsigned int row) const; + + /** + * Return the number of nonzero + * elements of this + * matrix. Actually, it returns + * the number of entries in the + * sparsity pattern; if any of + * the entries should happen to + * be zero, it is counted anyway. + */ + unsigned int n_nonzero_elements () const; + + /** + * Return the number of actually + * nonzero elements of this matrix. It + * is possible to specify the parameter + * threshold in order to count + * only the elements that have absolute + * value greater than the threshold. + * + * Note, that this function does (in + * contrary to n_nonzero_elements()) + * not count all entries of the + * sparsity pattern but only the ones + * that are nonzero (or whose absolute + * value is greater than threshold). + */ + unsigned int n_actually_nonzero_elements (const double threshold = 0.) const; + + /** + * Return a (constant) reference + * to the underlying sparsity + * pattern of this matrix. + * + * Though the return value is + * declared const, you + * should be aware that it may + * change if you call any + * nonconstant function of + * objects which operate on it. 
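// Illustrative sketch (not from the library sources) of the introspection calls
// documented in this block.
#include <deal.II/lac/sparse_matrix.h>
#include <iostream>

template <typename number>
void matrix_statistics (const dealii::SparseMatrix<number> &A)
{
  std::cout << A.m () << " x " << A.n () << " matrix, "
            << A.n_nonzero_elements () << " stored entries, "
            << A.n_actually_nonzero_elements (1e-12) << " of them above 1e-12"
            << std::endl;
}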
+ */ + const SparsityPattern &get_sparsity_pattern () const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. See + * MemoryConsumption. + */ + std::size_t memory_consumption () const; //@} - /** - * @name Modifying entries - */ + /** + * @name Modifying entries + */ //@{ - /** - * Set the element (i,j) - * to value. Throws an - * error if the entry does not - * exist or if value is - * not a finite number. Still, it - * is allowed to store zero - * values in non-existent fields. - */ - void set (const unsigned int i, - const unsigned int j, - const number value); - - /** - * Set all elements given in a - * FullMatrix into the sparse matrix - * locations given by - * indices. In other words, - * this function writes the elements - * in full_matrix into the - * calling matrix, using the - * local-to-global indexing specified - * by indices for both the - * rows and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be set anyway or - * they should be filtered away (and - * not change the previous content in - * the respective element if it - * exists). The default value is - * false, i.e., even zero - * values are treated. - */ - template - void set (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - template - void set (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be set anyway or - * they should be filtered away (and - * not change the previous content in - * the respective element if it - * exists). The default value is - * false, i.e., even zero - * values are treated. - */ - template - void set (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = false); - - /** - * Set several elements to values - * given by values in a - * given row in columns given by - * col_indices into the sparse - * matrix. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. - */ - template - void set (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const number2 *values, - const bool elide_zero_values = false); - - /** - * Add value to the - * element (i,j). Throws - * an error if the entry does not - * exist or if value is - * not a finite number. Still, it - * is allowed to store zero - * values in non-existent fields. - */ - void add (const unsigned int i, - const unsigned int j, - const number value); - - /** - * Add all elements given in a - * FullMatrix into sparse - * matrix locations given by - * indices. 
In other words, - * this function adds the elements in - * full_matrix to the - * respective entries in calling - * matrix, using the local-to-global - * indexing specified by - * indices for both the rows - * and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - template - void add (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - template - void add (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - template - void add (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = true); - - /** - * Add an array of values given by - * values in the given - * global matrix row at columns - * specified by col_indices in the - * sparse matrix. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - template - void add (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const number2 *values, - const bool elide_zero_values = true, - const bool col_indices_are_sorted = false); - - /** - * Multiply the entire matrix by a - * fixed factor. - */ - SparseMatrix & operator *= (const number factor); - - /** - * Divide the entire matrix by a - * fixed factor. - */ - SparseMatrix & operator /= (const number factor); - - /** - * Symmetrize the matrix by - * forming the mean value between - * the existing matrix and its - * transpose, $A = \frac 12(A+A^T)$. - * - * This operation assumes that - * the underlying sparsity - * pattern represents a symmetric - * object. If this is not the - * case, then the result of this - * operation will not be a - * symmetric matrix, since it - * only explicitly symmetrizes - * by looping over the lower left - * triangular part for efficiency - * reasons; if there are entries - * in the upper right triangle, - * then these elements are missed - * in the - * symmetrization. Symmetrization - * of the sparsity pattern can be - * obtain by - * SparsityPattern::symmetrize(). - */ - void symmetrize (); - - /** - * Copy the given matrix to this - * one. 
The operation throws an - * error if the sparsity patterns - * of the two involved matrices - * do not point to the same - * object, since in this case the - * copy operation is - * cheaper. Since this operation - * is notheless not for free, we - * do not make it available - * through operator =, - * since this may lead to - * unwanted usage, e.g. in copy - * arguments to functions, which - * should really be arguments by - * reference. - * - * The source matrix may be a matrix - * of arbitrary type, as long as its - * data type is convertible to the - * data type of this matrix. - * - * The function returns a reference to - * *this. - */ - template - SparseMatrix & - copy_from (const SparseMatrix &source); - - /** - * This function is complete - * analogous to the - * SparsityPattern::copy_from() - * function in that it allows to - * initialize a whole matrix in - * one step. See there for more - * information on argument types - * and their meaning. You can - * also find a small example on - * how to use this function - * there. - * - * The only difference to the - * cited function is that the - * objects which the inner - * iterator points to need to be - * of type std::pair, where - * value needs to be - * convertible to the element - * type of this class, as - * specified by the - * number template - * argument. - * - * Previous content of the matrix - * is overwritten. Note that the - * entries specified by the input - * parameters need not - * necessarily cover all elements - * of the matrix. Elements not - * covered remain untouched. - */ - template - void copy_from (const ForwardIterator begin, - const ForwardIterator end); - - /** - * Copy the nonzero entries of a - * full matrix into this - * object. Previous content is - * deleted. Note that the - * underlying sparsity pattern - * must be appropriate to hold - * the nonzero entries of the - * full matrix. - */ - template - void copy_from (const FullMatrix &matrix); - - /** - * Add matrix scaled by - * factor to this matrix, - * i.e. the matrix factor*matrix - * is added to this. This - * function throws an error if the - * sparsity patterns of the two involved - * matrices do not point to the same - * object, since in this case the - * operation is cheaper. - * - * The source matrix may be a sparse - * matrix over an arbitrary underlying - * scalar type, as long as its data type - * is convertible to the data type of - * this matrix. - */ - template - void add (const number factor, - const SparseMatrix &matrix); + /** + * Set the element (i,j) + * to value. Throws an + * error if the entry does not + * exist or if value is + * not a finite number. Still, it + * is allowed to store zero + * values in non-existent fields. + */ + void set (const unsigned int i, + const unsigned int j, + const number value); + + /** + * Set all elements given in a + * FullMatrix into the sparse matrix + * locations given by + * indices. In other words, + * this function writes the elements + * in full_matrix into the + * calling matrix, using the + * local-to-global indexing specified + * by indices for both the + * rows and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be set anyway or + * they should be filtered away (and + * not change the previous content in + * the respective element if it + * exists). 
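// Illustrative sketch (not from the library sources) of the typical use of the
// collective add() documented in this block during finite element assembly;
// cell_matrix and local_dof_indices are assumed to come from the usual cell loop.
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/full_matrix.h>
#include <vector>

void distribute_local_to_global (dealii::SparseMatrix<double>     &system_matrix,
                                 const dealii::FullMatrix<double> &cell_matrix,
                                 const std::vector<unsigned int>  &local_dof_indices)
{
  // adds cell_matrix(i,j) onto system_matrix(local_dof_indices[i], local_dof_indices[j]);
  // zero values are elided by default (elide_zero_values = true for add()).
  system_matrix.add (local_dof_indices, cell_matrix);
}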
The default value is + * false, i.e., even zero + * values are treated. + */ + template + void set (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. + */ + template + void set (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be set anyway or + * they should be filtered away (and + * not change the previous content in + * the respective element if it + * exists). The default value is + * false, i.e., even zero + * values are treated. + */ + template + void set (const unsigned int row, + const std::vector &col_indices, + const std::vector &values, + const bool elide_zero_values = false); + + /** + * Set several elements to values + * given by values in a + * given row in columns given by + * col_indices into the sparse + * matrix. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ + template + void set (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, + const number2 *values, + const bool elide_zero_values = false); + + /** + * Add value to the + * element (i,j). Throws + * an error if the entry does not + * exist or if value is + * not a finite number. Still, it + * is allowed to store zero + * values in non-existent fields. + */ + void add (const unsigned int i, + const unsigned int j, + const number value); + + /** + * Add all elements given in a + * FullMatrix into sparse + * matrix locations given by + * indices. In other words, + * this function adds the elements in + * full_matrix to the + * respective entries in calling + * matrix, using the local-to-global + * indexing specified by + * indices for both the rows + * and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + template + void add (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. + */ + template + void add (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. 
+ * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + template + void add (const unsigned int row, + const std::vector &col_indices, + const std::vector &values, + const bool elide_zero_values = true); + + /** + * Add an array of values given by + * values in the given + * global matrix row at columns + * specified by col_indices in the + * sparse matrix. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + template + void add (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, + const number2 *values, + const bool elide_zero_values = true, + const bool col_indices_are_sorted = false); + + /** + * Multiply the entire matrix by a + * fixed factor. + */ + SparseMatrix &operator *= (const number factor); + + /** + * Divide the entire matrix by a + * fixed factor. + */ + SparseMatrix &operator /= (const number factor); + + /** + * Symmetrize the matrix by + * forming the mean value between + * the existing matrix and its + * transpose, $A = \frac 12(A+A^T)$. + * + * This operation assumes that + * the underlying sparsity + * pattern represents a symmetric + * object. If this is not the + * case, then the result of this + * operation will not be a + * symmetric matrix, since it + * only explicitly symmetrizes + * by looping over the lower left + * triangular part for efficiency + * reasons; if there are entries + * in the upper right triangle, + * then these elements are missed + * in the + * symmetrization. Symmetrization + * of the sparsity pattern can be + * obtain by + * SparsityPattern::symmetrize(). + */ + void symmetrize (); + + /** + * Copy the given matrix to this + * one. The operation throws an + * error if the sparsity patterns + * of the two involved matrices + * do not point to the same + * object, since in this case the + * copy operation is + * cheaper. Since this operation + * is notheless not for free, we + * do not make it available + * through operator =, + * since this may lead to + * unwanted usage, e.g. in copy + * arguments to functions, which + * should really be arguments by + * reference. + * + * The source matrix may be a matrix + * of arbitrary type, as long as its + * data type is convertible to the + * data type of this matrix. + * + * The function returns a reference to + * *this. + */ + template + SparseMatrix & + copy_from (const SparseMatrix &source); + + /** + * This function is complete + * analogous to the + * SparsityPattern::copy_from() + * function in that it allows to + * initialize a whole matrix in + * one step. See there for more + * information on argument types + * and their meaning. You can + * also find a small example on + * how to use this function + * there. + * + * The only difference to the + * cited function is that the + * objects which the inner + * iterator points to need to be + * of type std::pair, where + * value needs to be + * convertible to the element + * type of this class, as + * specified by the + * number template + * argument. + * + * Previous content of the matrix + * is overwritten. 
Note that the + * entries specified by the input + * parameters need not + * necessarily cover all elements + * of the matrix. Elements not + * covered remain untouched. + */ + template + void copy_from (const ForwardIterator begin, + const ForwardIterator end); + + /** + * Copy the nonzero entries of a + * full matrix into this + * object. Previous content is + * deleted. Note that the + * underlying sparsity pattern + * must be appropriate to hold + * the nonzero entries of the + * full matrix. + */ + template + void copy_from (const FullMatrix &matrix); + + /** + * Add matrix scaled by + * factor to this matrix, + * i.e. the matrix factor*matrix + * is added to this. This + * function throws an error if the + * sparsity patterns of the two involved + * matrices do not point to the same + * object, since in this case the + * operation is cheaper. + * + * The source matrix may be a sparse + * matrix over an arbitrary underlying + * scalar type, as long as its data type + * is convertible to the data type of + * this matrix. + */ + template + void add (const number factor, + const SparseMatrix &matrix); //@} - /** - * @name Entry Access - */ + /** + * @name Entry Access + */ //@{ - /** - * Return the value of the entry - * (i,j). This may be an - * expensive operation and you - * should always take care where - * to call this function. In - * order to avoid abuse, this - * function throws an exception - * if the required element does - * not exist in the matrix. - * - * In case you want a function - * that returns zero instead (for - * entries that are not in the - * sparsity pattern of the - * matrix), use the el() - * function. - * - * If you are looping over all elements, - * consider using one of the iterator - * classes instead, since they are - * tailored better to a sparse matrix - * structure. - */ - number operator () (const unsigned int i, - const unsigned int j) const; - - /** - * This function is mostly like - * operator()() in that it - * returns the value of the - * matrix entry (i,j). The - * only difference is that if - * this entry does not exist in - * the sparsity pattern, then - * instead of raising an - * exception, zero is - * returned. While this may be - * convenient in some cases, note - * that it is simple to write - * algorithms that are slow - * compared to an optimal - * solution, since the sparsity - * of the matrix is not used. - * - * If you are looping over all elements, - * consider using one of the iterator - * classes instead, since they are - * tailored better to a sparse matrix - * structure. - */ - number el (const unsigned int i, - const unsigned int j) const; - - /** - * Return the main diagonal - * element in the ith - * row. This function throws an - * error if the matrix is not - * quadratic (see - * SparsityPattern::optimize_diagonal()). - * - * This function is considerably - * faster than the operator()(), - * since for quadratic matrices, the - * diagonal entry may be the - * first to be stored in each row - * and access therefore does not - * involve searching for the - * right column number. - */ - number diag_element (const unsigned int i) const; - - /** - * Same as above, but return a - * writeable reference. You're - * sure you know what you do? - */ - number & diag_element (const unsigned int i); - - /** - * Access to values in internal - * mode. Returns the value of - * the indexth entry in - * row. Here, - * index refers to the - * internal representation of the - * matrix, not the column. 
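To illustrate the copy_from() and add(factor, matrix) members documented above, a short sketch under the precondition stated there, namely that A and B point to the same SparsityPattern object; the names A, B and F are illustrative:

@code
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>

void combine_matrices (SparseMatrix<double>       &A,
                       const SparseMatrix<double> &B,
                       const FullMatrix<double>   &F)
{
  // Overwrite A with B; both matrices must share the same SparsityPattern
  // object, otherwise an exception is thrown.
  A.copy_from (B);

  // Form A := A + 2*B (again requires identical sparsity patterns).
  A.add (2.0, B);

  // Alternatively, scatter the nonzero entries of a full matrix into A;
  // A's sparsity pattern must be able to hold all of them.
  A.copy_from (F);
}
@endcode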
Be - * sure to understand what you - * are doing here. - * - * @deprecated Use iterator or - * const_iterator instead! - */ - number raw_entry (const unsigned int row, - const unsigned int index) const; - - /** - * @internal @deprecated Use iterator or - * const_iterator instead! - * - * This is for hackers. Get - * access to the ith element of - * this matrix. The elements are - * stored in a consecutive way, - * refer to the SparsityPattern - * class for more details. - * - * You should use this interface - * very carefully and only if you - * are absolutely sure to know - * what you do. You should also - * note that the structure of - * these arrays may change over - * time. If you change the - * layout yourself, you should - * also rename this function to - * avoid programs relying on - * outdated information! - */ - number global_entry (const unsigned int i) const; - - /** - * @internal @deprecated Use iterator or - * const_iterator instead! - * - * Same as above, but with write - * access. You certainly know - * what you do? - */ - number & global_entry (const unsigned int i); + /** + * Return the value of the entry + * (i,j). This may be an + * expensive operation and you + * should always take care where + * to call this function. In + * order to avoid abuse, this + * function throws an exception + * if the required element does + * not exist in the matrix. + * + * In case you want a function + * that returns zero instead (for + * entries that are not in the + * sparsity pattern of the + * matrix), use the el() + * function. + * + * If you are looping over all elements, + * consider using one of the iterator + * classes instead, since they are + * tailored better to a sparse matrix + * structure. + */ + number operator () (const unsigned int i, + const unsigned int j) const; + + /** + * This function is mostly like + * operator()() in that it + * returns the value of the + * matrix entry (i,j). The + * only difference is that if + * this entry does not exist in + * the sparsity pattern, then + * instead of raising an + * exception, zero is + * returned. While this may be + * convenient in some cases, note + * that it is simple to write + * algorithms that are slow + * compared to an optimal + * solution, since the sparsity + * of the matrix is not used. + * + * If you are looping over all elements, + * consider using one of the iterator + * classes instead, since they are + * tailored better to a sparse matrix + * structure. + */ + number el (const unsigned int i, + const unsigned int j) const; + + /** + * Return the main diagonal + * element in the ith + * row. This function throws an + * error if the matrix is not + * quadratic (see + * SparsityPattern::optimize_diagonal()). + * + * This function is considerably + * faster than the operator()(), + * since for quadratic matrices, the + * diagonal entry may be the + * first to be stored in each row + * and access therefore does not + * involve searching for the + * right column number. + */ + number diag_element (const unsigned int i) const; + + /** + * Same as above, but return a + * writeable reference. You're + * sure you know what you do? + */ + number &diag_element (const unsigned int i); + + /** + * Access to values in internal + * mode. Returns the value of + * the indexth entry in + * row. Here, + * index refers to the + * internal representation of the + * matrix, not the column. Be + * sure to understand what you + * are doing here. + * + * @deprecated Use iterator or + * const_iterator instead! 
+ */ + number raw_entry (const unsigned int row, + const unsigned int index) const; + + /** + * @internal @deprecated Use iterator or + * const_iterator instead! + * + * This is for hackers. Get + * access to the ith element of + * this matrix. The elements are + * stored in a consecutive way, + * refer to the SparsityPattern + * class for more details. + * + * You should use this interface + * very carefully and only if you + * are absolutely sure to know + * what you do. You should also + * note that the structure of + * these arrays may change over + * time. If you change the + * layout yourself, you should + * also rename this function to + * avoid programs relying on + * outdated information! + */ + number global_entry (const unsigned int i) const; + + /** + * @internal @deprecated Use iterator or + * const_iterator instead! + * + * Same as above, but with write + * access. You certainly know + * what you do? + */ + number &global_entry (const unsigned int i); //@} - /** - * @name Multiplications - */ + /** + * @name Multiplications + */ //@{ - /** - * Matrix-vector multiplication: - * let dst = M*src with - * M being this matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void vmult (OutVector& dst, - const InVector& src) const; - - /** - * Matrix-vector multiplication: - * let dst = MT*src with - * M being this - * matrix. This function does the - * same as vmult() but takes - * the transposed matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void Tvmult (OutVector& dst, - const InVector& src) const; - - /** - * Adding Matrix-vector - * multiplication. Add - * M*src on dst - * with M being this - * matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void vmult_add (OutVector& dst, - const InVector& src) const; - - /** - * Adding Matrix-vector - * multiplication. Add - * MT*src to - * dst with M being - * this matrix. This function - * does the same as vmult_add() - * but takes the transposed - * matrix. - * - * Note that while this function can - * operate on all vectors that offer - * iterator classes, it is only really - * effective for objects of type @ref - * Vector. 
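The read-access functions just documented differ mainly in how they treat positions outside the sparsity pattern. A brief sketch, assuming a quadratic matrix in which entry (0,1) exists:

@code
#include <deal.II/lac/sparse_matrix.h>

double inspect_entries (const SparseMatrix<double> &M)
{
  // operator() throws an exception if (0,1) is not in the sparsity pattern...
  const double a01 = M (0, 1);

  // ...while el() silently returns zero for such entries.
  const double a10 = M.el (1, 0);

  // For quadratic matrices the diagonal entry is stored first in each row,
  // so diag_element() avoids the column search done by operator().
  const double a00 = M.diag_element (0);

  return a00 + a01 + a10;
}
@endcode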
For all classes for which - * iterating over elements, or random - * member access is expensive, this - * function is not efficient. In - * particular, if you want to multiply - * with BlockVector objects, you should - * consider using a BlockSparseMatrix as - * well. - * - * Source and destination must - * not be the same vector. - */ - template - void Tvmult_add (OutVector& dst, - const InVector& src) const; - - /** - * Return the square of the norm - * of the vector $v$ with respect - * to the norm induced by this - * matrix, - * i.e. $\left(v,Mv\right)$. This - * is useful, e.g. in the finite - * element context, where the - * $L_2$ norm of a function - * equals the matrix norm with - * respect to the mass matrix of - * the vector representing the - * nodal values of the finite - * element function. - * - * Obviously, the matrix needs to be - * quadratic for this operation, and for - * the result to actually be a norm it - * also needs to be either real symmetric - * or complex hermitian. - * - * The underlying template types of both - * this matrix and the given vector - * should either both be real or - * complex-valued, but not mixed, for - * this function to make sense. - */ - template - somenumber matrix_norm_square (const Vector &v) const; - - /** - * Compute the matrix scalar - * product $\left(u,Mv\right)$. - */ - template - somenumber matrix_scalar_product (const Vector &u, - const Vector &v) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to be - * r=b-Mx. Write the - * residual into - * dst. The - * l2 norm of - * the residual vector is - * returned. - * - * Source x and destination - * dst must not be the same - * vector. - */ - template - somenumber residual (Vector &dst, - const Vector &x, - const Vector &b) const; - - /** - * Perform the matrix-matrix - * multiplication C = A * B, - * or, if an optional vector argument - * is given, C = A * diag(V) * - * B, where diag(V) - * defines a diagonal matrix with the - * vector entries. - * - * This function assumes that the - * calling matrix A and - * B have compatible - * sizes. The size of C will - * be set within this function. - * - * The content as well as the sparsity - * pattern of the matrix C will be - * changed by this function, so make - * sure that the sparsity pattern is - * not used somewhere else in your - * program. This is an expensive - * operation, so think twice before you - * use this function. - * - * There is an optional flag - * rebuild_sparsity_pattern - * that can be used to bypass the - * creation of a new sparsity pattern - * and instead uses the sparsity - * pattern stored in C. In - * that case, make sure that it really - * fits. The default is to rebuild the - * sparsity pattern. - * - * @note Rebuilding the sparsity pattern - * requires changing it. This means that - * all other matrices that are associated - * with this sparsity pattern will - * then have invalid entries. - */ - template - void mmult (SparseMatrix &C, - const SparseMatrix &B, - const Vector &V = Vector(), - const bool rebuild_sparsity_pattern = true) const; - - /** - * Perform the matrix-matrix - * multiplication with the transpose of - * this, i.e., C = - * AT * B, or, if an - * optional vector argument is given, - * C = AT * diag(V) * - * B, where diag(V) - * defines a diagonal matrix with the - * vector entries. - * - * This function assumes that the - * calling matrix A and - * B have compatible - * sizes. The size of C will - * be set within this function. 
- * - * The content as well as the sparsity - * pattern of the matrix C will be - * changed by this function, so make - * sure that the sparsity pattern is - * not used somewhere else in your - * program. This is an expensive - * operation, so think twice before you - * use this function. - * - * There is an optional flag - * rebuild_sparsity_pattern - * that can be used to bypass the - * creation of a new sparsity pattern - * and instead uses the sparsity - * pattern stored in C. In - * that case, make sure that it really - * fits. The default is to rebuild the - * sparsity pattern. - * - * @note Rebuilding the sparsity pattern - * requires changing it. This means that - * all other matrices that are associated - * with this sparsity pattern will - * then have invalid entries. - */ - template - void Tmmult (SparseMatrix &C, - const SparseMatrix &B, - const Vector &V = Vector(), - const bool rebuild_sparsity_pattern = true) const; + /** + * Matrix-vector multiplication: + * let dst = M*src with + * M being this matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void vmult (OutVector &dst, + const InVector &src) const; + + /** + * Matrix-vector multiplication: + * let dst = MT*src with + * M being this + * matrix. This function does the + * same as vmult() but takes + * the transposed matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void Tvmult (OutVector &dst, + const InVector &src) const; + + /** + * Adding Matrix-vector + * multiplication. Add + * M*src on dst + * with M being this + * matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void vmult_add (OutVector &dst, + const InVector &src) const; + + /** + * Adding Matrix-vector + * multiplication. Add + * MT*src to + * dst with M being + * this matrix. This function + * does the same as vmult_add() + * but takes the transposed + * matrix. + * + * Note that while this function can + * operate on all vectors that offer + * iterator classes, it is only really + * effective for objects of type @ref + * Vector. 
For all classes for which + * iterating over elements, or random + * member access is expensive, this + * function is not efficient. In + * particular, if you want to multiply + * with BlockVector objects, you should + * consider using a BlockSparseMatrix as + * well. + * + * Source and destination must + * not be the same vector. + */ + template + void Tvmult_add (OutVector &dst, + const InVector &src) const; + + /** + * Return the square of the norm + * of the vector $v$ with respect + * to the norm induced by this + * matrix, + * i.e. $\left(v,Mv\right)$. This + * is useful, e.g. in the finite + * element context, where the + * $L_2$ norm of a function + * equals the matrix norm with + * respect to the mass matrix of + * the vector representing the + * nodal values of the finite + * element function. + * + * Obviously, the matrix needs to be + * quadratic for this operation, and for + * the result to actually be a norm it + * also needs to be either real symmetric + * or complex hermitian. + * + * The underlying template types of both + * this matrix and the given vector + * should either both be real or + * complex-valued, but not mixed, for + * this function to make sense. + */ + template + somenumber matrix_norm_square (const Vector &v) const; + + /** + * Compute the matrix scalar + * product $\left(u,Mv\right)$. + */ + template + somenumber matrix_scalar_product (const Vector &u, + const Vector &v) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to be + * r=b-Mx. Write the + * residual into + * dst. The + * l2 norm of + * the residual vector is + * returned. + * + * Source x and destination + * dst must not be the same + * vector. + */ + template + somenumber residual (Vector &dst, + const Vector &x, + const Vector &b) const; + + /** + * Perform the matrix-matrix + * multiplication C = A * B, + * or, if an optional vector argument + * is given, C = A * diag(V) * + * B, where diag(V) + * defines a diagonal matrix with the + * vector entries. + * + * This function assumes that the + * calling matrix A and + * B have compatible + * sizes. The size of C will + * be set within this function. + * + * The content as well as the sparsity + * pattern of the matrix C will be + * changed by this function, so make + * sure that the sparsity pattern is + * not used somewhere else in your + * program. This is an expensive + * operation, so think twice before you + * use this function. + * + * There is an optional flag + * rebuild_sparsity_pattern + * that can be used to bypass the + * creation of a new sparsity pattern + * and instead uses the sparsity + * pattern stored in C. In + * that case, make sure that it really + * fits. The default is to rebuild the + * sparsity pattern. + * + * @note Rebuilding the sparsity pattern + * requires changing it. This means that + * all other matrices that are associated + * with this sparsity pattern will + * then have invalid entries. + */ + template + void mmult (SparseMatrix &C, + const SparseMatrix &B, + const Vector &V = Vector(), + const bool rebuild_sparsity_pattern = true) const; + + /** + * Perform the matrix-matrix + * multiplication with the transpose of + * this, i.e., C = + * AT * B, or, if an + * optional vector argument is given, + * C = AT * diag(V) * + * B, where diag(V) + * defines a diagonal matrix with the + * vector entries. + * + * This function assumes that the + * calling matrix A and + * B have compatible + * sizes. The size of C will + * be set within this function. 
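The matrix-vector operations documented above are most effective with plain Vector objects, as the notes say. A minimal sketch, assuming a quadratic matrix so that all vector sizes match; r, x, b and y are illustrative names:

@code
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

double apply_and_check (const SparseMatrix<double> &M,
                        const Vector<double>       &x,
                        const Vector<double>       &b)
{
  Vector<double> y (M.m());
  M.vmult (y, x);        // y  = M*x     (y and x must be different vectors)
  M.Tvmult_add (y, x);   // y += M^T*x

  // Residual r = b - M*x; the function returns the l2 norm of r.
  Vector<double> r (M.m());
  return M.residual (r, x, b);
}
@endcode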
+ * + * The content as well as the sparsity + * pattern of the matrix C will be + * changed by this function, so make + * sure that the sparsity pattern is + * not used somewhere else in your + * program. This is an expensive + * operation, so think twice before you + * use this function. + * + * There is an optional flag + * rebuild_sparsity_pattern + * that can be used to bypass the + * creation of a new sparsity pattern + * and instead uses the sparsity + * pattern stored in C. In + * that case, make sure that it really + * fits. The default is to rebuild the + * sparsity pattern. + * + * @note Rebuilding the sparsity pattern + * requires changing it. This means that + * all other matrices that are associated + * with this sparsity pattern will + * then have invalid entries. + */ + template + void Tmmult (SparseMatrix &C, + const SparseMatrix &B, + const Vector &V = Vector(), + const bool rebuild_sparsity_pattern = true) const; //@} - /** - * @name Matrix norms - */ + /** + * @name Matrix norms + */ //@{ - /** - * Return the $l_1$-norm of the matrix, - * that is $|M|_1=\max_{\mathrm{all\ - * columns\ }j}\sum_{\mathrm{all\ rows\ - * } i} |M_{ij}|$, (max. sum of - * columns). This is the natural - * matrix norm that is compatible to - * the $l_1$-norm for vectors, i.e. - * $|Mv|_1\leq |M|_1 |v|_1$. - * (cf. Haemmerlin-Hoffmann : - * Numerische Mathematik) - */ - real_type l1_norm () const; - - /** - * Return the $l_\infty$-norm of the - * matrix, that is - * $|M|_\infty=\max_{\mathrm{all\ rows\ - * }i}\sum_{\mathrm{all\ columns\ }j} - * |M_{ij}|$, (max. sum of rows). This - * is the natural matrix norm that is - * compatible to the $l_\infty$-norm of - * vectors, i.e. $|Mv|_\infty \leq - * |M|_\infty |v|_\infty$. - * (cf. Haemmerlin-Hoffmann : - * Numerische Mathematik) - */ - real_type linfty_norm () const; - - /** - * Return the frobenius norm of the - * matrix, i.e. the square root of the - * sum of squares of all entries in the - * matrix. - */ - real_type frobenius_norm () const; + /** + * Return the $l_1$-norm of the matrix, + * that is $|M|_1=\max_{\mathrm{all\ + * columns\ }j}\sum_{\mathrm{all\ rows\ + * } i} |M_{ij}|$, (max. sum of + * columns). This is the natural + * matrix norm that is compatible to + * the $l_1$-norm for vectors, i.e. + * $|Mv|_1\leq |M|_1 |v|_1$. + * (cf. Haemmerlin-Hoffmann : + * Numerische Mathematik) + */ + real_type l1_norm () const; + + /** + * Return the $l_\infty$-norm of the + * matrix, that is + * $|M|_\infty=\max_{\mathrm{all\ rows\ + * }i}\sum_{\mathrm{all\ columns\ }j} + * |M_{ij}|$, (max. sum of rows). This + * is the natural matrix norm that is + * compatible to the $l_\infty$-norm of + * vectors, i.e. $|Mv|_\infty \leq + * |M|_\infty |v|_\infty$. + * (cf. Haemmerlin-Hoffmann : + * Numerische Mathematik) + */ + real_type linfty_norm () const; + + /** + * Return the frobenius norm of the + * matrix, i.e. the square root of the + * sum of squares of all entries in the + * matrix. + */ + real_type frobenius_norm () const; //@} - /** - * @name Preconditioning methods - */ + /** + * @name Preconditioning methods + */ //@{ - /** - * Apply the Jacobi - * preconditioner, which - * multiplies every element of - * the src vector by the - * inverse of the respective - * diagonal element and - * multiplies the result with the - * relaxation factor omega. - */ - template - void precondition_Jacobi (Vector &dst, - const Vector &src, - const number omega = 1.) const; - - /** - * Apply SSOR preconditioning to - * src with damping - * omega. 
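The three norm functions declared above can be compared side by side; a small sketch:

@code
#include <deal.II/lac/sparse_matrix.h>

#include <iostream>

void print_norms (const SparseMatrix<double> &M)
{
  std::cout << "max column sum: " << M.l1_norm ()        << std::endl  // compatible with |v|_1
            << "max row sum:    " << M.linfty_norm ()    << std::endl  // compatible with |v|_infty
            << "Frobenius norm: " << M.frobenius_norm () << std::endl;
}
@endcode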
The optional - * argument - * pos_right_of_diagonal is - * supposed to provide an array where - * each entry specifies the position - * just right of the diagonal in the - * global array of nonzeros. - */ - template - void precondition_SSOR (Vector &dst, - const Vector &src, - const number omega = 1., - const std::vector&pos_right_of_diagonal=std::vector()) const; - - /** - * Apply SOR preconditioning - * matrix to src. - */ - template - void precondition_SOR (Vector &dst, - const Vector &src, - const number om = 1.) const; - - /** - * Apply transpose SOR - * preconditioning matrix to - * src. - */ - template - void precondition_TSOR (Vector &dst, + /** + * Apply the Jacobi + * preconditioner, which + * multiplies every element of + * the src vector by the + * inverse of the respective + * diagonal element and + * multiplies the result with the + * relaxation factor omega. + */ + template + void precondition_Jacobi (Vector &dst, const Vector &src, - const number om = 1.) const; - - /** - * Perform SSOR preconditioning - * in-place. Apply the - * preconditioner matrix without - * copying to a second vector. - * omega is the relaxation - * parameter. - */ - template - void SSOR (Vector &v, - const number omega = 1.) const; - - /** - * Perform an SOR preconditioning - * in-place. omega is - * the relaxation parameter. - */ - template - void SOR (Vector &v, + const number omega = 1.) const; + + /** + * Apply SSOR preconditioning to + * src with damping + * omega. The optional + * argument + * pos_right_of_diagonal is + * supposed to provide an array where + * each entry specifies the position + * just right of the diagonal in the + * global array of nonzeros. + */ + template + void precondition_SSOR (Vector &dst, + const Vector &src, + const number omega = 1., + const std::vector &pos_right_of_diagonal=std::vector()) const; + + /** + * Apply SOR preconditioning + * matrix to src. + */ + template + void precondition_SOR (Vector &dst, + const Vector &src, + const number om = 1.) const; + + /** + * Apply transpose SOR + * preconditioning matrix to + * src. + */ + template + void precondition_TSOR (Vector &dst, + const Vector &src, + const number om = 1.) const; + + /** + * Perform SSOR preconditioning + * in-place. Apply the + * preconditioner matrix without + * copying to a second vector. + * omega is the relaxation + * parameter. + */ + template + void SSOR (Vector &v, + const number omega = 1.) const; + + /** + * Perform an SOR preconditioning + * in-place. omega is + * the relaxation parameter. + */ + template + void SOR (Vector &v, + const number om = 1.) const; + + /** + * Perform a transpose SOR + * preconditioning in-place. + * omega is the + * relaxation parameter. + */ + template + void TSOR (Vector &v, + const number om = 1.) const; + + /** + * Perform a permuted SOR + * preconditioning in-place. + * + * The standard SOR method is + * applied in the order + * prescribed by permutation, + * that is, first the row + * permutation[0], then + * permutation[1] and so + * on. For efficiency reasons, + * the permutation as well as its + * inverse are required. + * + * omega is the + * relaxation parameter. + */ + template + void PSOR (Vector &v, + const std::vector &permutation, + const std::vector &inverse_permutation, + const number om = 1.) const; + + /** + * Perform a transposed permuted SOR + * preconditioning in-place. 
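A sketch of how the preconditioner and relaxation functions documented above are typically invoked; the damping parameters are illustrative choices, not values prescribed by the patch:

@code
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

void apply_smoothers (const SparseMatrix<double> &M,
                      Vector<double>             &v,
                      const Vector<double>       &src)
{
  Vector<double> dst (src.size());

  // dst = omega * D^{-1} * src, the damped Jacobi preconditioner.
  M.precondition_Jacobi (dst, src, 1.0);

  // One SSOR application with damping 1.2.
  M.precondition_SSOR (dst, src, 1.2);

  // In-place SOR sweep on v with relaxation parameter 1.0.
  M.SOR (v, 1.0);
}
@endcode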
+ * + * The transposed SOR method is + * applied in the order + * prescribed by + * permutation, that is, + * first the row + * permutation[m()-1], + * then + * permutation[m()-2] + * and so on. For efficiency + * reasons, the permutation as + * well as its inverse are + * required. + * + * omega is the + * relaxation parameter. + */ + template + void TPSOR (Vector &v, + const std::vector &permutation, + const std::vector &inverse_permutation, const number om = 1.) const; - /** - * Perform a transpose SOR - * preconditioning in-place. - * omega is the - * relaxation parameter. - */ - template - void TSOR (Vector &v, - const number om = 1.) const; - - /** - * Perform a permuted SOR - * preconditioning in-place. - * - * The standard SOR method is - * applied in the order - * prescribed by permutation, - * that is, first the row - * permutation[0], then - * permutation[1] and so - * on. For efficiency reasons, - * the permutation as well as its - * inverse are required. - * - * omega is the - * relaxation parameter. - */ - template - void PSOR (Vector &v, - const std::vector& permutation, - const std::vector& inverse_permutation, - const number om = 1.) const; - - /** - * Perform a transposed permuted SOR - * preconditioning in-place. - * - * The transposed SOR method is - * applied in the order - * prescribed by - * permutation, that is, - * first the row - * permutation[m()-1], - * then - * permutation[m()-2] - * and so on. For efficiency - * reasons, the permutation as - * well as its inverse are - * required. - * - * omega is the - * relaxation parameter. - */ - template - void TPSOR (Vector &v, - const std::vector& permutation, - const std::vector& inverse_permutation, - const number om = 1.) const; - - /** - * Do one Jacobi step on - * v. Performs a direct - * Jacobi step with right hand - * side b. This function - * will need an auxiliary vector, - * which is acquired from - * GrowingVectorMemory. - */ - template - void Jacobi_step (Vector &v, - const Vector &b, - const number om = 1.) const; - - /** - * Do one SOR step on v. - * Performs a direct SOR step - * with right hand side - * b. - */ - template - void SOR_step (Vector &v, - const Vector &b, - const number om = 1.) const; - - /** - * Do one adjoint SOR step on - * v. Performs a direct - * TSOR step with right hand side - * b. - */ - template - void TSOR_step (Vector &v, + /** + * Do one Jacobi step on + * v. Performs a direct + * Jacobi step with right hand + * side b. This function + * will need an auxiliary vector, + * which is acquired from + * GrowingVectorMemory. + */ + template + void Jacobi_step (Vector &v, const Vector &b, const number om = 1.) 
const; diff --cc deal.II/include/deal.II/lac/sparse_matrix.templates.h index b86cd104cb,60f3753857..fba60c974e --- a/deal.II/include/deal.II/lac/sparse_matrix.templates.h +++ b/deal.II/include/deal.II/lac/sparse_matrix.templates.h @@@ -92,11 -92,11 +92,11 @@@ SparseMatrix::SparseMatrix (con template SparseMatrix::SparseMatrix (const SparsityPattern &c, - const IdentityMatrix &id) + const IdentityMatrix &id) - : - cols(0, "SparseMatrix"), - val(0), - max_len(0) + : + cols(0, "SparseMatrix"), + val(0), + max_len(0) { Assert (c.n_rows() == id.m(), ExcDimensionMismatch (c.n_rows(), id.m())); Assert (c.n_cols() == id.n(), ExcDimensionMismatch (c.n_cols(), id.n())); @@@ -1338,19 -1338,19 +1338,19 @@@ SparseMatrix::precondition_Jaco const unsigned int n = src.size(); somenumber *dst_ptr = dst.begin(); const somenumber *src_ptr = src.begin(); - const std::size_t *rowstart_ptr = &cols->rowstart[0]; + const std::size_t *rowstart_ptr = &cols->rowstart[0]; - // optimize the following loop for - // the case that the relaxation - // factor is one. In that case, we - // can save one FP multiplication - // per row - // - // note that for square matrices, - // the diagonal entry is the first - // in each row, i.e. at index - // rowstart[i]. and we do have a - // square matrix by above assertion + // optimize the following loop for + // the case that the relaxation + // factor is one. In that case, we + // can save one FP multiplication + // per row + // + // note that for square matrices, + // the diagonal entry is the first + // in each row, i.e. at index + // rowstart[i]. and we do have a + // square matrix by above assertion if (om != 1.) for (unsigned int i=0; i::precondition_SSO Assert (src.size() == n(), ExcDimensionMismatch (src.size(), n())); const unsigned int n = src.size(); - const std::size_t *rowstart_ptr = &cols->rowstart[0]; + const std::size_t *rowstart_ptr = &cols->rowstart[0]; somenumber *dst_ptr = &dst(0); - // case when we have stored the position - // just right of the diagonal (then we - // don't have to search for it). + // case when we have stored the position + // just right of the diagonal (then we + // don't have to search for it). if (pos_right_of_diagonal.size() != 0) { Assert (pos_right_of_diagonal.size() == dst.size(), diff --cc deal.II/include/deal.II/lac/sparsity_pattern.h index 4e8c8cc280,e0f102402a..0eb8828807 --- a/deal.II/include/deal.II/lac/sparsity_pattern.h +++ b/deal.II/include/deal.II/lac/sparsity_pattern.h @@@ -307,858 -307,858 +307,858 @@@ namespace SparsityPatternIterator */ class SparsityPattern : public Subscriptor { - public: - /** - * Typedef an iterator class that allows - * to walk over all nonzero elements of a - * sparsity pattern. - */ - typedef - SparsityPatternIterators::Iterator - const_iterator; - - /** - * Typedef an iterator class that allows - * to walk over the nonzero elements of a - * row of a sparsity pattern. - */ - typedef - const unsigned int * row_iterator; - - /** - * Typedef an iterator class that allows - * to walk over all nonzero elements of a - * sparsity pattern. - * - * Since the iterator does not allow to - * modify the sparsity pattern, this type - * is the same as that for @p - * const_iterator. - */ - typedef - SparsityPatternIterators::Iterator - iterator; - - - /** - * Define a value which is used - * to indicate that a certain - * value in the #colnums array - * is unused, i.e. does not - * represent a certain column - * number index. 
- * - * Indices with this invalid - * value are used to insert new - * entries to the sparsity - * pattern using the add() member - * function, and are removed when - * calling compress(). - * - * You should not assume that the - * variable declared here has a - * certain value. The - * initialization is given here - * only to enable the compiler to - * perform some optimizations, - * but the actual value of the - * variable may change over time. - */ - static const unsigned int invalid_entry = numbers::invalid_unsigned_int; - - /** - * @name Construction and setup - * Constructors, destructor; functions initializing, copying and filling an object. - */ + public: + /** + * Typedef an iterator class that allows + * to walk over all nonzero elements of a + * sparsity pattern. + */ + typedef + SparsityPatternIterators::Iterator + const_iterator; + + /** + * Typedef an iterator class that allows + * to walk over the nonzero elements of a + * row of a sparsity pattern. + */ + typedef + const unsigned int *row_iterator; + + /** + * Typedef an iterator class that allows + * to walk over all nonzero elements of a + * sparsity pattern. + * + * Since the iterator does not allow to + * modify the sparsity pattern, this type + * is the same as that for @p + * const_iterator. + */ + typedef + SparsityPatternIterators::Iterator + iterator; + + + /** + * Define a value which is used + * to indicate that a certain + * value in the #colnums array + * is unused, i.e. does not + * represent a certain column + * number index. + * + * Indices with this invalid + * value are used to insert new + * entries to the sparsity + * pattern using the add() member + * function, and are removed when + * calling compress(). + * + * You should not assume that the + * variable declared here has a + * certain value. The + * initialization is given here + * only to enable the compiler to + * perform some optimizations, + * but the actual value of the + * variable may change over time. + */ + static const unsigned int invalid_entry = numbers::invalid_unsigned_int; + + /** + * @name Construction and setup + * Constructors, destructor; functions initializing, copying and filling an object. + */ // @{ - /** - * Initialize the matrix empty, - * that is with no memory - * allocated. This is useful if - * you want such objects as - * member variables in other - * classes. You can make the - * structure usable by calling - * the reinit() function. - */ - SparsityPattern (); - - /** - * Copy constructor. This - * constructor is only allowed to - * be called if the matrix - * structure to be copied is - * empty. This is so in order to - * prevent involuntary copies of - * objects for temporaries, which - * can use large amounts of - * computing time. However, copy - * constructors are needed if yo - * want to use the STL data types - * on classes like this, e.g. to - * write such statements like - * v.push_back - * (SparsityPattern());, - * with v a vector of - * SparsityPattern objects. - * - * Usually, it is sufficient to - * use the explicit keyword to - * disallow unwanted temporaries, - * but for the STL vectors, this - * does not work. Since copying a - * structure like this is not - * useful anyway because multiple - * matrices can use the same - * sparsity structure, copies are - * only allowed for empty - * objects, as described above. - */ - SparsityPattern (const SparsityPattern &); - - /** - * Initialize a rectangular - * matrix. 
- * - * @arg m number of rows - * @arg n number of columns - * @arg max_per_row maximum - * number of nonzero entries per row - * - * @arg optimize_diagonal store - * diagonal entries first in row; - * see optimize_diagonal(). This - * takes effect for quadratic - * matrices only. - */ - SparsityPattern (const unsigned int m, - const unsigned int n, - const unsigned int max_per_row, - const bool optimize_diagonal = true); - - /** - * Initialize a rectangular - * matrix. - * - * @arg m number of rows - * @arg n number of columns - * - * @arg row_lengths possible - * number of nonzero entries for - * each row. This vector must - * have one entry for each row. - * - * @arg optimize_diagonal store - * diagonal entries first in row; - * see optimize_diagonal(). This - * takes effect for quadratic - * matrices only. - */ - SparsityPattern (const unsigned int m, - const unsigned int n, - const std::vector& row_lengths, - const bool optimize_diagonal = true); - - /** - * Initialize a quadratic matrix - * of dimension n with - * at most max_per_row - * nonzero entries per row. - * - * This constructor automatically - * enables optimized storage of - * diagonal elements. To avoid - * this, use the constructor - * taking row and column numbers - * separately. - */ - SparsityPattern (const unsigned int n, - const unsigned int max_per_row); - - /** - * Initialize a quadratic matrix. - * - * @arg m number of rows and columns - * - * @arg row_lengths possible - * number of nonzero entries for - * each row. This vector must - * have one entry for each row. - * - * @arg optimize_diagonal store - * diagonal entries first in row; - * see optimize_diagonal(). - */ - SparsityPattern (const unsigned int m, - const std::vector& row_lengths, - const bool optimize_diagonal = true); - - /** - * Make a copy with extra off-diagonals. - * - * This constructs objects intended for - * the application of the ILU(n)-method - * or other incomplete decompositions. - * Therefore, additional to the original - * entry structure, space for - * extra_off_diagonals - * side-diagonals is provided on both - * sides of the main diagonal. - * - * max_per_row is the - * maximum number of nonzero - * elements per row which this - * structure is to hold. It is - * assumed that this number is - * sufficiently large to - * accommodate both the elements - * in original as well - * as the new off-diagonal - * elements created by this - * constructor. You will usually - * want to give the same number - * as you gave for - * original plus the - * number of side diagonals times - * two. You may however give a - * larger value if you wish to - * add further nonzero entries - * for the decomposition based on - * other criteria than their - * being on side-diagonals. - * - * This function requires that - * original refers to a - * quadratic matrix structure. - * It must be compressed. The - * matrix structure is not - * compressed after this function - * finishes. - */ - SparsityPattern (const SparsityPattern &original, - const unsigned int max_per_row, - const unsigned int extra_off_diagonals); - - /** - * Destructor. - */ - ~SparsityPattern (); - - /** - * Copy operator. For this the - * same holds as for the copy - * constructor: it is declared, - * defined and fine to be called, - * but the latter only for empty - * objects. 
- */ - SparsityPattern & operator = (const SparsityPattern &); - - /** - * Reallocate memory and set up data - * structures for a new matrix with - * m rows and n columns, - * with at most max_per_row - * nonzero entries per row. - * - * This function simply maps its - * operations to the other - * reinit function. - */ - void reinit (const unsigned int m, - const unsigned int n, - const unsigned int max_per_row, - const bool optimize_diagonal = true); - - /** - * Reallocate memory for a matrix - * of size m x n. The - * number of entries for each row - * is taken from the array - * row_lengths which has to - * give this number of each row - * i=1...m. - * - * If m*n==0 all memory is freed, - * resulting in a total reinitialization - * of the object. If it is nonzero, new - * memory is only allocated if the new - * size extends the old one. This is done - * to save time and to avoid fragmentation - * of the heap. - * - * If the number of rows equals - * the number of columns and the - * last parameter is true, - * diagonal elements are stored - * first in each row to allow - * optimized access in relaxation - * methods of SparseMatrix. - */ - void reinit (const unsigned int m, - const unsigned int n, - const std::vector &row_lengths, - const bool optimize_diagonal = true); - - /** - * Same as above, but with a - * VectorSlice argument instead. - */ - void reinit (const unsigned int m, - const unsigned int n, - const VectorSlice > &row_lengths, - const bool optimize_diagonal = true); - - /** - * This function compresses the sparsity - * structure that this object represents. - * It does so by eliminating unused - * entries and sorting the remaining ones - * to allow faster access by usage of - * binary search algorithms. A special - * sorting scheme is used for the - * diagonal entry of quadratic matrices, - * which is always the first entry of - * each row. - * - * The memory which is no more - * needed is released. - * - * SparseMatrix objects require the - * SparsityPattern objects they are - * initialized with to be compressed, to - * reduce memory requirements. - */ - void compress (); - - /** - * This function can be used as a - * replacement for reinit(), - * subsequent calls to add() and - * a final call to close() if you - * know exactly in advance the - * entries that will form the - * matrix sparsity pattern. - * - * The first two parameters - * determine the size of the - * matrix. For the two last ones, - * note that a sparse matrix can - * be described by a sequence of - * rows, each of which is - * represented by a sequence of - * pairs of column indices and - * values. In the present - * context, the begin() and - * end() parameters designate - * iterators (of forward iterator - * type) into a container, one - * representing one row. The - * distance between begin() - * and end() should therefore - * be equal to - * n_rows(). These iterators - * may be iterators of - * std::vector, - * std::list, pointers into a - * C-style array, or any other - * iterator satisfying the - * requirements of a forward - * iterator. The objects pointed - * to by these iterators - * (i.e. what we get after - * applying operator* or - * operator-> to one of these - * iterators) must be a container - * itself that provides functions - * begin and end - * designating a range of - * iterators that describe the - * contents of one - * line. 
Dereferencing these - * inner iterators must either - * yield a pair of an unsigned - * integer as column index and a - * value of arbitrary type (such - * a type would be used if we - * wanted to describe a sparse - * matrix with one such object), - * or simply an unsigned integer - * (of we only wanted to describe - * a sparsity pattern). The - * function is able to determine - * itself whether an unsigned - * integer or a pair is what we - * get after dereferencing the - * inner iterators, through some - * template magic. - * - * While the order of the outer - * iterators denotes the - * different rows of the matrix, - * the order of the inner - * iterator denoting the columns - * does not matter, as they are - * sorted internal to this - * function anyway. - * - * Since that all sounds very - * complicated, consider the - * following example code, which - * may be used to fill a sparsity - * pattern: - * @code - * std::vector > column_indices (n_rows); - * for (unsigned int row=0; rowbegin and - * end (namely - * std::vectors), and the - * inner iterators dereferenced - * yield unsigned integers as - * column indices. Note that we - * could have replaced each of - * the two std::vector - * occurrences by std::list, - * and the inner one by - * std::set as well. - * - * Another example would be as - * follows, where we initialize a - * whole matrix, not only a - * sparsity pattern: - * @code - * std::vector > entries (n_rows); - * for (unsigned int row=0; rowstd::vector - * could be replaced by - * std::list, and the inner - * std::map - * could be replaced by - * std::vector >, - * or a list or set of such - * pairs, as they all return - * iterators that point to such - * pairs. - */ - template - void copy_from (const unsigned int n_rows, - const unsigned int n_cols, - const ForwardIterator begin, - const ForwardIterator end, - const bool optimize_diagonal = true); - - /** - * Copy data from an object of type - * CompressedSparsityPattern, - * CompressedSetSparsityPattern or - * CompressedSimpleSparsityPattern. - * Previous content of this object is - * lost, and the sparsity pattern is in - * compressed mode afterwards. - */ - template - void copy_from (const CompressedSparsityType &csp, - const bool optimize_diagonal = true); - - /** - * Take a full matrix and use its - * nonzero entries to generate a - * sparse matrix entry pattern - * for this object. - * - * Previous content of this - * object is lost, and the - * sparsity pattern is in - * compressed mode afterwards. - */ - template - void copy_from (const FullMatrix &matrix, - const bool optimize_diagonal = true); - - /** - * Make the sparsity pattern - * symmetric by adding the - * sparsity pattern of the - * transpose object. - * - * This function throws an - * exception if the sparsity - * pattern does not represent a - * quadratic matrix. - */ - void symmetrize (); - - /** - * Add a nonzero entry to the matrix. - * This function may only be called - * for non-compressed sparsity patterns. - * - * If the entry already exists, nothing - * bad happens. - */ - void add (const unsigned int i, - const unsigned int j); - - /** - * Add several nonzero entries to the - * specified matrix row. This function - * may only be called for - * non-compressed sparsity patterns. - * - * If some of the entries already - * exist, nothing bad happens. 
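The constructors, add() and compress() documented in this hunk form the usual life cycle of a SparsityPattern: size it, add entries, compress, and only then attach matrices to it. A minimal sketch with illustrative sizes and a simple tridiagonal structure:

@code
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/sparsity_pattern.h>

void build_pattern_and_matrix ()
{
  // Quadratic 100x100 pattern with at most 5 nonzeros per row; with the
  // default optimize_diagonal == true the diagonal is stored first in
  // each row.
  SparsityPattern sparsity (100, 100, 5);

  for (unsigned int i=0; i<100; ++i)
    {
      sparsity.add (i, i);
      if (i > 0)  sparsity.add (i, i-1);
      if (i < 99) sparsity.add (i, i+1);
    }

  // Sort the entries and release unused memory; SparseMatrix requires a
  // compressed pattern.
  sparsity.compress ();

  SparseMatrix<double> matrix (sparsity);
}
@endcode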
- */ - template - void add_entries (const unsigned int row, - ForwardIterator begin, - ForwardIterator end, - const bool indices_are_sorted = false); + /** + * Initialize the matrix empty, + * that is with no memory + * allocated. This is useful if + * you want such objects as + * member variables in other + * classes. You can make the + * structure usable by calling + * the reinit() function. + */ + SparsityPattern (); + + /** + * Copy constructor. This + * constructor is only allowed to + * be called if the matrix + * structure to be copied is + * empty. This is so in order to + * prevent involuntary copies of + * objects for temporaries, which + * can use large amounts of + * computing time. However, copy + * constructors are needed if yo + * want to use the STL data types + * on classes like this, e.g. to + * write such statements like + * v.push_back + * (SparsityPattern());, + * with v a vector of + * SparsityPattern objects. + * + * Usually, it is sufficient to + * use the explicit keyword to + * disallow unwanted temporaries, + * but for the STL vectors, this + * does not work. Since copying a + * structure like this is not + * useful anyway because multiple + * matrices can use the same + * sparsity structure, copies are + * only allowed for empty + * objects, as described above. + */ + SparsityPattern (const SparsityPattern &); + + /** + * Initialize a rectangular + * matrix. + * + * @arg m number of rows + * @arg n number of columns + * @arg max_per_row maximum + * number of nonzero entries per row + * + * @arg optimize_diagonal store + * diagonal entries first in row; + * see optimize_diagonal(). This + * takes effect for quadratic + * matrices only. + */ + SparsityPattern (const unsigned int m, + const unsigned int n, + const unsigned int max_per_row, + const bool optimize_diagonal = true); + + /** + * Initialize a rectangular + * matrix. + * + * @arg m number of rows + * @arg n number of columns + * + * @arg row_lengths possible + * number of nonzero entries for + * each row. This vector must + * have one entry for each row. + * + * @arg optimize_diagonal store + * diagonal entries first in row; + * see optimize_diagonal(). This + * takes effect for quadratic + * matrices only. + */ + SparsityPattern (const unsigned int m, + const unsigned int n, + const std::vector &row_lengths, + const bool optimize_diagonal = true); + + /** + * Initialize a quadratic matrix + * of dimension n with + * at most max_per_row + * nonzero entries per row. + * + * This constructor automatically + * enables optimized storage of + * diagonal elements. To avoid + * this, use the constructor + * taking row and column numbers + * separately. + */ + SparsityPattern (const unsigned int n, + const unsigned int max_per_row); + + /** + * Initialize a quadratic matrix. + * + * @arg m number of rows and columns + * + * @arg row_lengths possible + * number of nonzero entries for + * each row. This vector must + * have one entry for each row. + * + * @arg optimize_diagonal store + * diagonal entries first in row; + * see optimize_diagonal(). + */ + SparsityPattern (const unsigned int m, + const std::vector &row_lengths, + const bool optimize_diagonal = true); + + /** + * Make a copy with extra off-diagonals. + * + * This constructs objects intended for + * the application of the ILU(n)-method + * or other incomplete decompositions. + * Therefore, additional to the original + * entry structure, space for + * extra_off_diagonals + * side-diagonals is provided on both + * sides of the main diagonal. 
+ * + * max_per_row is the + * maximum number of nonzero + * elements per row which this + * structure is to hold. It is + * assumed that this number is + * sufficiently large to + * accommodate both the elements + * in original as well + * as the new off-diagonal + * elements created by this + * constructor. You will usually + * want to give the same number + * as you gave for + * original plus the + * number of side diagonals times + * two. You may however give a + * larger value if you wish to + * add further nonzero entries + * for the decomposition based on + * other criteria than their + * being on side-diagonals. + * + * This function requires that + * original refers to a + * quadratic matrix structure. + * It must be compressed. The + * matrix structure is not + * compressed after this function + * finishes. + */ - SparsityPattern (const SparsityPattern &original, ++ SparsityPattern (const SparsityPattern &original, + const unsigned int max_per_row, + const unsigned int extra_off_diagonals); + + /** + * Destructor. + */ + ~SparsityPattern (); + + /** + * Copy operator. For this the + * same holds as for the copy + * constructor: it is declared, + * defined and fine to be called, + * but the latter only for empty + * objects. + */ + SparsityPattern &operator = (const SparsityPattern &); + + /** + * Reallocate memory and set up data + * structures for a new matrix with + * m rows and n columns, + * with at most max_per_row + * nonzero entries per row. + * + * This function simply maps its + * operations to the other + * reinit function. + */ + void reinit (const unsigned int m, + const unsigned int n, + const unsigned int max_per_row, + const bool optimize_diagonal = true); + + /** + * Reallocate memory for a matrix + * of size m x n. The + * number of entries for each row + * is taken from the array + * row_lengths which has to + * give this number of each row + * i=1...m. + * + * If m*n==0 all memory is freed, + * resulting in a total reinitialization + * of the object. If it is nonzero, new + * memory is only allocated if the new + * size extends the old one. This is done + * to save time and to avoid fragmentation + * of the heap. + * + * If the number of rows equals + * the number of columns and the + * last parameter is true, + * diagonal elements are stored + * first in each row to allow + * optimized access in relaxation + * methods of SparseMatrix. + */ + void reinit (const unsigned int m, + const unsigned int n, + const std::vector &row_lengths, + const bool optimize_diagonal = true); + + /** + * Same as above, but with a + * VectorSlice argument instead. + */ + void reinit (const unsigned int m, + const unsigned int n, + const VectorSlice > &row_lengths, + const bool optimize_diagonal = true); + + /** + * This function compresses the sparsity + * structure that this object represents. + * It does so by eliminating unused + * entries and sorting the remaining ones + * to allow faster access by usage of + * binary search algorithms. A special + * sorting scheme is used for the + * diagonal entry of quadratic matrices, + * which is always the first entry of + * each row. + * + * The memory which is no more + * needed is released. + * + * SparseMatrix objects require the + * SparsityPattern objects they are + * initialized with to be compressed, to + * reduce memory requirements. 
+ */ + void compress (); + + /** + * This function can be used as a + * replacement for reinit(), + * subsequent calls to add() and + * a final call to close() if you + * know exactly in advance the + * entries that will form the + * matrix sparsity pattern. + * + * The first two parameters + * determine the size of the + * matrix. For the two last ones, + * note that a sparse matrix can + * be described by a sequence of + * rows, each of which is + * represented by a sequence of + * pairs of column indices and + * values. In the present + * context, the begin() and + * end() parameters designate + * iterators (of forward iterator + * type) into a container, one + * representing one row. The + * distance between begin() + * and end() should therefore + * be equal to + * n_rows(). These iterators + * may be iterators of + * std::vector, + * std::list, pointers into a + * C-style array, or any other + * iterator satisfying the + * requirements of a forward + * iterator. The objects pointed + * to by these iterators + * (i.e. what we get after + * applying operator* or + * operator-> to one of these + * iterators) must be a container + * itself that provides functions + * begin and end + * designating a range of + * iterators that describe the + * contents of one + * line. Dereferencing these + * inner iterators must either + * yield a pair of an unsigned + * integer as column index and a + * value of arbitrary type (such + * a type would be used if we + * wanted to describe a sparse + * matrix with one such object), + * or simply an unsigned integer + * (of we only wanted to describe + * a sparsity pattern). The + * function is able to determine + * itself whether an unsigned + * integer or a pair is what we + * get after dereferencing the + * inner iterators, through some + * template magic. + * + * While the order of the outer + * iterators denotes the + * different rows of the matrix, + * the order of the inner + * iterator denoting the columns + * does not matter, as they are + * sorted internal to this + * function anyway. + * + * Since that all sounds very + * complicated, consider the + * following example code, which + * may be used to fill a sparsity + * pattern: + * @code + * std::vector > column_indices (n_rows); + * for (unsigned int row=0; rowbegin and + * end (namely + * std::vectors), and the + * inner iterators dereferenced + * yield unsigned integers as + * column indices. Note that we + * could have replaced each of + * the two std::vector + * occurrences by std::list, + * and the inner one by + * std::set as well. + * + * Another example would be as + * follows, where we initialize a + * whole matrix, not only a + * sparsity pattern: + * @code + * std::vector > entries (n_rows); + * for (unsigned int row=0; rowstd::vector + * could be replaced by + * std::list, and the inner + * std::map + * could be replaced by + * std::vector >, + * or a list or set of such + * pairs, as they all return + * iterators that point to such + * pairs. + */ + template + void copy_from (const unsigned int n_rows, + const unsigned int n_cols, + const ForwardIterator begin, + const ForwardIterator end, + const bool optimize_diagonal = true); + + /** + * Copy data from an object of type + * CompressedSparsityPattern, + * CompressedSetSparsityPattern or + * CompressedSimpleSparsityPattern. + * Previous content of this object is + * lost, and the sparsity pattern is in + * compressed mode afterwards. 
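Spelled out with the template arguments the @code example above relies on (the outer and inner containers both being std::vector), and with a made-up fill loop, the first copy_from() example reads roughly as follows:

  #include <deal.II/lac/sparsity_pattern.h>
  #include <vector>

  void copy_from_sketch ()
  {
    const unsigned int n_rows = 5, n_cols = 5;

    // one inner vector of column indices per row
    std::vector<std::vector<unsigned int> > column_indices (n_rows);
    for (unsigned int row=0; row<n_rows; ++row)
      {
        column_indices[row].push_back (row);        // diagonal entry
        if (row+1 < n_cols)
          column_indices[row].push_back (row+1);    // first upper side-diagonal
      }

    dealii::SparsityPattern sparsity;
    sparsity.copy_from (n_rows, n_cols,
                        column_indices.begin (),
                        column_indices.end ());
    // copy_from() replaces the reinit()/add()/compress() sequence, so the
    // pattern is ready for use here
  }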
+ */ + template + void copy_from (const CompressedSparsityType &csp, + const bool optimize_diagonal = true); + + /** + * Take a full matrix and use its + * nonzero entries to generate a + * sparse matrix entry pattern + * for this object. + * + * Previous content of this + * object is lost, and the + * sparsity pattern is in + * compressed mode afterwards. + */ + template + void copy_from (const FullMatrix &matrix, + const bool optimize_diagonal = true); + + /** + * Make the sparsity pattern + * symmetric by adding the + * sparsity pattern of the + * transpose object. + * + * This function throws an + * exception if the sparsity + * pattern does not represent a + * quadratic matrix. + */ + void symmetrize (); + + /** + * Add a nonzero entry to the matrix. + * This function may only be called + * for non-compressed sparsity patterns. + * + * If the entry already exists, nothing + * bad happens. + */ + void add (const unsigned int i, + const unsigned int j); + + /** + * Add several nonzero entries to the + * specified matrix row. This function + * may only be called for + * non-compressed sparsity patterns. + * + * If some of the entries already + * exist, nothing bad happens. + */ + template + void add_entries (const unsigned int row, + ForwardIterator begin, + ForwardIterator end, + const bool indices_are_sorted = false); // @} - /** - * @name Iterators - */ + /** + * @name Iterators + */ // @{ - /** - * STL-like iterator with the first entry - * of the matrix. The resulting iterator - * can be used to walk over all nonzero - * entries of the sparsity pattern. - */ - inline iterator begin () const; - - /** - * Final iterator. - */ - inline iterator end () const; - - /** - * STL-like iterator with the first entry - * of row r. - * - * Note that if the given row is empty, - * i.e. does not contain any nonzero - * entries, then the iterator returned by - * this function equals - * end(r). Note also that the - * iterator may not be dereferencable in - * that case. - */ - inline iterator begin (const unsigned int r) const; - - /** - * Final iterator of row r. It - * points to the first element past the - * end of line @p r, or past the end of - * the entire sparsity pattern. - * - * Note that the end iterator is not - * necessarily dereferencable. This is in - * particular the case if it is the end - * iterator for the last row of a matrix. - */ - inline iterator end (const unsigned int r) const; - - /** - * STL-like iterator with the first entry - * of row r. - * - * Note that if the given row is empty, - * i.e. does not contain any nonzero - * entries, then the iterator returned by - * this function equals - * end(r). Note also that the - * iterator may not be dereferencable in - * that case. - */ - inline row_iterator row_begin (const unsigned int r) const; - - /** - * Final iterator of row r. It - * points to the first element past the - * end of line @p r, or past the end of - * the entire sparsity pattern. - * - * Note that the end iterator is not - * necessarily dereferencable. This is in - * particular the case if it is the end - * iterator for the last row of a matrix. - */ - inline row_iterator row_end (const unsigned int r) const; + /** + * STL-like iterator with the first entry + * of the matrix. The resulting iterator + * can be used to walk over all nonzero + * entries of the sparsity pattern. + */ + inline iterator begin () const; + + /** + * Final iterator. + */ + inline iterator end () const; + + /** + * STL-like iterator with the first entry + * of row r. 
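A small sketch of building a pattern with add(), add_entries() and symmetrize() before the final compress(); the sizes and entries are arbitrary:

  #include <deal.II/lac/sparsity_pattern.h>

  void build_pattern_sketch ()
  {
    dealii::SparsityPattern pattern (4, 4, 3);    // 4x4, at most 3 entries per row

    pattern.add (0, 1);                           // single entries
    pattern.add (3, 2);

    // several entries of one row at once; any forward iterator range works
    const unsigned int columns_of_row_1[2] = {0, 2};
    pattern.add_entries (1, &columns_of_row_1[0], &columns_of_row_1[0] + 2);

    pattern.symmetrize ();                        // add the transposed entries
    pattern.compress ();                          // required before use in a SparseMatrix
  }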
+ * + * Note that if the given row is empty, + * i.e. does not contain any nonzero + * entries, then the iterator returned by + * this function equals + * end(r). Note also that the + * iterator may not be dereferencable in + * that case. + */ + inline iterator begin (const unsigned int r) const; + + /** + * Final iterator of row r. It + * points to the first element past the + * end of line @p r, or past the end of + * the entire sparsity pattern. + * + * Note that the end iterator is not + * necessarily dereferencable. This is in + * particular the case if it is the end + * iterator for the last row of a matrix. + */ + inline iterator end (const unsigned int r) const; + + /** + * STL-like iterator with the first entry + * of row r. + * + * Note that if the given row is empty, + * i.e. does not contain any nonzero + * entries, then the iterator returned by + * this function equals + * end(r). Note also that the + * iterator may not be dereferencable in + * that case. + */ + inline row_iterator row_begin (const unsigned int r) const; + + /** + * Final iterator of row r. It + * points to the first element past the + * end of line @p r, or past the end of + * the entire sparsity pattern. + * + * Note that the end iterator is not + * necessarily dereferencable. This is in + * particular the case if it is the end + * iterator for the last row of a matrix. + */ + inline row_iterator row_end (const unsigned int r) const; // @} - /** - * @name Querying information - */ + /** + * @name Querying information + */ // @{ - /** - * Test for equality of two SparsityPatterns. - */ - bool operator == (const SparsityPattern &) const; - - /** - * Return whether the object is empty. It - * is empty if no memory is allocated, - * which is the same as that both - * dimensions are zero. - */ - bool empty () const; - - /** - * Return the maximum number of entries per - * row. Before compression, this equals the - * number given to the constructor, while - * after compression, it equals the maximum - * number of entries actually allocated by - * the user. - */ - unsigned int max_entries_per_row () const; - - /** - * Compute the bandwidth of the matrix - * represented by this structure. The - * bandwidth is the maximum of $|i-j|$ - * for which the index pair $(i,j)$ - * represents a nonzero entry of the - * matrix. Consequently, the maximum - * bandwidth a $n\times m$ matrix can - * have is $\max\{n-1,m-1\}$. - */ - unsigned int bandwidth () const; - - /** - * Return the number of nonzero elements of - * this matrix. Actually, it returns the - * number of entries in the sparsity - * pattern; if any of the entries should - * happen to be zero, it is counted - * anyway. - * - * This function may only be called if the - * matrix struct is compressed. It does not - * make too much sense otherwise anyway. - */ - std::size_t n_nonzero_elements () const; - - /** - * Return whether the structure is - * compressed or not. - */ - bool is_compressed () const; - - /** - * Return number of rows of this - * matrix, which equals the dimension - * of the image space. - */ - inline unsigned int n_rows () const; - - /** - * Return number of columns of this - * matrix, which equals the dimension - * of the range space. - */ - inline unsigned int n_cols () const; - - /** - * Number of entries in a specific row. - */ - unsigned int row_length (const unsigned int row) const; - - /** - * Determine whether the matrix - * uses special convention for - * quadratic matrices. 
- * - * A return value true means - * that diagonal elements are stored - * first in each row. A number of - * functions in this class and the - * library in general, for example - * relaxation methods like Jacobi() and - * SOR(), require this to make their - * operations more efficient, since they - * need to quickly access the diagonal - * elements and do not have to search for - * them if they are the first element of - * each row. A side effect of this scheme - * is that each row contains at least one - * element, even if the row is empty - * (i.e. the diagonal element exists, but - * has value zero). - * - * A return value false means - * that diagonal elements are stored - * anywhere in the row, or not at all. In - * particular, a row or even the whole - * matrix may be empty. This can be used - * if you have block matrices where the - * off-diagonal blocks are quadratic but - * are never used for operations like the - * ones mentioned above. In this case, - * some memory can be saved by not using - * the diagonal storage optimization. - */ - bool optimize_diagonal () const; - - /** - * Return whether this object stores only - * those entries that have been added - * explicitly, or if the sparsity pattern - * contains elements that have been added - * through other means (implicitly) while - * building it. For the current class, - * the result is true iff optimize_diag - * in the constructor or reinit() calls - * has been set to false, or if the - * represented matrix is not square. - * - * This function mainly serves the - * purpose of describing the current - * class in cases where several kinds of - * sparsity patterns can be passed as - * template arguments. - */ - bool stores_only_added_elements () const; - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. See - * MemoryConsumption. - */ - std::size_t memory_consumption () const; + /** + * Test for equality of two SparsityPatterns. + */ + bool operator == (const SparsityPattern &) const; + + /** + * Return whether the object is empty. It + * is empty if no memory is allocated, + * which is the same as that both + * dimensions are zero. + */ + bool empty () const; + + /** + * Return the maximum number of entries per + * row. Before compression, this equals the + * number given to the constructor, while + * after compression, it equals the maximum + * number of entries actually allocated by + * the user. + */ + unsigned int max_entries_per_row () const; + + /** + * Compute the bandwidth of the matrix + * represented by this structure. The + * bandwidth is the maximum of $|i-j|$ + * for which the index pair $(i,j)$ + * represents a nonzero entry of the + * matrix. Consequently, the maximum + * bandwidth a $n\times m$ matrix can + * have is $\max\{n-1,m-1\}$. + */ + unsigned int bandwidth () const; + + /** + * Return the number of nonzero elements of + * this matrix. Actually, it returns the + * number of entries in the sparsity + * pattern; if any of the entries should + * happen to be zero, it is counted + * anyway. + * + * This function may only be called if the + * matrix struct is compressed. It does not + * make too much sense otherwise anyway. + */ + std::size_t n_nonzero_elements () const; + + /** + * Return whether the structure is + * compressed or not. + */ + bool is_compressed () const; + + /** + * Return number of rows of this + * matrix, which equals the dimension + * of the image space. 
+ */ + inline unsigned int n_rows () const; + + /** + * Return number of columns of this + * matrix, which equals the dimension + * of the range space. + */ + inline unsigned int n_cols () const; + + /** + * Number of entries in a specific row. + */ + unsigned int row_length (const unsigned int row) const; + + /** + * Determine whether the matrix + * uses special convention for + * quadratic matrices. + * + * A return value true means + * that diagonal elements are stored + * first in each row. A number of + * functions in this class and the + * library in general, for example + * relaxation methods like Jacobi() and + * SOR(), require this to make their + * operations more efficient, since they + * need to quickly access the diagonal + * elements and do not have to search for + * them if they are the first element of + * each row. A side effect of this scheme + * is that each row contains at least one + * element, even if the row is empty + * (i.e. the diagonal element exists, but + * has value zero). + * + * A return value false means + * that diagonal elements are stored + * anywhere in the row, or not at all. In + * particular, a row or even the whole + * matrix may be empty. This can be used + * if you have block matrices where the + * off-diagonal blocks are quadratic but + * are never used for operations like the + * ones mentioned above. In this case, + * some memory can be saved by not using + * the diagonal storage optimization. + */ + bool optimize_diagonal () const; + + /** + * Return whether this object stores only + * those entries that have been added + * explicitly, or if the sparsity pattern + * contains elements that have been added + * through other means (implicitly) while + * building it. For the current class, + * the result is true iff optimize_diag + * in the constructor or reinit() calls + * has been set to false, or if the + * represented matrix is not square. + * + * This function mainly serves the + * purpose of describing the current + * class in cases where several kinds of + * sparsity patterns can be passed as + * template arguments. + */ + bool stores_only_added_elements () const; + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. See + * MemoryConsumption. + */ + std::size_t memory_consumption () const; // @} - /** - * @name Accessing entries - */ + /** + * @name Accessing entries + */ // @{ - /** - * Return the index of the matrix - * element with row number i - * and column number j. If - * the matrix element is not a - * nonzero one, return - * SparsityPattern::invalid_entry. - * - * This function is usually - * called by the - * SparseMatrix::operator()(). It - * may only be called for - * compressed sparsity patterns, - * since in this case searching - * whether the entry exists can - * be done quite fast with a - * binary sort algorithm because - * the column numbers are sorted. - * - * If m is the number of - * entries in row, then the - * complexity of this function is - * log(m) if the sparsity - * pattern is compressed. - * - * @deprecated Use - * SparseMatrix::const_iterator - */ - unsigned int operator() (const unsigned int i, - const unsigned int j) const; - - /** - * This is the inverse operation - * to operator()(): given a - * global index, find out row and - * column of the matrix entry to - * which it belongs. The returned - * value is the pair composed of - * row and column index. - * - * This function may only be - * called if the sparsity pattern - * is closed. 
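A hedged sketch of the query functions declared above, assuming a pattern that has already been compressed; the output statements are for illustration only:

  #include <deal.II/lac/sparsity_pattern.h>
  #include <iostream>

  void query_sketch (const dealii::SparsityPattern &pattern)
  {
    if (pattern.empty () || !pattern.is_compressed ())
      return;

    std::cout << pattern.n_rows () << " x " << pattern.n_cols ()
              << " pattern, " << pattern.n_nonzero_elements ()
              << " stored entries, bandwidth " << pattern.bandwidth ()
              << std::endl;

    // number of entries stored for the first row
    std::cout << "row 0 holds " << pattern.row_length (0) << " entries"
              << std::endl;

    // global index of entry (0,0), or SparsityPattern::invalid_entry if the
    // entry is not part of the pattern
    if (pattern.n_cols () > 0)
      std::cout << "index of (0,0): " << pattern (0, 0) << std::endl;
  }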
The global index - * must then be between zero and - * n_nonzero_elements(). - * - * If N is the number of - * rows of this matrix, then the - * complexity of this function is - * log(N). - */ - std::pair - matrix_position (const unsigned int global_index) const; - - /** - * Check if a value at a certain - * position may be non-zero. - */ - bool exists (const unsigned int i, - const unsigned int j) const; - - /** - * The index of a global matrix - * entry in its row. - * - * This function is analogous to - * operator(), but it computes - * the index not with respect to - * the total field, but only with - * respect to the row j. - */ - unsigned int row_position(const unsigned int i, - const unsigned int j) const; - - /** - * Access to column number field. - * Return the column number of - * the indexth entry in - * row. Note that if - * diagonal elements are - * optimized, the first element - * in each row is the diagonal - * element, - * i.e. column_number(row,0)==row. - * - * If the sparsity pattern is - * already compressed, then - * (except for the diagonal - * element), the entries are - * sorted by columns, - * i.e. column_number(row,i) - * < column_number(row,i+1). - */ - unsigned int column_number (const unsigned int row, - const unsigned int index) const; + /** + * Return the index of the matrix + * element with row number i + * and column number j. If + * the matrix element is not a + * nonzero one, return + * SparsityPattern::invalid_entry. + * + * This function is usually + * called by the + * SparseMatrix::operator()(). It + * may only be called for + * compressed sparsity patterns, + * since in this case searching + * whether the entry exists can + * be done quite fast with a + * binary sort algorithm because + * the column numbers are sorted. + * + * If m is the number of + * entries in row, then the + * complexity of this function is + * log(m) if the sparsity + * pattern is compressed. + * + * @deprecated Use + * SparseMatrix::const_iterator + */ + unsigned int operator() (const unsigned int i, + const unsigned int j) const; + + /** + * This is the inverse operation + * to operator()(): given a + * global index, find out row and + * column of the matrix entry to + * which it belongs. The returned + * value is the pair composed of + * row and column index. + * + * This function may only be + * called if the sparsity pattern + * is closed. The global index + * must then be between zero and + * n_nonzero_elements(). + * + * If N is the number of + * rows of this matrix, then the + * complexity of this function is + * log(N). + */ + std::pair + matrix_position (const unsigned int global_index) const; + + /** + * Check if a value at a certain + * position may be non-zero. + */ + bool exists (const unsigned int i, + const unsigned int j) const; + + /** + * The index of a global matrix + * entry in its row. + * + * This function is analogous to + * operator(), but it computes + * the index not with respect to + * the total field, but only with + * respect to the row j. + */ + unsigned int row_position(const unsigned int i, + const unsigned int j) const; + + /** + * Access to column number field. + * Return the column number of + * the indexth entry in + * row. Note that if + * diagonal elements are + * optimized, the first element + * in each row is the diagonal + * element, + * i.e. column_number(row,0)==row. + * + * If the sparsity pattern is + * already compressed, then + * (except for the diagonal + * element), the entries are + * sorted by columns, + * i.e. 
column_number(row,i) + * < column_number(row,i+1). + */ + unsigned int column_number (const unsigned int row, + const unsigned int index) const; // @} diff --cc deal.II/include/deal.II/lac/trilinos_block_sparse_matrix.h index 767c2082a4,0680b3c883..b49b57eb6c --- a/deal.II/include/deal.II/lac/trilinos_block_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/trilinos_block_sparse_matrix.h @@@ -41,588 -41,588 +41,588 @@@ template class BlockS namespace TrilinosWrappers { - /*! @addtogroup TrilinosWrappers - *@{ - */ - - /** - * Blocked sparse matrix based on the TrilinosWrappers::SparseMatrix class. This - * class implements the functions that are specific to the Trilinos SparseMatrix - * base objects for a blocked sparse matrix, and leaves the actual work - * relaying most of the calls to the individual blocks to the functions - * implemented in the base class. See there also for a description of when - * this class is useful. - * - * In contrast to the deal.II-type SparseMatrix class, the Trilinos matrices do - * not have external objects for the sparsity patterns. Thus, one does not - * determine the size of the individual blocks of a block matrix of this type - * by attaching a block sparsity pattern, but by calling reinit() to set the - * number of blocks and then by setting the size of each block separately. In - * order to fix the data structures of the block matrix, it is then necessary - * to let it know that we have changed the sizes of the underlying - * matrices. For this, one has to call the collect_sizes() function, for much - * the same reason as is documented with the BlockSparsityPattern class. - * - * @ingroup Matrix1 - * @see @ref GlossBlockLA "Block (linear algebra)" - * @author Martin Kronbichler, Wolfgang Bangerth, 2008 - */ + /*! @addtogroup TrilinosWrappers + *@{ + */ + + /** + * Blocked sparse matrix based on the TrilinosWrappers::SparseMatrix class. This + * class implements the functions that are specific to the Trilinos SparseMatrix + * base objects for a blocked sparse matrix, and leaves the actual work + * relaying most of the calls to the individual blocks to the functions + * implemented in the base class. See there also for a description of when + * this class is useful. + * + * In contrast to the deal.II-type SparseMatrix class, the Trilinos matrices do + * not have external objects for the sparsity patterns. Thus, one does not + * determine the size of the individual blocks of a block matrix of this type + * by attaching a block sparsity pattern, but by calling reinit() to set the + * number of blocks and then by setting the size of each block separately. In + * order to fix the data structures of the block matrix, it is then necessary + * to let it know that we have changed the sizes of the underlying + * matrices. For this, one has to call the collect_sizes() function, for much + * the same reason as is documented with the BlockSparsityPattern class. + * + * @ingroup Matrix1 + * @see @ref GlossBlockLA "Block (linear algebra)" + * @author Martin Kronbichler, Wolfgang Bangerth, 2008 + */ class BlockSparseMatrix : public BlockMatrixBase { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockMatrixBase BaseClass; - - /** - * Typedef the type of the underlying - * matrix. - */ - typedef BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. 
- */ - typedef BaseClass::value_type value_type; - typedef BaseClass::pointer pointer; - typedef BaseClass::const_pointer const_pointer; - typedef BaseClass::reference reference; - typedef BaseClass::const_reference const_reference; - typedef BaseClass::size_type size_type; - typedef BaseClass::iterator iterator; - typedef BaseClass::const_iterator const_iterator; - - /** - * Constructor; initializes the - * matrix to be empty, without - * any structure, i.e. the - * matrix is not usable at - * all. This constructor is - * therefore only useful for - * matrices which are members of - * a class. All other matrices - * should be created at a point - * in the data flow where all - * necessary information is - * available. - * - * You have to initialize the - * matrix before usage with - * reinit(BlockSparsityPattern). The - * number of blocks per row and - * column are then determined by - * that function. - */ - BlockSparseMatrix (); - - /** - * Destructor. - */ - ~BlockSparseMatrix (); - - /** - * Pseudo copy operator only copying - * empty objects. The sizes of the block - * matrices need to be the same. - */ - BlockSparseMatrix & - operator = (const BlockSparseMatrix &); - - /** - * This operator assigns a scalar to a - * matrix. Since this does usually not - * make much sense (should we set all - * matrix entries to this value? Only - * the nonzero entries of the sparsity - * pattern?), this operation is only - * allowed if the actual value to be - * assigned is zero. This operator only - * exists to allow for the obvious - * notation matrix=0, which - * sets all elements of the matrix to - * zero, but keep the sparsity pattern - * previously used. - */ - BlockSparseMatrix & - operator = (const double d); - - /** - * Resize the matrix, by setting - * the number of block rows and - * columns. This deletes all - * blocks and replaces them by - * unitialized ones, i.e. ones - * for which also the sizes are - * not yet set. You have to do - * that by calling the @p reinit - * functions of the blocks - * themselves. Do not forget to - * call collect_sizes() after - * that on this object. - * - * The reason that you have to - * set sizes of the blocks - * yourself is that the sizes may - * be varying, the maximum number - * of elements per row may be - * varying, etc. It is simpler - * not to reproduce the interface - * of the @p SparsityPattern - * class here but rather let the - * user call whatever function - * she desires. - */ - void reinit (const unsigned int n_block_rows, - const unsigned int n_block_columns); - - /** - * Resize the matrix, by using an - * array of Epetra maps to determine - * the %parallel distribution of the - * individual matrices. This function - * assumes that a quadratic block - * matrix is generated. - */ - template - void reinit (const std::vector &input_maps, - const BlockSparsityType &block_sparsity_pattern); - - /** - * Resize the matrix, by using an - * array of index sets to determine - * the %parallel distribution of the - * individual matrices. This function - * assumes that a quadratic block - * matrix is generated. - */ - template - void reinit (const std::vector &input_maps, - const BlockSparsityType &block_sparsity_pattern, - const MPI_Comm &communicator = MPI_COMM_WORLD); - - /** - * Resize the matrix and initialize it - * by the given sparsity pattern. Since - * no distribution map is given, the - * result is a block matrix for which - * all elements are stored locally. 
- */ - template - void reinit (const BlockSparsityType &block_sparsity_pattern); - - /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries stored - * therein. It uses a threshold - * to copy only elements whose - * modulus is larger than the - * threshold (so zeros in the - * deal.II matrix can be filtered - * away). - */ - void reinit (const std::vector &input_maps, - const ::dealii::BlockSparseMatrix &deal_ii_sparse_matrix, - const double drop_tolerance=1e-13); - - /** - * This function initializes - * the Trilinos matrix using - * the deal.II sparse matrix - * and the entries stored - * therein. It uses a threshold - * to copy only elements whose - * modulus is larger than the - * threshold (so zeros in the - * deal.II matrix can be - * filtered away). Since no - * Epetra_Map is given, all the - * elements will be locally - * stored. - */ - void reinit (const ::dealii::BlockSparseMatrix &deal_ii_sparse_matrix, - const double drop_tolerance=1e-13); - - /** - * Returns the state of the - * matrix, i.e., whether - * compress() needs to be called - * after an operation requiring - * data exchange. Does only - * return non-true values when - * used in debug mode, - * since it is quite expensive to - * keep track of all operations - * that lead to the need for - * compress(). - */ - bool is_compressed () const; - - /** - * This function collects the - * sizes of the sub-objects and - * stores them in internal - * arrays, in order to be able to - * relay global indices into the - * matrix to indices into the - * subobjects. You *must* call - * this function each time after - * you have changed the size of - * the sub-objects. Note that - * this is a collective - * operation, i.e., it needs to - * be called on all MPI - * processes. This command - * internally calls the method - * compress(), so you - * don't need to call that - * function in case you use - * collect_sizes(). - */ - void collect_sizes (); - - /** - * Return the number of nonzero - * elements of this - * matrix. - */ - unsigned int n_nonzero_elements () const; - - /** - * Matrix-vector multiplication: - * let $dst = M*src$ with $M$ - * being this matrix. - */ - void vmult (MPI::BlockVector &dst, - const MPI::BlockVector &src) const; - - - /** - * Matrix-vector multiplication: - * let $dst = M*src$ with $M$ - * being this matrix, now applied - * to localized block vectors - * (works only when run on one - * processor). - */ - void vmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - */ - void vmult (MPI::BlockVector &dst, - const MPI::Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column, now - * applied to localized vectors - * (works only when run on one - * processor). - */ - void vmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - void vmult (MPI::Vector &dst, - const MPI::BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row, now - * applied to localized vectors - * (works only when run on one - * processor). 
- */ - void vmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - */ - void vmult (VectorBase &dst, - const VectorBase &src) const; - - /** - * Matrix-vector multiplication: - * let $dst = M^T*src$ with $M$ - * being this matrix. This - * function does the same as - * vmult() but takes the - * transposed matrix. - */ - void Tvmult (MPI::BlockVector &dst, - const MPI::BlockVector &src) const; - - /** - * Matrix-vector multiplication: - * let $dst = M^T*src$ with $M$ - * being this matrix. This - * function does the same as - * vmult() but takes the - * transposed matrix, now applied - * to localized Trilinos vectors - * (works only when run on one - * processor). - */ - void Tvmult (BlockVector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row. - */ - void Tvmult (MPI::BlockVector &dst, - const MPI::Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block row, now - * applied to localized Trilinos - * vectors (works only when run - * on one processor). - */ - void Tvmult (BlockVector &dst, - const Vector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column. - */ - void Tvmult (MPI::Vector &dst, - const MPI::BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block column, now - * applied to localized Trilinos - * vectors (works only when run - * on one processor). - */ - void Tvmult (Vector &dst, - const BlockVector &src) const; - - /** - * Matrix-vector - * multiplication. Just like the - * previous function, but only - * applicable if the matrix has - * only one block. - */ - void Tvmult (VectorBase &dst, - const VectorBase &src) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. - * - * Source x and - * destination dst must - * not be the same vector. - * - * Note that both vectors have - * to be distributed vectors - * generated using the same Map - * as was used for the matrix - * in case you work on a - * distributed memory - * architecture, using the - * interface in the - * TrilinosWrappers::MPI::BlockVector - * class. - */ - TrilinosScalar residual (MPI::BlockVector &dst, - const MPI::BlockVector &x, - const MPI::BlockVector &b) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. - * - * Source x and - * destination dst must - * not be the same vector. - * - * Note that both vectors have - * to be distributed vectors - * generated using the same Map - * as was used for the matrix - * in case you work on a - * distributed memory - * architecture, using the - * interface in the - * TrilinosWrappers::BlockVector - * class. 
Since the block - * matrix is in general - * distributed among processes, - * this function only works - * when running the program on - * one processor. - */ - TrilinosScalar residual (BlockVector &dst, - const BlockVector &x, - const BlockVector &b) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. Just like the - * previous function, but only - * applicable if the matrix - * only has one block row. - */ - TrilinosScalar residual (MPI::BlockVector &dst, - const MPI::Vector &x, - const MPI::BlockVector &b) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. Just like the - * previous function, but only - * applicable if the matrix - * only has one block row. - */ - TrilinosScalar residual (BlockVector &dst, - const Vector &x, - const BlockVector &b) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. Just like the - * previous function, but only - * applicable if the matrix - * only has one block column. - */ - TrilinosScalar residual (MPI::Vector &dst, - const MPI::BlockVector &x, - const MPI::Vector &b) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. Just like the - * previous function, but only - * applicable if the matrix - * only has one block column. - */ - TrilinosScalar residual (Vector &dst, - const BlockVector &x, - const Vector &b) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. Just like the - * previous function, but only - * applicable if the matrix - * only has one block. - */ - TrilinosScalar residual (VectorBase &dst, - const VectorBase &x, - const VectorBase &b) const; - - /** - * Make the clear() function in the - * base class visible, though it is - * protected. - */ - using BlockMatrixBase::clear; - - /** @addtogroup Exceptions - * @{ - */ - - /** - * Exception - */ - DeclException4 (ExcIncompatibleRowNumbers, - int, int, int, int, - << "The blocks [" << arg1 << ',' << arg2 << "] and [" - << arg3 << ',' << arg4 << "] have differing row numbers."); - - /** - * Exception - */ - DeclException4 (ExcIncompatibleColNumbers, - int, int, int, int, - << "The blocks [" << arg1 << ',' << arg2 << "] and [" - << arg3 << ',' << arg4 << "] have differing column numbers."); - ///@} + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ + typedef BlockMatrixBase BaseClass; + + /** + * Typedef the type of the underlying + * matrix. + */ + typedef BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. 
+ */ + typedef BaseClass::value_type value_type; + typedef BaseClass::pointer pointer; + typedef BaseClass::const_pointer const_pointer; + typedef BaseClass::reference reference; + typedef BaseClass::const_reference const_reference; + typedef BaseClass::size_type size_type; + typedef BaseClass::iterator iterator; + typedef BaseClass::const_iterator const_iterator; + + /** + * Constructor; initializes the + * matrix to be empty, without + * any structure, i.e. the + * matrix is not usable at + * all. This constructor is + * therefore only useful for + * matrices which are members of + * a class. All other matrices + * should be created at a point + * in the data flow where all + * necessary information is + * available. + * + * You have to initialize the + * matrix before usage with + * reinit(BlockSparsityPattern). The + * number of blocks per row and + * column are then determined by + * that function. + */ + BlockSparseMatrix (); + + /** + * Destructor. + */ + ~BlockSparseMatrix (); + + /** + * Pseudo copy operator only copying + * empty objects. The sizes of the block + * matrices need to be the same. + */ + BlockSparseMatrix & + operator = (const BlockSparseMatrix &); + + /** + * This operator assigns a scalar to a + * matrix. Since this does usually not + * make much sense (should we set all + * matrix entries to this value? Only + * the nonzero entries of the sparsity + * pattern?), this operation is only + * allowed if the actual value to be + * assigned is zero. This operator only + * exists to allow for the obvious + * notation matrix=0, which + * sets all elements of the matrix to + * zero, but keep the sparsity pattern + * previously used. + */ + BlockSparseMatrix & + operator = (const double d); + + /** + * Resize the matrix, by setting + * the number of block rows and + * columns. This deletes all + * blocks and replaces them by + * unitialized ones, i.e. ones + * for which also the sizes are + * not yet set. You have to do + * that by calling the @p reinit + * functions of the blocks + * themselves. Do not forget to + * call collect_sizes() after + * that on this object. + * + * The reason that you have to + * set sizes of the blocks + * yourself is that the sizes may + * be varying, the maximum number + * of elements per row may be + * varying, etc. It is simpler + * not to reproduce the interface + * of the @p SparsityPattern + * class here but rather let the + * user call whatever function + * she desires. + */ + void reinit (const unsigned int n_block_rows, + const unsigned int n_block_columns); + + /** + * Resize the matrix, by using an + * array of Epetra maps to determine + * the %parallel distribution of the + * individual matrices. This function + * assumes that a quadratic block + * matrix is generated. + */ + template + void reinit (const std::vector &input_maps, + const BlockSparsityType &block_sparsity_pattern); + + /** + * Resize the matrix, by using an + * array of index sets to determine + * the %parallel distribution of the + * individual matrices. This function + * assumes that a quadratic block + * matrix is generated. + */ + template + void reinit (const std::vector &input_maps, + const BlockSparsityType &block_sparsity_pattern, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Resize the matrix and initialize it + * by the given sparsity pattern. Since + * no distribution map is given, the + * result is a block matrix for which + * all elements are stored locally. 
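A sketch of the set-up path described in the class documentation above, assuming deal.II has been configured with Trilinos and that a compressed dealii::BlockSparsityPattern is available from elsewhere; the function name is hypothetical:

  #include <deal.II/lac/block_sparsity_pattern.h>
  #include <deal.II/lac/trilinos_block_sparse_matrix.h>

  void block_matrix_setup_sketch (const dealii::BlockSparsityPattern &block_pattern)
  {
    dealii::TrilinosWrappers::BlockSparseMatrix matrix;

    // fix block structure and block sizes in one call; with no distribution
    // map given, all elements are stored locally
    matrix.reinit (block_pattern);

    // the alternative route would be matrix.reinit (n_block_rows,
    // n_block_cols), initializing each block separately, and finishing with
    // matrix.collect_sizes ()

    // assigning a scalar is only allowed for zero: the entries are cleared,
    // the sparsity pattern is kept
    matrix = 0;
  }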
+ */ + template + void reinit (const BlockSparsityType &block_sparsity_pattern); + + /** + * This function initializes the + * Trilinos matrix using the deal.II + * sparse matrix and the entries stored + * therein. It uses a threshold + * to copy only elements whose + * modulus is larger than the + * threshold (so zeros in the + * deal.II matrix can be filtered + * away). + */ + void reinit (const std::vector &input_maps, + const ::dealii::BlockSparseMatrix &deal_ii_sparse_matrix, + const double drop_tolerance=1e-13); + + /** + * This function initializes + * the Trilinos matrix using + * the deal.II sparse matrix + * and the entries stored + * therein. It uses a threshold + * to copy only elements whose + * modulus is larger than the + * threshold (so zeros in the + * deal.II matrix can be + * filtered away). Since no + * Epetra_Map is given, all the + * elements will be locally + * stored. + */ + void reinit (const ::dealii::BlockSparseMatrix &deal_ii_sparse_matrix, + const double drop_tolerance=1e-13); + + /** + * Returns the state of the + * matrix, i.e., whether + * compress() needs to be called + * after an operation requiring + * data exchange. Does only + * return non-true values when + * used in debug mode, + * since it is quite expensive to + * keep track of all operations + * that lead to the need for + * compress(). + */ + bool is_compressed () const; + + /** + * This function collects the + * sizes of the sub-objects and + * stores them in internal + * arrays, in order to be able to + * relay global indices into the + * matrix to indices into the + * subobjects. You *must* call + * this function each time after + * you have changed the size of + * the sub-objects. Note that + * this is a collective + * operation, i.e., it needs to + * be called on all MPI + * processes. This command + * internally calls the method + * compress(), so you + * don't need to call that + * function in case you use + * collect_sizes(). + */ + void collect_sizes (); + + /** + * Return the number of nonzero + * elements of this + * matrix. + */ + unsigned int n_nonzero_elements () const; + + /** + * Matrix-vector multiplication: + * let $dst = M*src$ with $M$ + * being this matrix. + */ + void vmult (MPI::BlockVector &dst, + const MPI::BlockVector &src) const; + + + /** + * Matrix-vector multiplication: + * let $dst = M*src$ with $M$ + * being this matrix, now applied + * to localized block vectors + * (works only when run on one + * processor). + */ + void vmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ - void vmult (MPI::BlockVector &dst, ++ void vmult (MPI::BlockVector &dst, + const MPI::Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column, now + * applied to localized vectors + * (works only when run on one + * processor). + */ - void vmult (BlockVector &dst, ++ void vmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ + void vmult (MPI::Vector &dst, + const MPI::BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. 
Just like the + * previous function, but only + * applicable if the matrix has + * only one block row, now + * applied to localized vectors + * (works only when run on one + * processor). + */ + void vmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + void vmult (VectorBase &dst, + const VectorBase &src) const; + + /** + * Matrix-vector multiplication: + * let $dst = M^T*src$ with $M$ + * being this matrix. This + * function does the same as + * vmult() but takes the + * transposed matrix. + */ + void Tvmult (MPI::BlockVector &dst, + const MPI::BlockVector &src) const; + + /** + * Matrix-vector multiplication: + * let $dst = M^T*src$ with $M$ + * being this matrix. This + * function does the same as + * vmult() but takes the + * transposed matrix, now applied + * to localized Trilinos vectors + * (works only when run on one + * processor). + */ + void Tvmult (BlockVector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row. + */ - void Tvmult (MPI::BlockVector &dst, ++ void Tvmult (MPI::BlockVector &dst, + const MPI::Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block row, now + * applied to localized Trilinos + * vectors (works only when run + * on one processor). + */ - void Tvmult (BlockVector &dst, ++ void Tvmult (BlockVector &dst, + const Vector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column. + */ + void Tvmult (MPI::Vector &dst, + const MPI::BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block column, now + * applied to localized Trilinos + * vectors (works only when run + * on one processor). + */ + void Tvmult (Vector &dst, + const BlockVector &src) const; + + /** + * Matrix-vector + * multiplication. Just like the + * previous function, but only + * applicable if the matrix has + * only one block. + */ + void Tvmult (VectorBase &dst, + const VectorBase &src) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. + * + * Source x and + * destination dst must + * not be the same vector. + * + * Note that both vectors have + * to be distributed vectors + * generated using the same Map + * as was used for the matrix + * in case you work on a + * distributed memory + * architecture, using the + * interface in the + * TrilinosWrappers::MPI::BlockVector + * class. + */ + TrilinosScalar residual (MPI::BlockVector &dst, + const MPI::BlockVector &x, + const MPI::BlockVector &b) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. + * + * Source x and + * destination dst must + * not be the same vector. 
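A hedged sketch of vmult() and the first residual() variant declared above, assuming that matrix, x and b have been set up elsewhere with a compatible block structure and parallel distribution:

  #include <deal.II/lac/trilinos_block_sparse_matrix.h>
  #include <deal.II/lac/trilinos_block_vector.h>

  dealii::TrilinosScalar
  residual_norm_sketch (const dealii::TrilinosWrappers::BlockSparseMatrix &matrix,
                        const dealii::TrilinosWrappers::MPI::BlockVector  &x,
                        const dealii::TrilinosWrappers::MPI::BlockVector  &b)
  {
    // dst must be a vector different from x, with the same Map layout
    dealii::TrilinosWrappers::MPI::BlockVector dst (x);

    matrix.vmult (dst, x);               // dst = M*x
    return matrix.residual (dst, x, b);  // dst = b - M*x, returns its l2 norm
  }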
+ * + * Note that both vectors have + * to be distributed vectors + * generated using the same Map + * as was used for the matrix + * in case you work on a + * distributed memory + * architecture, using the + * interface in the + * TrilinosWrappers::BlockVector + * class. Since the block + * matrix is in general + * distributed among processes, + * this function only works + * when running the program on + * one processor. + */ + TrilinosScalar residual (BlockVector &dst, + const BlockVector &x, + const BlockVector &b) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. Just like the + * previous function, but only + * applicable if the matrix + * only has one block row. + */ + TrilinosScalar residual (MPI::BlockVector &dst, + const MPI::Vector &x, + const MPI::BlockVector &b) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. Just like the + * previous function, but only + * applicable if the matrix + * only has one block row. + */ + TrilinosScalar residual (BlockVector &dst, + const Vector &x, + const BlockVector &b) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. Just like the + * previous function, but only + * applicable if the matrix + * only has one block column. + */ + TrilinosScalar residual (MPI::Vector &dst, + const MPI::BlockVector &x, + const MPI::Vector &b) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. Just like the + * previous function, but only + * applicable if the matrix + * only has one block column. + */ + TrilinosScalar residual (Vector &dst, + const BlockVector &x, + const Vector &b) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. Just like the + * previous function, but only + * applicable if the matrix + * only has one block. + */ + TrilinosScalar residual (VectorBase &dst, + const VectorBase &x, + const VectorBase &b) const; + + /** + * Make the clear() function in the + * base class visible, though it is + * protected. 
+ */ + using BlockMatrixBase::clear; + + /** @addtogroup Exceptions + * @{ + */ + + /** + * Exception + */ + DeclException4 (ExcIncompatibleRowNumbers, + int, int, int, int, + << "The blocks [" << arg1 << ',' << arg2 << "] and [" + << arg3 << ',' << arg4 << "] have differing row numbers."); + + /** + * Exception + */ + DeclException4 (ExcIncompatibleColNumbers, + int, int, int, int, + << "The blocks [" << arg1 << ',' << arg2 << "] and [" + << arg3 << ',' << arg4 << "] have differing column numbers."); + ///@} }; diff --cc deal.II/include/deal.II/lac/trilinos_block_vector.h index 79f47c7484,8c898ed3bb..1bea6bebaf --- a/deal.II/include/deal.II/lac/trilinos_block_vector.h +++ b/deal.II/include/deal.II/lac/trilinos_block_vector.h @@@ -43,384 -43,384 +43,384 @@@ namespace TrilinosWrapper class BlockSparseMatrix; - /** - * An implementation of block vectors based on the vector class - * implemented in TrilinosWrappers. While the base class provides for - * most of the interface, this class handles the actual allocation of - * vectors and provides functions that are specific to the underlying - * vector type. - * - * In contrast to the class MPI::BlockVector, this class is based on a - * localized version of the vectors, which means that the whole vector - * is stored on each processor. Note that matrix vector products with - * this block vector class do only work in case the program is run on - * only one processor, since the Trilinos matrices are inherently - * parallel. - * - * @ingroup Vectors - * @ingroup TrilinosWrappers - * @see @ref GlossBlockLA "Block (linear algebra)" - * @author Martin Kronbichler, 2008 - */ + /** + * An implementation of block vectors based on the vector class + * implemented in TrilinosWrappers. While the base class provides for + * most of the interface, this class handles the actual allocation of + * vectors and provides functions that are specific to the underlying + * vector type. + * + * In contrast to the class MPI::BlockVector, this class is based on a + * localized version of the vectors, which means that the whole vector + * is stored on each processor. Note that matrix vector products with + * this block vector class do only work in case the program is run on + * only one processor, since the Trilinos matrices are inherently + * parallel. + * + * @ingroup Vectors + * @ingroup TrilinosWrappers + * @see @ref GlossBlockLA "Block (linear algebra)" + * @author Martin Kronbichler, 2008 + */ class BlockVector : public BlockVectorBase { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockVectorBase BaseClass; - - /** - * Typedef the type of the underlying - * vector. - */ - typedef BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. - */ - typedef BaseClass::value_type value_type; - typedef BaseClass::pointer pointer; - typedef BaseClass::const_pointer const_pointer; - typedef BaseClass::reference reference; - typedef BaseClass::const_reference const_reference; - typedef BaseClass::size_type size_type; - typedef BaseClass::iterator iterator; - typedef BaseClass::const_iterator const_iterator; - - /** - * Default constructor. Generate an - * empty vector without any blocks. - */ - BlockVector (); - - /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in Input_Maps. - * For this non-distributed vector, - * the %parallel partitioning is not - * used, just the global size of the - * partitioner. 
- */ - BlockVector (const std::vector &partitioner); - - /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in Input_Maps. - * For this non-distributed vector, - * the %parallel partitioning is not - * used, just the global size of the - * partitioner. - */ - BlockVector (const std::vector &partitioner, - const MPI_Comm &communicator = MPI_COMM_WORLD); - - /** - * Copy-Constructor. Set all the - * properties of the non-%parallel - * vector to those of the given - * %parallel vector and import the - * elements. - */ - BlockVector (const MPI::BlockVector &V); - - /** - * Copy-Constructor. Set all the - * properties of the vector to those - * of the given input vector and copy - * the elements. - */ - BlockVector (const BlockVector &V); - - /** - * Creates a block vector - * consisting of - * num_blocks - * components, but there is no - * content in the individual - * components and the user has to - * fill appropriate data using a - * reinit of the blocks. - */ - BlockVector (const unsigned int num_blocks); - - /** - * Constructor. Set the number of - * blocks to n.size() and - * initialize each block with - * n[i] zero elements. - * - * References BlockVector.reinit(). - */ - BlockVector (const std::vector &N); - - /** - * Constructor. Set the number of - * blocks to - * n.size(). Initialize the - * vector with the elements - * pointed to by the range of - * iterators given as second and - * third argument. Apart from the - * first argument, this - * constructor is in complete - * analogy to the respective - * constructor of the - * std::vector class, but the - * first argument is needed in - * order to know how to subdivide - * the block vector into - * different blocks. - */ - template - BlockVector (const std::vector &n, - const InputIterator first, - const InputIterator end); - - /** - * Destructor. Clears memory - */ - ~BlockVector (); - - /** - * use compress(VectorOperation) instead - * - * @deprecated - * - * See @ref GlossCompress "Compressing - * distributed objects" for more - * information. - */ - void compress (const Epetra_CombineMode last_action); - - /** - * so it is not hidden - */ - using BlockVectorBase::compress; - - /** - * Copy operator: fill all - * components of the vector that - * are locally stored with the - * given scalar value. - */ - BlockVector & - operator = (const value_type s); - - /** - * Copy operator for a - * distributed Trilinos vector to - * a localized one. - */ - BlockVector & - operator = (const MPI::BlockVector &V); - - /** - * Copy operator for arguments of - * the same type. - */ - BlockVector & - operator = (const BlockVector &V); - - /** - * Another copy function. This - * one takes a deal.II block - * vector and copies it into a - * TrilinosWrappers block - * vector. Note that the number - * of blocks has to be the same - * in the vector as in the input - * vector. Use the reinit() - * command for resizing the - * BlockVector or for changing - * the internal structure of the - * block components. - * - * Since Trilinos only works on - * doubles, this function is - * limited to accept only one - * possible number type in the - * deal.II vector. - */ - template - BlockVector & - operator = (const ::dealii::BlockVector &V); - - /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are Epetra_Maps given in the - * input argument, according to the - * global size of the individual - * components described in the - * maps. 
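A sketch of the Epetra_Map based constructor documented above; the Epetra_SerialComm, the block sizes and the Trilinos include paths are assumptions for a strictly serial example:

  #include <deal.II/lac/trilinos_block_vector.h>
  #include <Epetra_Map.h>
  #include <Epetra_SerialComm.h>
  #include <vector>

  void localized_block_vector_sketch ()
  {
    Epetra_SerialComm comm;

    // two blocks with 10 and 5 elements; only the global sizes of the maps
    // matter for this non-distributed vector
    std::vector<Epetra_Map> partitioning;
    partitioning.push_back (Epetra_Map (10, 0, comm));
    partitioning.push_back (Epetra_Map (5, 0, comm));

    dealii::TrilinosWrappers::BlockVector v (partitioning);
    // equivalently: v.reinit (partitioning);
  }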
Note that the resulting - * vector will be stored completely - * on each process. The Epetra_Map - * is useful when data exchange - * with a distributed vector based - * on the same Epetra_map is - * intended. In that case, the same - * communicator is used for data - * exchange. - * - * If fast==false, the vector - * is filled with zeros. - */ - void reinit (const std::vector &partitioning, - const bool fast = false); - - /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are index sets given in the - * input argument, according to the - * global size of the individual - * components described in the - * index set, and using a given MPI - * communicator. The MPI - * communicator is useful when data - * exchange with a distributed - * vector based on the same - * initialization is intended. In - * that case, the same communicator - * is used for data exchange. - * - * If fast==false, the vector - * is filled with zeros. - */ - void reinit (const std::vector &partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const bool fast = false); - - /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are elements in the first - * argument, and with the respective - * sizes. Since no distribution map - * is given, all vectors are local - * vectors. - * - * If fast==false, the vector - * is filled with zeros. - */ - void reinit (const std::vector &N, - const bool fast=false); - - /** - * Reinit the function - * according to a distributed - * block vector. The elements - * will be copied in this - * process. - */ - void reinit (const MPI::BlockVector &V); - - /** - * Change the dimension to that - * of the vector V. The same - * applies as for the other - * reinit() function. - * - * The elements of V are not - * copied, i.e. this function is - * the same as calling reinit - * (V.size(), fast). - * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() on one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. - */ - void reinit (const BlockVector &V, - const bool fast = false); - - /** - * Change the number of blocks to - * num_blocks. The individual - * blocks will get initialized with - * zero size, so it is assumed that - * the user resizes the - * individual blocks by herself - * in an appropriate way, and - * calls collect_sizes - * afterwards. - */ - void reinit (const unsigned int num_blocks); - - /** - * Swap the contents of this - * vector and the other vector - * v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. - * - * Limitation: right now this - * function only works if both - * vectors have the same number - * of blocks. If needed, the - * numbers of blocks should be - * exchanged, too. - * - * This function is analog to the - * the swap() function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. 
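The reinit(num_blocks)/collect_sizes() workflow and the swap() semantics described above, as a short sketch. It assumes the individual localized blocks can be resized with their own reinit(n), and that both vectors already have the same number of blocks when swapped:

#include <deal.II/lac/trilinos_block_vector.h>

using namespace dealii;

void resize_and_swap (TrilinosWrappers::BlockVector &u,
                      TrilinosWrappers::BlockVector &v)
{
  // Give u two blocks of initially zero size, resize the blocks by hand,
  // then let the block vector update its cached sizes.
  u.reinit (2);
  u.block(0).reinit (100);
  u.block(1).reinit (50);
  u.collect_sizes ();

  // Exchange the contents of u and v by swapping internal pointers only;
  // v is assumed to also consist of two blocks.
  u.swap (v);
}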
- */ - void swap (BlockVector &v); - - /** - * Print to a stream. - */ - void print (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * Exception - */ - DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); - - /** - * Exception - */ - DeclException0 (ExcNonMatchingBlockVectors); - - /** - * Exception - */ - DeclException2 (ExcNonLocalizedMap, - int, int, - << "For the generation of a localized vector the map has " - << "to assign all elements to all vectors! " - << "local_size = global_size is a necessary condition, but" - << arg1 << " != " << arg2 << " was given!"); + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ + typedef BlockVectorBase BaseClass; + + /** + * Typedef the type of the underlying + * vector. + */ + typedef BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. + */ + typedef BaseClass::value_type value_type; + typedef BaseClass::pointer pointer; + typedef BaseClass::const_pointer const_pointer; + typedef BaseClass::reference reference; + typedef BaseClass::const_reference const_reference; + typedef BaseClass::size_type size_type; + typedef BaseClass::iterator iterator; + typedef BaseClass::const_iterator const_iterator; + + /** + * Default constructor. Generate an + * empty vector without any blocks. + */ + BlockVector (); + + /** + * Constructor. Generate a block + * vector with as many blocks as + * there are entries in Input_Maps. + * For this non-distributed vector, + * the %parallel partitioning is not + * used, just the global size of the + * partitioner. + */ + BlockVector (const std::vector &partitioner); + + /** + * Constructor. Generate a block + * vector with as many blocks as + * there are entries in Input_Maps. + * For this non-distributed vector, + * the %parallel partitioning is not + * used, just the global size of the + * partitioner. + */ + BlockVector (const std::vector &partitioner, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Copy-Constructor. Set all the + * properties of the non-%parallel + * vector to those of the given + * %parallel vector and import the + * elements. + */ + BlockVector (const MPI::BlockVector &V); + + /** + * Copy-Constructor. Set all the + * properties of the vector to those + * of the given input vector and copy + * the elements. + */ - BlockVector (const BlockVector &V); ++ BlockVector (const BlockVector &V); + + /** + * Creates a block vector + * consisting of + * num_blocks + * components, but there is no + * content in the individual + * components and the user has to + * fill appropriate data using a + * reinit of the blocks. + */ + BlockVector (const unsigned int num_blocks); + + /** + * Constructor. Set the number of + * blocks to n.size() and + * initialize each block with + * n[i] zero elements. + * + * References BlockVector.reinit(). + */ + BlockVector (const std::vector &N); + + /** + * Constructor. Set the number of + * blocks to + * n.size(). Initialize the + * vector with the elements + * pointed to by the range of + * iterators given as second and + * third argument. Apart from the + * first argument, this + * constructor is in complete + * analogy to the respective + * constructor of the + * std::vector class, but the + * first argument is needed in + * order to know how to subdivide + * the block vector into + * different blocks. 
+ */ + template + BlockVector (const std::vector &n, + const InputIterator first, + const InputIterator end); + + /** + * Destructor. Clears memory + */ + ~BlockVector (); + + /** + * use compress(VectorOperation) instead + * + * @deprecated + * + * See @ref GlossCompress "Compressing + * distributed objects" for more + * information. + */ + void compress (const Epetra_CombineMode last_action); + + /** + * so it is not hidden + */ + using BlockVectorBase::compress; + + /** + * Copy operator: fill all + * components of the vector that + * are locally stored with the + * given scalar value. + */ + BlockVector & + operator = (const value_type s); + + /** + * Copy operator for a + * distributed Trilinos vector to + * a localized one. + */ + BlockVector & + operator = (const MPI::BlockVector &V); + + /** + * Copy operator for arguments of + * the same type. + */ + BlockVector & + operator = (const BlockVector &V); + + /** + * Another copy function. This + * one takes a deal.II block + * vector and copies it into a + * TrilinosWrappers block + * vector. Note that the number + * of blocks has to be the same + * in the vector as in the input + * vector. Use the reinit() + * command for resizing the + * BlockVector or for changing + * the internal structure of the + * block components. + * + * Since Trilinos only works on + * doubles, this function is + * limited to accept only one + * possible number type in the + * deal.II vector. + */ + template + BlockVector & + operator = (const ::dealii::BlockVector &V); + + /** + * Reinitialize the BlockVector to + * contain as many blocks as there + * are Epetra_Maps given in the + * input argument, according to the + * global size of the individual + * components described in the + * maps. Note that the resulting + * vector will be stored completely + * on each process. The Epetra_Map + * is useful when data exchange + * with a distributed vector based + * on the same Epetra_map is + * intended. In that case, the same + * communicator is used for data + * exchange. + * + * If fast==false, the vector + * is filled with zeros. + */ + void reinit (const std::vector &partitioning, + const bool fast = false); + + /** + * Reinitialize the BlockVector to + * contain as many blocks as there + * are index sets given in the + * input argument, according to the + * global size of the individual + * components described in the + * index set, and using a given MPI + * communicator. The MPI + * communicator is useful when data + * exchange with a distributed + * vector based on the same + * initialization is intended. In + * that case, the same communicator + * is used for data exchange. + * + * If fast==false, the vector + * is filled with zeros. + */ + void reinit (const std::vector &partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool fast = false); + + /** + * Reinitialize the BlockVector to + * contain as many blocks as there + * are elements in the first + * argument, and with the respective + * sizes. Since no distribution map + * is given, all vectors are local + * vectors. + * + * If fast==false, the vector + * is filled with zeros. + */ + void reinit (const std::vector &N, + const bool fast=false); + + /** + * Reinit the function + * according to a distributed + * block vector. The elements + * will be copied in this + * process. + */ + void reinit (const MPI::BlockVector &V); + + /** + * Change the dimension to that + * of the vector V. The same + * applies as for the other + * reinit() function. 
+ * + * The elements of V are not + * copied, i.e. this function is + * the same as calling reinit + * (V.size(), fast). + * + * Note that you must call this + * (or the other reinit() + * functions) function, rather + * than calling the reinit() + * functions of an individual + * block, to allow the block + * vector to update its caches of + * vector sizes. If you call + * reinit() on one of the + * blocks, then subsequent + * actions on this object may + * yield unpredictable results + * since they may be routed to + * the wrong block. + */ + void reinit (const BlockVector &V, + const bool fast = false); + + /** + * Change the number of blocks to + * num_blocks. The individual + * blocks will get initialized with + * zero size, so it is assumed that + * the user resizes the + * individual blocks by herself + * in an appropriate way, and + * calls collect_sizes + * afterwards. + */ + void reinit (const unsigned int num_blocks); + + /** + * Swap the contents of this + * vector and the other vector + * v. One could do this + * operation with a temporary + * variable and copying over the + * data elements, but this + * function is significantly more + * efficient since it only swaps + * the pointers to the data of + * the two vectors and therefore + * does not need to allocate + * temporary storage and move + * data around. + * + * Limitation: right now this + * function only works if both + * vectors have the same number + * of blocks. If needed, the + * numbers of blocks should be + * exchanged, too. + * + * This function is analog to the + * the swap() function of all C++ + * standard containers. Also, + * there is a global function + * swap(u,v) that simply calls + * u.swap(v), again in analogy + * to standard functions. + */ + void swap (BlockVector &v); + + /** + * Print to a stream. + */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Exception + */ + DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); + + /** + * Exception + */ + DeclException0 (ExcNonMatchingBlockVectors); + + /** + * Exception + */ + DeclException2 (ExcNonLocalizedMap, + int, int, + << "For the generation of a localized vector the map has " + << "to assign all elements to all vectors! " + << "local_size = global_size is a necessary condition, but" + << arg1 << " != " << arg2 << " was given!"); }; diff --cc deal.II/include/deal.II/lac/trilinos_parallel_block_vector.h index 6090f6d4d3,34b7bd28e3..fd80a509dc --- a/deal.II/include/deal.II/lac/trilinos_parallel_block_vector.h +++ b/deal.II/include/deal.II/lac/trilinos_parallel_block_vector.h @@@ -44,351 -44,351 +44,351 @@@ namespace TrilinosWrapper namespace MPI { - /** - * An implementation of block vectors based on the vector class - * implemented in TrilinosWrappers. While the base class provides for - * most of the interface, this class handles the actual allocation of - * vectors and provides functions that are specific to the underlying - * vector type. - * - * The model of distribution of data is such that each of the blocks - * is distributed across all MPI processes named in the MPI - * communicator. I.e. we don't just distribute the whole vector, but - * each component. In the constructors and reinit() functions, one - * therefore not only has to specify the sizes of the individual - * blocks, but also the number of elements of each of these blocks to - * be stored on the local process. 
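For the distribution model just described, a short sketch of setting up a two-block parallel vector from per-block IndexSets; the helper name and the interpretation of the two index sets (velocity/pressure) are illustrative only:

#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>

using namespace dealii;

// owned_u / owned_p: the locally owned rows of each block on this process.
void make_distributed_vector (const IndexSet &owned_u,
                              const IndexSet &owned_p,
                              const MPI_Comm &communicator,
                              TrilinosWrappers::MPI::BlockVector &vec)
{
  std::vector<IndexSet> partitioning (2);
  partitioning[0] = owned_u;   // e.g. velocity part
  partitioning[1] = owned_p;   // e.g. pressure part

  // Each block is distributed across all processes of 'communicator'.
  vec.reinit (partitioning, communicator);
}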
- * - * @ingroup Vectors - * @ingroup TrilinosWrappers - * @see @ref GlossBlockLA "Block (linear algebra)" - * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 - */ + /** + * An implementation of block vectors based on the vector class + * implemented in TrilinosWrappers. While the base class provides for + * most of the interface, this class handles the actual allocation of + * vectors and provides functions that are specific to the underlying + * vector type. + * + * The model of distribution of data is such that each of the blocks + * is distributed across all MPI processes named in the MPI + * communicator. I.e. we don't just distribute the whole vector, but + * each component. In the constructors and reinit() functions, one + * therefore not only has to specify the sizes of the individual + * blocks, but also the number of elements of each of these blocks to + * be stored on the local process. + * + * @ingroup Vectors + * @ingroup TrilinosWrappers + * @see @ref GlossBlockLA "Block (linear algebra)" + * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 + */ class BlockVector : public BlockVectorBase { - public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ - typedef BlockVectorBase BaseClass; - - /** - * Typedef the type of the underlying - * vector. - */ - typedef BaseClass::BlockType BlockType; - - /** - * Import the typedefs from the base - * class. - */ - typedef BaseClass::value_type value_type; - typedef BaseClass::pointer pointer; - typedef BaseClass::const_pointer const_pointer; - typedef BaseClass::reference reference; - typedef BaseClass::const_reference const_reference; - typedef BaseClass::size_type size_type; - typedef BaseClass::iterator iterator; - typedef BaseClass::const_iterator const_iterator; - - /** - * Default constructor. Generate an - * empty vector without any blocks. - */ - BlockVector (); - - /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in @p - * partitioning. Each Epetra_Map - * contains the layout of the - * distribution of data among the MPI - * processes. - */ - BlockVector (const std::vector ¶llel_partitioning); - - /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in - * @p partitioning. Each IndexSet - * together with the MPI communicator - * contains the layout of the - * distribution of data among the MPI - * processes. - */ - BlockVector (const std::vector ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD); - - /** - * Copy-Constructor. Set all the - * properties of the parallel vector - * to those of the given argument and - * copy the elements. - */ - BlockVector (const BlockVector &V); - - /** - * Creates a block vector - * consisting of - * num_blocks - * components, but there is no - * content in the individual - * components and the user has to - * fill appropriate data using a - * reinit of the blocks. - */ - BlockVector (const unsigned int num_blocks); - - /** - * Destructor. Clears memory - */ - ~BlockVector (); - - /** - * Copy operator: fill all - * components of the vector that - * are locally stored with the - * given scalar value. - */ - BlockVector & - operator = (const value_type s); - - /** - * Copy operator for arguments of - * the same type. - */ - BlockVector & - operator = (const BlockVector &V); - - /** - * Copy operator for arguments of - * the localized Trilinos vector - * type. 
- */ - BlockVector & - operator = (const ::dealii::TrilinosWrappers::BlockVector &V); - - /** - * Another copy function. This - * one takes a deal.II block - * vector and copies it into a - * TrilinosWrappers block - * vector. Note that the number - * of blocks has to be the same - * in the vector as in the input - * vector. Use the reinit() - * command for resizing the - * BlockVector or for changing - * the internal structure of the - * block components. - * - * Since Trilinos only works on - * doubles, this function is - * limited to accept only one - * possible number type in the - * deal.II vector. - */ - template - BlockVector & - operator = (const ::dealii::BlockVector &V); - - /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are Epetra_Maps given in the input - * argument, according to the - * parallel distribution of the - * individual components described - * in the maps. - * - * If fast==false, the vector - * is filled with zeros. - */ - void reinit (const std::vector ¶llel_partitioning, - const bool fast = false); - - /** - * Reinitialize the BlockVector to - * contain as many blocks as there - * are index sets given in the input - * argument, according to the - * parallel distribution of the - * individual components described - * in the maps. - * - * If fast==false, the vector - * is filled with zeros. - */ - void reinit (const std::vector ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const bool fast = false); - - /** - * Change the dimension to that - * of the vector V. The same - * applies as for the other - * reinit() function. - * - * The elements of V are not - * copied, i.e. this function is - * the same as calling reinit - * (V.size(), fast). - * - * Note that you must call this - * (or the other reinit() - * functions) function, rather - * than calling the reinit() - * functions of an individual - * block, to allow the block - * vector to update its caches of - * vector sizes. If you call - * reinit() on one of the - * blocks, then subsequent - * actions on this object may - * yield unpredictable results - * since they may be routed to - * the wrong block. - */ - void reinit (const BlockVector &V, - const bool fast = false); - - /** - * Change the number of blocks to - * num_blocks. The individual - * blocks will get initialized with - * zero size, so it is assumed that - * the user resizes the - * individual blocks by herself - * in an appropriate way, and - * calls collect_sizes - * afterwards. - */ - void reinit (const unsigned int num_blocks); - - /** - * This reinit function is meant to - * be used for parallel - * calculations where some - * non-local data has to be - * used. The typical situation - * where one needs this function is - * the call of the - * FEValues::get_function_values - * function (or of some - * derivatives) in parallel. Since - * it is usually faster to retrieve - * the data in advance, this - * function can be called before - * the assembly forks out to the - * different processors. What this - * function does is the following: - * It takes the information in the - * columns of the given matrix and - * looks which data couples between - * the different processors. That - * data is then queried from the - * input vector. Note that you - * should not write to the - * resulting vector any more, since - * the some data can be stored - * several times on different - * processors, leading to - * unpredictable results. 
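A sketch of the usage pattern described above for import_nonlocal_data_for_fe(): before evaluating the finite element solution (e.g. via FEValues::get_function_values) in parallel, gather the off-process entries that the coupling pattern of the matrix requires. Whether the target vector may be empty beforehand is an assumption here; all names are placeholders:

#include <deal.II/lac/trilinos_block_sparse_matrix.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>

using namespace dealii;

void gather_nonlocal_entries (const TrilinosWrappers::BlockSparseMatrix &system_matrix,
                              const TrilinosWrappers::MPI::BlockVector  &distributed_solution,
                              TrilinosWrappers::MPI::BlockVector        &ghosted_solution)
{
  // Queries all entries that the columns of system_matrix couple to and
  // copies them from distributed_solution. Do not write into
  // ghosted_solution afterwards (see the caveat in the documentation).
  ghosted_solution.import_nonlocal_data_for_fe (system_matrix,
                                                distributed_solution);
}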
In - * particular, such a vector cannot - * be used for matrix-vector - * products as for example done - * during the solution of linear - * systems. - */ - void import_nonlocal_data_for_fe (const TrilinosWrappers::BlockSparseMatrix &m, - const BlockVector &v); - - - /** - * use compress(VectorOperation) instead - * - * @deprecated - * - * See @ref GlossCompress "Compressing - * distributed objects" for more - * information. - */ - void compress (const Epetra_CombineMode last_action); - - /** - * so it is not hidden - */ - using BlockVectorBase::compress; - - - /** - * Returns the state of the - * vector, i.e., whether - * compress() needs to be - * called after an operation - * requiring data - * exchange. Does only return - * non-true values when used in - * debug mode, since - * it is quite expensive to - * keep track of all operations - * that lead to the need for - * compress(). - */ - bool is_compressed () const; - - /** - * Swap the contents of this - * vector and the other vector - * v. One could do this - * operation with a temporary - * variable and copying over the - * data elements, but this - * function is significantly more - * efficient since it only swaps - * the pointers to the data of - * the two vectors and therefore - * does not need to allocate - * temporary storage and move - * data around. - * - * Limitation: right now this - * function only works if both - * vectors have the same number - * of blocks. If needed, the - * numbers of blocks should be - * exchanged, too. - * - * This function is analog to the - * the swap() function of all C++ - * standard containers. Also, - * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. - */ - void swap (BlockVector &v); - - /** - * Print to a stream. - */ - void print (std::ostream &out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * Exception - */ - DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); - - /** - * Exception - */ - DeclException0 (ExcNonMatchingBlockVectors); + public: + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ + typedef BlockVectorBase BaseClass; + + /** + * Typedef the type of the underlying + * vector. + */ + typedef BaseClass::BlockType BlockType; + + /** + * Import the typedefs from the base + * class. + */ + typedef BaseClass::value_type value_type; + typedef BaseClass::pointer pointer; + typedef BaseClass::const_pointer const_pointer; + typedef BaseClass::reference reference; + typedef BaseClass::const_reference const_reference; + typedef BaseClass::size_type size_type; + typedef BaseClass::iterator iterator; + typedef BaseClass::const_iterator const_iterator; + + /** + * Default constructor. Generate an + * empty vector without any blocks. + */ + BlockVector (); + + /** + * Constructor. Generate a block + * vector with as many blocks as + * there are entries in @p + * partitioning. Each Epetra_Map + * contains the layout of the + * distribution of data among the MPI + * processes. + */ + BlockVector (const std::vector ¶llel_partitioning); + + /** + * Constructor. Generate a block + * vector with as many blocks as + * there are entries in + * @p partitioning. Each IndexSet + * together with the MPI communicator + * contains the layout of the + * distribution of data among the MPI + * processes. + */ + BlockVector (const std::vector ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Copy-Constructor. 
Set all the + * properties of the parallel vector + * to those of the given argument and + * copy the elements. + */ - BlockVector (const BlockVector &V); ++ BlockVector (const BlockVector &V); + + /** + * Creates a block vector + * consisting of + * num_blocks + * components, but there is no + * content in the individual + * components and the user has to + * fill appropriate data using a + * reinit of the blocks. + */ + BlockVector (const unsigned int num_blocks); + + /** + * Destructor. Clears memory + */ + ~BlockVector (); + + /** + * Copy operator: fill all + * components of the vector that + * are locally stored with the + * given scalar value. + */ + BlockVector & + operator = (const value_type s); + + /** + * Copy operator for arguments of + * the same type. + */ + BlockVector & + operator = (const BlockVector &V); + + /** + * Copy operator for arguments of + * the localized Trilinos vector + * type. + */ + BlockVector & + operator = (const ::dealii::TrilinosWrappers::BlockVector &V); + + /** + * Another copy function. This + * one takes a deal.II block + * vector and copies it into a + * TrilinosWrappers block + * vector. Note that the number + * of blocks has to be the same + * in the vector as in the input + * vector. Use the reinit() + * command for resizing the + * BlockVector or for changing + * the internal structure of the + * block components. + * + * Since Trilinos only works on + * doubles, this function is + * limited to accept only one + * possible number type in the + * deal.II vector. + */ + template + BlockVector & + operator = (const ::dealii::BlockVector &V); + + /** + * Reinitialize the BlockVector to + * contain as many blocks as there + * are Epetra_Maps given in the input + * argument, according to the + * parallel distribution of the + * individual components described + * in the maps. + * + * If fast==false, the vector + * is filled with zeros. + */ + void reinit (const std::vector ¶llel_partitioning, + const bool fast = false); + + /** + * Reinitialize the BlockVector to + * contain as many blocks as there + * are index sets given in the input + * argument, according to the + * parallel distribution of the + * individual components described + * in the maps. + * + * If fast==false, the vector + * is filled with zeros. + */ + void reinit (const std::vector ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool fast = false); + + /** + * Change the dimension to that + * of the vector V. The same + * applies as for the other + * reinit() function. + * + * The elements of V are not + * copied, i.e. this function is + * the same as calling reinit + * (V.size(), fast). + * + * Note that you must call this + * (or the other reinit() + * functions) function, rather + * than calling the reinit() + * functions of an individual + * block, to allow the block + * vector to update its caches of + * vector sizes. If you call + * reinit() on one of the + * blocks, then subsequent + * actions on this object may + * yield unpredictable results + * since they may be routed to + * the wrong block. + */ + void reinit (const BlockVector &V, + const bool fast = false); + + /** + * Change the number of blocks to + * num_blocks. The individual + * blocks will get initialized with + * zero size, so it is assumed that + * the user resizes the + * individual blocks by herself + * in an appropriate way, and + * calls collect_sizes + * afterwards. 
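A sketch of the deal.II-to-Trilinos copy operator documented above. It assumes the Trilinos vector has already been reinit()ed with the same block structure, and that the deal.II vector stores doubles (the only number type Trilinos accepts):

#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>

using namespace dealii;

void copy_into_trilinos (const BlockVector<double>          &serial_v,
                         TrilinosWrappers::MPI::BlockVector &trilinos_v)
{
  // trilinos_v must already have the same number of blocks and block
  // sizes as serial_v; use reinit() beforehand to set up that structure.
  trilinos_v = serial_v;
}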
+ */ + void reinit (const unsigned int num_blocks); + + /** + * This reinit function is meant to + * be used for parallel + * calculations where some + * non-local data has to be + * used. The typical situation + * where one needs this function is + * the call of the + * FEValues::get_function_values + * function (or of some + * derivatives) in parallel. Since + * it is usually faster to retrieve + * the data in advance, this + * function can be called before + * the assembly forks out to the + * different processors. What this + * function does is the following: + * It takes the information in the + * columns of the given matrix and + * looks which data couples between + * the different processors. That + * data is then queried from the + * input vector. Note that you + * should not write to the + * resulting vector any more, since + * the some data can be stored + * several times on different + * processors, leading to + * unpredictable results. In + * particular, such a vector cannot + * be used for matrix-vector + * products as for example done + * during the solution of linear + * systems. + */ + void import_nonlocal_data_for_fe (const TrilinosWrappers::BlockSparseMatrix &m, + const BlockVector &v); + + + /** + * use compress(VectorOperation) instead + * + * @deprecated + * + * See @ref GlossCompress "Compressing + * distributed objects" for more + * information. + */ + void compress (const Epetra_CombineMode last_action); + + /** + * so it is not hidden + */ + using BlockVectorBase::compress; + + + /** + * Returns the state of the + * vector, i.e., whether + * compress() needs to be + * called after an operation + * requiring data + * exchange. Does only return + * non-true values when used in + * debug mode, since + * it is quite expensive to + * keep track of all operations + * that lead to the need for + * compress(). + */ + bool is_compressed () const; + + /** + * Swap the contents of this + * vector and the other vector + * v. One could do this + * operation with a temporary + * variable and copying over the + * data elements, but this + * function is significantly more + * efficient since it only swaps + * the pointers to the data of + * the two vectors and therefore + * does not need to allocate + * temporary storage and move + * data around. + * + * Limitation: right now this + * function only works if both + * vectors have the same number + * of blocks. If needed, the + * numbers of blocks should be + * exchanged, too. + * + * This function is analog to the + * the swap() function of all C++ + * standard containers. Also, + * there is a global function + * swap(u,v) that simply calls + * u.swap(v), again in analogy + * to standard functions. + */ + void swap (BlockVector &v); + + /** + * Print to a stream. + */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Exception + */ + DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize); + + /** + * Exception + */ + DeclException0 (ExcNonMatchingBlockVectors); }; diff --cc deal.II/include/deal.II/lac/trilinos_solver.h index a76f34d1e4,7460a835a8..57c3798338 --- a/deal.II/include/deal.II/lac/trilinos_solver.h +++ b/deal.II/include/deal.II/lac/trilinos_solver.h @@@ -37,202 -37,202 +37,202 @@@ namespace TrilinosWrapper class PreconditionBase; - /** - * Base class for solver classes using the Trilinos solvers. 
Since - * solvers in Trilinos are selected based on flags passed to a generic - * solver object, basically all the actual solver calls happen in this - * class, and derived classes simply set the right flags to select one - * solver or another, or to set certain parameters for individual - * solvers. For a general discussion on the Trilinos solver package - * AztecOO, we refer to the AztecOO - * user guide. - * - * This solver class can also be used as a standalone class, where the - * respective Krylov method is set via the flag - * solver_name. This can be done at runtime (e.g., when - * parsing the solver from a ParameterList) and is similar to the - * deal.II class SolverSelector. - * - * @ingroup TrilinosWrappers - * @author Martin Kronbichler, 2008, 2009 - */ + /** + * Base class for solver classes using the Trilinos solvers. Since + * solvers in Trilinos are selected based on flags passed to a generic + * solver object, basically all the actual solver calls happen in this + * class, and derived classes simply set the right flags to select one + * solver or another, or to set certain parameters for individual + * solvers. For a general discussion on the Trilinos solver package + * AztecOO, we refer to the AztecOO + * user guide. + * + * This solver class can also be used as a standalone class, where the + * respective Krylov method is set via the flag + * solver_name. This can be done at runtime (e.g., when + * parsing the solver from a ParameterList) and is similar to the + * deal.II class SolverSelector. + * + * @ingroup TrilinosWrappers + * @author Martin Kronbichler, 2008, 2009 + */ class SolverBase { - public: - - /** - * Enumeration object that is - * set in the constructor of - * the derived classes and - * tells Trilinos which solver - * to use. This option can also - * be set in the user program, - * so one might use this base - * class instead of one of the - * specialized derived classes - * when the solver should be - * set at runtime. Currently - * enabled options are: - */ - enum SolverName {cg, cgs, gmres, bicgstab, tfqmr} solver_name; - - /** - * Standardized data struct to - * pipe additional data to the - * solver. - */ - - struct AdditionalData - { - /** - * Sets the additional data field to - * the desired output format and puts - * the restart parameter in case the - * derived class is GMRES. - * - * TODO: Find a better way for - * setting the GMRES restart - * parameter since it is quite - * inelegant to set a specific option - * of one solver in the base class - * for all solvers. - */ - AdditionalData (const bool output_solver_details = false, - const unsigned int gmres_restart_parameter = 30); - - /** - * Enables/disables the output of - * solver details (residual in each - * iterations etc.). - */ - const bool output_solver_details; - - /** - * Restart parameter for GMRES - * solver. - */ - const unsigned int gmres_restart_parameter; - }; - - /** - * Constructor. Takes the - * solver control object and - * creates the solver. - */ - SolverBase (SolverControl &cn); - - /** - * Second constructor. This - * constructor takes an enum - * object that specifies the - * solver name and sets the - * appropriate Krylov - * method. - */ - SolverBase (const enum SolverName solver_name, - SolverControl &cn); - - /** - * Destructor. - */ - virtual ~SolverBase (); - - /** - * Solve the linear system - * Ax=b. 
Depending on - * the information provided by - * derived classes and the - * object passed as a - * preconditioner, one of the - * linear solvers and - * preconditioners of Trilinos - * is chosen. - */ - void - solve (const SparseMatrix &A, - VectorBase &x, - const VectorBase &b, - const PreconditionBase &preconditioner); - - /** - * Solve the linear system - * Ax=b. Depending on the - * information provided by derived - * classes and the object passed as a - * preconditioner, one of the linear - * solvers and preconditioners of - * Trilinos is chosen. This class - * works with matrices according to - * the TrilinosWrappers format, but - * can take deal.II vectors as - * argument. Since deal.II are serial - * vectors (not distributed), this - * function does only what you expect - * in case the matrix is locally - * owned. Otherwise, an exception - * will be thrown. - */ - void - solve (const SparseMatrix &A, - dealii::Vector &x, - const dealii::Vector &b, - const PreconditionBase &preconditioner); - - /** - * Access to object that controls - * convergence. - */ - SolverControl & control() const; - - /** - * Exception - */ - DeclException1 (ExcTrilinosError, - int, - << "An error with error number " << arg1 - << " occurred while calling a Trilinos function"); - - protected: - - /** - * Reference to the object that - * controls convergence of the - * iterative solver. In fact, - * for these Trilinos wrappers, - * Trilinos does so itself, but - * we copy the data from this - * object before starting the - * solution process, and copy - * the data back into it - * afterwards. - */ - SolverControl &solver_control; - - private: - - /** - * A structure that collects - * the Trilinos sparse matrix, - * the right hand side vector - * and the solution vector, - * which is passed down to the - * Trilinos solver. - */ - std_cxx1x::shared_ptr linear_problem; - - /** - * A structure that contains - * the Trilinos solver and - * preconditioner objects. - */ - AztecOO solver; - - /** - * Store a copy of the flags for this - * particular solver. - */ - const AdditionalData additional_data; + public: + + /** + * Enumeration object that is + * set in the constructor of + * the derived classes and + * tells Trilinos which solver + * to use. This option can also + * be set in the user program, + * so one might use this base + * class instead of one of the + * specialized derived classes + * when the solver should be + * set at runtime. Currently + * enabled options are: + */ + enum SolverName {cg, cgs, gmres, bicgstab, tfqmr} solver_name; + + /** + * Standardized data struct to + * pipe additional data to the + * solver. + */ + + struct AdditionalData + { + /** + * Sets the additional data field to + * the desired output format and puts + * the restart parameter in case the + * derived class is GMRES. + * + * TODO: Find a better way for + * setting the GMRES restart + * parameter since it is quite + * inelegant to set a specific option + * of one solver in the base class + * for all solvers. + */ + AdditionalData (const bool output_solver_details = false, + const unsigned int gmres_restart_parameter = 30); + + /** + * Enables/disables the output of + * solver details (residual in each + * iterations etc.). + */ + const bool output_solver_details; + + /** + * Restart parameter for GMRES + * solver. + */ + const unsigned int gmres_restart_parameter; + }; + + /** + * Constructor. Takes the + * solver control object and + * creates the solver. 
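A usage sketch of the run-time solver selection described above. The SolverControl settings and the Jacobi preconditioner (from trilinos_precondition.h, not part of this hunk) are illustrative choices:

#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_solver.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector_base.h>

using namespace dealii;

// A and b are assumed to be assembled and compressed elsewhere.
void solve_system (const TrilinosWrappers::SparseMatrix &A,
                   TrilinosWrappers::VectorBase         &x,
                   const TrilinosWrappers::VectorBase   &b)
{
  SolverControl solver_control (1000, 1e-10 * b.l2_norm ());

  TrilinosWrappers::PreconditionJacobi preconditioner;
  preconditioner.initialize (A);

  // Select the Krylov method at run time via the solver_name flag;
  // gmres, bicgstab, cgs and tfqmr are chosen the same way.
  TrilinosWrappers::SolverBase solver (TrilinosWrappers::SolverBase::cg,
                                       solver_control);
  solver.solve (A, x, b, preconditioner);
}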
+ */ - SolverBase (SolverControl &cn); ++ SolverBase (SolverControl &cn); + + /** + * Second constructor. This + * constructor takes an enum + * object that specifies the + * solver name and sets the + * appropriate Krylov + * method. + */ + SolverBase (const enum SolverName solver_name, + SolverControl &cn); + + /** + * Destructor. + */ + virtual ~SolverBase (); + + /** + * Solve the linear system + * Ax=b. Depending on + * the information provided by + * derived classes and the + * object passed as a + * preconditioner, one of the + * linear solvers and + * preconditioners of Trilinos + * is chosen. + */ + void + solve (const SparseMatrix &A, + VectorBase &x, + const VectorBase &b, + const PreconditionBase &preconditioner); + + /** + * Solve the linear system + * Ax=b. Depending on the + * information provided by derived + * classes and the object passed as a + * preconditioner, one of the linear + * solvers and preconditioners of + * Trilinos is chosen. This class + * works with matrices according to + * the TrilinosWrappers format, but + * can take deal.II vectors as + * argument. Since deal.II are serial + * vectors (not distributed), this + * function does only what you expect + * in case the matrix is locally + * owned. Otherwise, an exception + * will be thrown. + */ + void + solve (const SparseMatrix &A, + dealii::Vector &x, + const dealii::Vector &b, + const PreconditionBase &preconditioner); + + /** + * Access to object that controls + * convergence. + */ + SolverControl &control() const; + + /** + * Exception + */ + DeclException1 (ExcTrilinosError, + int, + << "An error with error number " << arg1 + << " occurred while calling a Trilinos function"); + + protected: + + /** + * Reference to the object that + * controls convergence of the + * iterative solver. In fact, + * for these Trilinos wrappers, + * Trilinos does so itself, but + * we copy the data from this + * object before starting the + * solution process, and copy + * the data back into it + * afterwards. + */ + SolverControl &solver_control; + + private: + + /** + * A structure that collects + * the Trilinos sparse matrix, + * the right hand side vector + * and the solution vector, + * which is passed down to the + * Trilinos solver. + */ + std_cxx1x::shared_ptr linear_problem; + + /** + * A structure that contains + * the Trilinos solver and + * preconditioner objects. + */ + AztecOO solver; + + /** + * Store a copy of the flags for this + * particular solver. + */ + const AdditionalData additional_data; }; @@@ -517,140 -517,140 +517,140 @@@ - /** - * An implementation of the Trilinos KLU direct solver (using the Amesos - * package). - * - * @ingroup TrilinosWrappers - * @author Martin Kronbichler, 2009 - */ + /** + * An implementation of the Trilinos KLU direct solver (using the Amesos + * package). + * + * @ingroup TrilinosWrappers + * @author Martin Kronbichler, 2009 + */ class SolverDirect { - public: - - /** - * Standardized data struct to - * pipe additional data to the - * solver. - */ - - struct AdditionalData - { - /** - * Sets the additional data field to - * the desired output format. - */ - AdditionalData (const bool output_solver_details = false); - - /** - * Enables/disables the output of - * solver details (residual in each - * iterations etc.). - */ - bool output_solver_details; - }; - - /** - * Constructor. Takes the - * solver control object and - * creates the solver. - */ - SolverDirect (SolverControl &cn, - const AdditionalData &data = AdditionalData()); - - /** - * Destructor. 
- */ - virtual ~SolverDirect (); - - /** - * Solve the linear system - * Ax=b. Creates a KLU - * factorization of the matrix and - * performs the solve. Note that - * there is no need for a - * preconditioner here. - */ - void - solve (const SparseMatrix &A, - VectorBase &x, - const VectorBase &b); - - /** - * Solve the linear system - * Ax=b. Depending on the - * information provided by derived - * classes and the object passed as a - * preconditioner, one of the linear - * solvers and preconditioners of - * Trilinos is chosen. This class - * works with matrices according to - * the TrilinosWrappers format, but - * can take deal.II vectors as - * argument. Since deal.II are serial - * vectors (not distributed), this - * function does only what you expect - * in case the matrix is locally - * owned. Otherwise, an exception - * will be thrown. - */ - void - solve (const SparseMatrix &A, - dealii::Vector &x, - const dealii::Vector &b); - - /** - * Access to object that controls - * convergence. - */ - SolverControl & control() const; - - /** - * Exception - */ - DeclException1 (ExcTrilinosError, - int, - << "An error with error number " << arg1 - << " occurred while calling a Trilinos function"); - - private: - - /** - * Reference to the object that - * controls convergence of the - * iterative solver. In fact, - * for these Trilinos wrappers, - * Trilinos does so itself, but - * we copy the data from this - * object before starting the - * solution process, and copy - * the data back into it - * afterwards. - */ - SolverControl &solver_control; - - /** - * A structure that collects - * the Trilinos sparse matrix, - * the right hand side vector - * and the solution vector, - * which is passed down to the - * Trilinos solver. - */ - std_cxx1x::shared_ptr linear_problem; - - /** - * A structure that contains - * the Trilinos solver and - * preconditioner objects. - */ - std_cxx1x::shared_ptr solver; - - /** - * Store a copy of the flags for this - * particular solver. - */ - const AdditionalData additional_data; + public: + + /** + * Standardized data struct to + * pipe additional data to the + * solver. + */ + + struct AdditionalData + { + /** + * Sets the additional data field to + * the desired output format. + */ + AdditionalData (const bool output_solver_details = false); + + /** + * Enables/disables the output of + * solver details (residual in each + * iterations etc.). + */ + bool output_solver_details; + }; + + /** + * Constructor. Takes the + * solver control object and + * creates the solver. + */ - SolverDirect (SolverControl &cn, ++ SolverDirect (SolverControl &cn, + const AdditionalData &data = AdditionalData()); + + /** + * Destructor. + */ + virtual ~SolverDirect (); + + /** + * Solve the linear system + * Ax=b. Creates a KLU + * factorization of the matrix and + * performs the solve. Note that + * there is no need for a + * preconditioner here. + */ + void + solve (const SparseMatrix &A, + VectorBase &x, + const VectorBase &b); + + /** + * Solve the linear system + * Ax=b. Depending on the + * information provided by derived + * classes and the object passed as a + * preconditioner, one of the linear + * solvers and preconditioners of + * Trilinos is chosen. This class + * works with matrices according to + * the TrilinosWrappers format, but + * can take deal.II vectors as + * argument. Since deal.II are serial + * vectors (not distributed), this + * function does only what you expect + * in case the matrix is locally + * owned. Otherwise, an exception + * will be thrown. 
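A corresponding sketch for the direct solver documented above (Amesos/KLU). The SolverControl object is required by the interface even though no Krylov iteration takes place, and the AdditionalData flag only toggles the output of solver details:

#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/trilinos_solver.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector_base.h>

using namespace dealii;

// A and b are assumed to be assembled and compressed elsewhere; no
// preconditioner is needed for the KLU factorization.
void solve_directly (const TrilinosWrappers::SparseMatrix &A,
                     TrilinosWrappers::VectorBase         &x,
                     const TrilinosWrappers::VectorBase   &b)
{
  SolverControl solver_control (1, 0);
  TrilinosWrappers::SolverDirect::AdditionalData data (true /*output details*/);
  TrilinosWrappers::SolverDirect solver (solver_control, data);
  solver.solve (A, x, b);
}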
+ */ + void + solve (const SparseMatrix &A, + dealii::Vector &x, + const dealii::Vector &b); + + /** + * Access to object that controls + * convergence. + */ + SolverControl &control() const; + + /** + * Exception + */ + DeclException1 (ExcTrilinosError, + int, + << "An error with error number " << arg1 + << " occurred while calling a Trilinos function"); + + private: + + /** + * Reference to the object that + * controls convergence of the + * iterative solver. In fact, + * for these Trilinos wrappers, + * Trilinos does so itself, but + * we copy the data from this + * object before starting the + * solution process, and copy + * the data back into it + * afterwards. + */ + SolverControl &solver_control; + + /** + * A structure that collects + * the Trilinos sparse matrix, + * the right hand side vector + * and the solution vector, + * which is passed down to the + * Trilinos solver. + */ + std_cxx1x::shared_ptr linear_problem; + + /** + * A structure that contains + * the Trilinos solver and + * preconditioner objects. + */ + std_cxx1x::shared_ptr solver; + + /** + * Store a copy of the flags for this + * particular solver. + */ + const AdditionalData additional_data; }; diff --cc deal.II/include/deal.II/lac/trilinos_sparse_matrix.h index 6cae9dc445,57f9da6ce7..e522461661 --- a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h +++ b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h @@@ -272,1673 -272,1673 +272,1673 @@@ namespace TrilinosWrapper } - /** - * This class implements a wrapper to use the Trilinos distributed - * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of - * matrix we deal with all the time - we most likely get it from some - * assembly process, where also entries not locally owned might need to - * be written and hence need to be forwarded to the owner process. This - * class is designed to be used in a distributed memory architecture - * with an MPI compiler on the bottom, but works equally well also for - * serial processes. The only requirement for this class to work is that - * Trilinos has been installed with the same compiler as is used for - * generating deal.II. - * - * The interface of this class is modeled after the existing - * SparseMatrix class in deal.II. It has almost the same member - * functions, and is often exchangable. However, since Trilinos only - * supports a single scalar type (double), it is not templated, and only - * works with doubles. - * - * Note that Trilinos only guarantees that operations do what you expect - * if the functions @p GlobalAssemble has been called after matrix - * assembly. Therefore, you need to call SparseMatrix::compress() - * before you actually use the matrix. This also calls @p FillComplete - * that compresses the storage format for sparse matrices by discarding - * unused elements. Trilinos allows to continue with assembling the - * matrix after calls to these functions, though. - * - * @ingroup TrilinosWrappers - * @ingroup Matrix1 - * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 - */ + /** + * This class implements a wrapper to use the Trilinos distributed + * sparse matrix class Epetra_FECrsMatrix. This is precisely the kind of + * matrix we deal with all the time - we most likely get it from some + * assembly process, where also entries not locally owned might need to + * be written and hence need to be forwarded to the owner process. 
This + * class is designed to be used in a distributed memory architecture + * with an MPI compiler on the bottom, but works equally well also for + * serial processes. The only requirement for this class to work is that + * Trilinos has been installed with the same compiler as is used for + * generating deal.II. + * + * The interface of this class is modeled after the existing + * SparseMatrix class in deal.II. It has almost the same member + * functions, and is often exchangable. However, since Trilinos only + * supports a single scalar type (double), it is not templated, and only + * works with doubles. + * + * Note that Trilinos only guarantees that operations do what you expect + * if the functions @p GlobalAssemble has been called after matrix + * assembly. Therefore, you need to call SparseMatrix::compress() + * before you actually use the matrix. This also calls @p FillComplete + * that compresses the storage format for sparse matrices by discarding + * unused elements. Trilinos allows to continue with assembling the + * matrix after calls to these functions, though. + * + * @ingroup TrilinosWrappers + * @ingroup Matrix1 + * @author Martin Kronbichler, Wolfgang Bangerth, 2008, 2009 + */ class SparseMatrix : public Subscriptor { - public: - /** - * A structure that describes - * some of the traits of this - * class in terms of its run-time - * behavior. Some other classes - * (such as the block matrix - * classes) that take one or - * other of the matrix classes as - * its template parameters can - * tune their behavior based on - * the variables in this class. - */ - struct Traits - { - /** - * It is safe to elide additions - * of zeros to individual - * elements of this matrix. - */ - static const bool zero_addition_can_be_elided = true; - }; + public: + /** + * A structure that describes + * some of the traits of this + * class in terms of its run-time + * behavior. Some other classes + * (such as the block matrix + * classes) that take one or + * other of the matrix classes as + * its template parameters can + * tune their behavior based on + * the variables in this class. + */ + struct Traits + { + /** + * It is safe to elide additions + * of zeros to individual + * elements of this matrix. + */ + static const bool zero_addition_can_be_elided = true; + }; - /** - * Declare a typedef for the - * iterator class. - */ - typedef MatrixIterators::const_iterator const_iterator; - - /** - * Declare a typedef in analogy - * to all the other container - * classes. - */ - typedef TrilinosScalar value_type; - - /** - * @name Constructors and initalization. - */ + /** + * Declare a typedef for the + * iterator class. + */ + typedef MatrixIterators::const_iterator const_iterator; + + /** + * Declare a typedef in analogy + * to all the other container + * classes. + */ + typedef TrilinosScalar value_type; + + /** + * @name Constructors and initalization. + */ //@{ - /** - * Default constructor. Generates - * an empty (zero-size) matrix. - */ - SparseMatrix (); - - /** - * Generate a matrix that is completely - * stored locally, having #m rows and - * #n columns. - * - * The number of columns entries per - * row is specified as the maximum - * number of entries argument. - */ - SparseMatrix (const unsigned int m, - const unsigned int n, - const unsigned int n_max_entries_per_row); - - /** - * Generate a matrix that is completely - * stored locally, having #m rows and - * #n columns. - * - * The vector - * n_entries_per_row - * specifies the number of entries in - * each row. 
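The assemble-then-compress() requirement stated in the class description above, as a small self-contained sketch with a locally stored matrix; entries are written with set() only, since mixing set() and add() before a compress() is best avoided:

#include <deal.II/lac/trilinos_sparse_matrix.h>

using namespace dealii;

void small_matrix_example ()
{
  // 4x4 matrix, stored completely on this process, at most 3 entries/row.
  TrilinosWrappers::SparseMatrix A (4, 4, 3);

  for (unsigned int i = 0; i < 4; ++i)
    {
      A.set (i, i, 2.0);
      if (i > 0)
        A.set (i, i - 1, -1.0);
      if (i < 3)
        A.set (i, i + 1, -1.0);
    }

  // Triggers GlobalAssemble/FillComplete; required before the matrix is
  // used, e.g. in matrix-vector products. Newer versions take a
  // VectorOperation argument instead of the parameterless form.
  A.compress ();
}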
- */ - SparseMatrix (const unsigned int m, - const unsigned int n, - const std::vector &n_entries_per_row); - - /** - * Generate a matrix from a Trilinos - * sparsity pattern object. - */ - SparseMatrix (const SparsityPattern &InputSparsityPattern); - - /** - * Copy constructor. Sets the - * calling matrix to be the same - * as the input matrix, i.e., - * using the same sparsity - * pattern and entries. - */ - SparseMatrix (const SparseMatrix &InputMatrix); - - /** - * Destructor. Made virtual so - * that one can use pointers to - * this class. - */ - virtual ~SparseMatrix (); - - /** - * This function initializes the - * Trilinos matrix with a deal.II - * sparsity pattern, i.e. it makes - * the Trilinos Epetra matrix know - * the position of nonzero entries - * according to the sparsity - * pattern. This function is meant - * for use in serial programs, where - * there is no need to specify how - * the matrix is going to be - * distributed among different - * processors. This function works in - * %parallel, too, but it is - * recommended to manually specify - * the %parallel partioning of the - * matrix using an Epetra_Map. When - * run in %parallel, it is currently - * necessary that each processor - * holds the sparsity_pattern - * structure because each processor - * sets its rows. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const SparsityType &sparsity_pattern); - - /** - * This function reinitializes the - * Trilinos sparse matrix from a - * (possibly distributed) Trilinos - * sparsity pattern. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - void reinit (const SparsityPattern &sparsity_pattern); - - /** - * This function copies the content - * in sparse_matrix to the - * calling matrix. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - void reinit (const SparseMatrix &sparse_matrix); - - /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries - * stored therein. It uses a - * threshold to copy only elements - * with modulus larger than the - * threshold (so zeros in the deal.II - * matrix can be filtered away). - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix - * entries should be copied, too. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const ::dealii::SparseMatrix &dealii_sparse_matrix, - const double drop_tolerance=1e-13, - const bool copy_values=true, - const ::dealii::SparsityPattern *use_this_sparsity=0); - - /** - * This reinit function takes as - * input a Trilinos Epetra_CrsMatrix - * and copies its sparsity - * pattern. If so requested, even the - * content (values) will be copied. - */ - void reinit (const Epetra_CrsMatrix &input_matrix, - const bool copy_values = true); + /** + * Default constructor. Generates + * an empty (zero-size) matrix. + */ + SparseMatrix (); + + /** + * Generate a matrix that is completely + * stored locally, having #m rows and + * #n columns. + * + * The number of columns entries per + * row is specified as the maximum + * number of entries argument. 
+ */ + SparseMatrix (const unsigned int m, + const unsigned int n, + const unsigned int n_max_entries_per_row); + + /** + * Generate a matrix that is completely + * stored locally, having #m rows and + * #n columns. + * + * The vector + * n_entries_per_row + * specifies the number of entries in + * each row. + */ + SparseMatrix (const unsigned int m, + const unsigned int n, + const std::vector &n_entries_per_row); + + /** + * Generate a matrix from a Trilinos + * sparsity pattern object. + */ + SparseMatrix (const SparsityPattern &InputSparsityPattern); + + /** + * Copy constructor. Sets the + * calling matrix to be the same + * as the input matrix, i.e., + * using the same sparsity + * pattern and entries. + */ + SparseMatrix (const SparseMatrix &InputMatrix); + + /** + * Destructor. Made virtual so + * that one can use pointers to + * this class. + */ + virtual ~SparseMatrix (); + + /** + * This function initializes the + * Trilinos matrix with a deal.II + * sparsity pattern, i.e. it makes + * the Trilinos Epetra matrix know + * the position of nonzero entries + * according to the sparsity + * pattern. This function is meant + * for use in serial programs, where + * there is no need to specify how + * the matrix is going to be + * distributed among different + * processors. This function works in + * %parallel, too, but it is + * recommended to manually specify + * the %parallel partioning of the + * matrix using an Epetra_Map. When + * run in %parallel, it is currently + * necessary that each processor + * holds the sparsity_pattern + * structure because each processor + * sets its rows. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const SparsityType &sparsity_pattern); + + /** + * This function reinitializes the + * Trilinos sparse matrix from a + * (possibly distributed) Trilinos + * sparsity pattern. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + void reinit (const SparsityPattern &sparsity_pattern); + + /** + * This function copies the content + * in sparse_matrix to the + * calling matrix. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + void reinit (const SparseMatrix &sparse_matrix); + + /** + * This function initializes the + * Trilinos matrix using the deal.II + * sparse matrix and the entries + * stored therein. It uses a + * threshold to copy only elements + * with modulus larger than the + * threshold (so zeros in the deal.II + * matrix can be filtered away). + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const ::dealii::SparseMatrix &dealii_sparse_matrix, + const double drop_tolerance=1e-13, + const bool copy_values=true, + const ::dealii::SparsityPattern *use_this_sparsity=0); + + /** + * This reinit function takes as + * input a Trilinos Epetra_CrsMatrix + * and copies its sparsity + * pattern. If so requested, even the + * content (values) will be copied. 
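A sketch of the reinit() variant that copies a deal.II SparseMatrix (values and sparsity) into a Trilinos matrix, using the drop tolerance documented above; both objects are assumed to be set up by the caller:

#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>

using namespace dealii;

void copy_from_dealii (const SparseMatrix<double>     &dealii_matrix,
                       TrilinosWrappers::SparseMatrix &trilinos_matrix)
{
  // Entries with modulus <= 1e-13 are dropped; copy_values=true also
  // transfers the matrix entries, not just the sparsity structure.
  trilinos_matrix.reinit (dealii_matrix,
                          /*drop_tolerance =*/ 1e-13,
                          /*copy_values    =*/ true);
}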
+ */ + void reinit (const Epetra_CrsMatrix &input_matrix, + const bool copy_values = true); //@} - /** - * @name Constructors and initialization using an Epetra_Map description - */ + /** + * @name Constructors and initialization using an Epetra_Map description + */ //@{ - /** - * Constructor using an Epetra_Map to - * describe the %parallel - * partitioning. The parameter @p - * n_max_entries_per_row sets the - * number of nonzero entries in each - * row that will be allocated. Note - * that this number does not need to - * be exact, and it is even allowed - * that the actual matrix structure - * has more nonzero entries than - * specified in the - * constructor. However it is still - * advantageous to provide good - * estimates here since this will - * considerably increase the - * performance of the matrix - * setup. However, there is no effect - * in the performance of - * matrix-vector products, since - * Trilinos reorganizes the matrix - * memory prior to use (in the - * compress() step). - */ - SparseMatrix (const Epetra_Map ¶llel_partitioning, - const unsigned int n_max_entries_per_row = 0); - - /** - * Same as before, but now set a - * value of nonzeros for each matrix - * row. Since we know the number of - * elements in the matrix exactly in - * this case, we can already allocate - * the right amount of memory, which - * makes the creation process - * including the insertion of nonzero - * elements by the respective - * SparseMatrix::reinit call - * considerably faster. - */ - SparseMatrix (const Epetra_Map ¶llel_partitioning, - const std::vector &n_entries_per_row); - - /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular matrices, where one - * map describes the %parallel - * partitioning of the dofs - * associated with the matrix rows - * and the other one the partitioning - * of dofs in the matrix - * columns. Note that there is no - * real parallelism along the columns - * – the processor that owns a - * certain row always owns all the - * column elements, no matter how far - * they might be spread out. The - * second Epetra_Map is only used to - * specify the number of columns and - * for internal arragements when - * doing matrix-vector products with - * vectors based on that column map. - * - * The integer input @p - * n_max_entries_per_row defines the - * number of columns entries per row - * that will be allocated. - */ - SparseMatrix (const Epetra_Map &row_parallel_partitioning, - const Epetra_Map &col_parallel_partitioning, - const unsigned int n_max_entries_per_row = 0); - - /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular matrices, where one - * map specifies the %parallel - * distribution of degrees of freedom - * associated with matrix rows and - * the second one specifies the - * %parallel distribution the dofs - * associated with columns in the - * matrix. The second map also - * provides information for the - * internal arrangement in matrix - * vector products (i.e., the - * distribution of vector this matrix - * is to be multiplied with), but is - * not used for the distribution of - * the columns – rather, all - * column elements of a row are - * stored on the same processor in - * any case. 
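A minimal usage sketch (not part of this patch) for the serial constructors and the reinit() variants documented above: an empty matrix is given its sparsity via a CompressedSimpleSparsityPattern, filled with set(), and finalized with compress(). It assumes a deal.II build configured with Trilinos; the sizes and values are illustrative.

    #include <deal.II/lac/compressed_simple_sparsity_pattern.h>
    #include <deal.II/lac/trilinos_sparse_matrix.h>

    void serial_usage_sketch ()
    {
      using namespace dealii;

      // announce the nonzero positions of a small 4x4 matrix
      CompressedSimpleSparsityPattern csp (4, 4);
      for (unsigned int i=0; i<4; ++i)
        {
          csp.add (i, i);
          if (i+1 < 4)
            csp.add (i, i+1);
        }

      // default-constructed (empty) matrix, then handed the pattern
      TrilinosWrappers::SparseMatrix matrix;
      matrix.reinit (csp);

      // write into previously announced positions only
      for (unsigned int i=0; i<4; ++i)
        matrix.set (i, i, 2.0);

      // collective call; also needed in serial after set()
      matrix.compress ();
    }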
The vector - * n_entries_per_row - * specifies the number of entries in - * each row of the newly generated - * matrix. - */ - SparseMatrix (const Epetra_Map &row_parallel_partitioning, - const Epetra_Map &col_parallel_partitioning, - const std::vector &n_entries_per_row); - - /** - * This function is initializes the - * Trilinos Epetra matrix according to - * the specified sparsity_pattern, and - * also reassigns the matrix rows to - * different processes according to a - * user-supplied Epetra map. In - * programs following the style of the - * tutorial programs, this function - * (and the respective call for a - * rectangular matrix) are the natural - * way to initialize the matrix size, - * its distribution among the MPI - * processes (if run in %parallel) as - * well as the locatoin of non-zero - * elements. Trilinos stores the - * sparsity pattern internally, so it - * won't be needed any more after this - * call, in contrast to the deal.II own - * object. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. If - * the flag is not set, each processor - * just sets the elements in the - * sparsity pattern that belong to its - * rows. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const Epetra_Map ¶llel_partitioning, - const SparsityType &sparsity_pattern, - const bool exchange_data = false); - - /** - * This function is similar to the - * other initialization function - * above, but now also reassigns the - * matrix rows and columns according - * to two user-supplied Epetra maps. - * To be used for rectangular - * matrices. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const Epetra_Map &row_parallel_partitioning, - const Epetra_Map &col_parallel_partitioning, - const SparsityType &sparsity_pattern, - const bool exchange_data = false); - - /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries - * stored therein. It uses a - * threshold to copy only elements - * with modulus larger than the - * threshold (so zeros in the deal.II - * matrix can be filtered away). In - * contrast to the other reinit - * function with deal.II sparse - * matrix argument, this function - * takes a %parallel partitioning - * specified by the user instead of - * internally generating it. - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix - * entries should be copied, too. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. 
- */ - template - void reinit (const Epetra_Map ¶llel_partitioning, - const ::dealii::SparseMatrix &dealii_sparse_matrix, - const double drop_tolerance=1e-13, - const bool copy_values=true, - const ::dealii::SparsityPattern *use_this_sparsity=0); - - /** - * This function is similar to the - * other initialization function with - * deal.II sparse matrix input above, - * but now takes Epetra maps for both - * the rows and the columns of the - * matrix. Chosen for rectangular - * matrices. - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix - * entries should be copied, too. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const Epetra_Map &row_parallel_partitioning, - const Epetra_Map &col_parallel_partitioning, - const ::dealii::SparseMatrix &dealii_sparse_matrix, - const double drop_tolerance=1e-13, - const bool copy_values=true, - const ::dealii::SparsityPattern *use_this_sparsity=0); + /** + * Constructor using an Epetra_Map to + * describe the %parallel + * partitioning. The parameter @p + * n_max_entries_per_row sets the + * number of nonzero entries in each + * row that will be allocated. Note + * that this number does not need to + * be exact, and it is even allowed + * that the actual matrix structure + * has more nonzero entries than + * specified in the + * constructor. However it is still + * advantageous to provide good + * estimates here since this will + * considerably increase the + * performance of the matrix + * setup. However, there is no effect + * in the performance of + * matrix-vector products, since + * Trilinos reorganizes the matrix + * memory prior to use (in the + * compress() step). + */ + SparseMatrix (const Epetra_Map ¶llel_partitioning, + const unsigned int n_max_entries_per_row = 0); + + /** + * Same as before, but now set a + * value of nonzeros for each matrix + * row. Since we know the number of + * elements in the matrix exactly in + * this case, we can already allocate + * the right amount of memory, which + * makes the creation process + * including the insertion of nonzero + * elements by the respective + * SparseMatrix::reinit call + * considerably faster. + */ + SparseMatrix (const Epetra_Map ¶llel_partitioning, + const std::vector &n_entries_per_row); + + /** + * This constructor is similar to the + * one above, but it now takes two + * different Epetra maps for rows and + * columns. This interface is meant + * to be used for generating + * rectangular matrices, where one + * map describes the %parallel + * partitioning of the dofs + * associated with the matrix rows + * and the other one the partitioning + * of dofs in the matrix + * columns. Note that there is no + * real parallelism along the columns + * – the processor that owns a + * certain row always owns all the + * column elements, no matter how far + * they might be spread out. The + * second Epetra_Map is only used to + * specify the number of columns and + * for internal arragements when + * doing matrix-vector products with + * vectors based on that column map. + * + * The integer input @p + * n_max_entries_per_row defines the + * number of columns entries per row + * that will be allocated. 
+ */ + SparseMatrix (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, + const unsigned int n_max_entries_per_row = 0); + + /** + * This constructor is similar to the + * one above, but it now takes two + * different Epetra maps for rows and + * columns. This interface is meant + * to be used for generating + * rectangular matrices, where one + * map specifies the %parallel + * distribution of degrees of freedom + * associated with matrix rows and + * the second one specifies the + * %parallel distribution the dofs + * associated with columns in the + * matrix. The second map also + * provides information for the + * internal arrangement in matrix + * vector products (i.e., the + * distribution of vector this matrix + * is to be multiplied with), but is + * not used for the distribution of + * the columns – rather, all + * column elements of a row are + * stored on the same processor in + * any case. The vector + * n_entries_per_row + * specifies the number of entries in + * each row of the newly generated + * matrix. + */ + SparseMatrix (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, + const std::vector &n_entries_per_row); + + /** + * This function is initializes the + * Trilinos Epetra matrix according to + * the specified sparsity_pattern, and + * also reassigns the matrix rows to + * different processes according to a + * user-supplied Epetra map. In + * programs following the style of the + * tutorial programs, this function + * (and the respective call for a + * rectangular matrix) are the natural + * way to initialize the matrix size, + * its distribution among the MPI + * processes (if run in %parallel) as + * well as the locatoin of non-zero + * elements. Trilinos stores the + * sparsity pattern internally, so it + * won't be needed any more after this + * call, in contrast to the deal.II own + * object. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. If + * the flag is not set, each processor + * just sets the elements in the + * sparsity pattern that belong to its + * rows. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const Epetra_Map ¶llel_partitioning, - const SparsityType &sparsity_pattern, ++ const SparsityType &sparsity_pattern, + const bool exchange_data = false); + + /** + * This function is similar to the + * other initialization function + * above, but now also reassigns the + * matrix rows and columns according + * to two user-supplied Epetra maps. + * To be used for rectangular + * matrices. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. 
+ */ + template + void reinit (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, - const SparsityType &sparsity_pattern, ++ const SparsityType &sparsity_pattern, + const bool exchange_data = false); + + /** + * This function initializes the + * Trilinos matrix using the deal.II + * sparse matrix and the entries + * stored therein. It uses a + * threshold to copy only elements + * with modulus larger than the + * threshold (so zeros in the deal.II + * matrix can be filtered away). In + * contrast to the other reinit + * function with deal.II sparse + * matrix argument, this function + * takes a %parallel partitioning + * specified by the user instead of + * internally generating it. + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const Epetra_Map ¶llel_partitioning, + const ::dealii::SparseMatrix &dealii_sparse_matrix, + const double drop_tolerance=1e-13, + const bool copy_values=true, + const ::dealii::SparsityPattern *use_this_sparsity=0); + + /** + * This function is similar to the + * other initialization function with + * deal.II sparse matrix input above, + * but now takes Epetra maps for both + * the rows and the columns of the + * matrix. Chosen for rectangular + * matrices. + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const Epetra_Map &row_parallel_partitioning, + const Epetra_Map &col_parallel_partitioning, - const ::dealii::SparseMatrix &dealii_sparse_matrix, ++ const ::dealii::SparseMatrix &dealii_sparse_matrix, + const double drop_tolerance=1e-13, + const bool copy_values=true, + const ::dealii::SparsityPattern *use_this_sparsity=0); //@} - /** - * @name Constructors and initialization using an IndexSet description - */ + /** + * @name Constructors and initialization using an IndexSet description + */ //@{ - /** - * Constructor using an IndexSet and - * an MPI communicator to describe - * the %parallel partitioning. The - * parameter @p n_max_entries_per_row - * sets the number of nonzero entries - * in each row that will be - * allocated. Note that this number - * does not need to be exact, and it - * is even allowed that the actual - * matrix structure has more nonzero - * entries than specified in the - * constructor. However it is still - * advantageous to provide good - * estimates here since this will - * considerably increase the - * performance of the matrix - * setup. However, there is no effect - * in the performance of - * matrix-vector products, since - * Trilinos reorganizes the matrix - * memory prior to use (in the - * compress() step). - */ - SparseMatrix (const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const unsigned int n_max_entries_per_row = 0); - - /** - * Same as before, but now set the - * number of nonzeros in each matrix - * row separately. 
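A sketch (not part of this patch) of the Epetra_Map based initialization documented above. It assumes an MPI-enabled deal.II/Trilinos build in which MPI has already been initialized; the linear row partition and the diagonal-only pattern are placeholders.

    #include <deal.II/lac/compressed_simple_sparsity_pattern.h>
    #include <deal.II/lac/trilinos_sparse_matrix.h>
    #include <Epetra_Map.h>
    #include <Epetra_MpiComm.h>

    void epetra_map_usage_sketch (const unsigned int n)
    {
      using namespace dealii;

      // let Trilinos distribute the n rows linearly among the processes
      Epetra_MpiComm comm (MPI_COMM_WORLD);
      Epetra_Map     row_map (static_cast<int>(n), 0, comm);

      // each process may announce the full pattern here; with the default
      // exchange_data=false only the locally owned rows are actually used
      CompressedSimpleSparsityPattern csp (n, n);
      for (unsigned int i=0; i<n; ++i)
        csp.add (i, i);

      TrilinosWrappers::SparseMatrix matrix;
      matrix.reinit (row_map, csp);   // collective on all processes

      // ...assembly via set()/add() would follow, ending in matrix.compress()...
    }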
Since we know the - * number of elements in the matrix - * exactly in this case, we can - * already allocate the right amount - * of memory, which makes the - * creation process including the - * insertion of nonzero elements by - * the respective - * SparseMatrix::reinit call - * considerably faster. - */ - SparseMatrix (const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator, - const std::vector &n_entries_per_row); - - /** - * This constructor is similar to the - * one above, but it now takes two - * different IndexSet partitions for - * row and columns. This interface is - * meant to be used for generating - * rectangular matrices, where the - * first index set describes the - * %parallel partitioning of the - * degrees of freedom associated with - * the matrix rows and the second one - * the partitioning of the matrix - * columns. The second index set - * specifies the partitioning of the - * vectors this matrix is to be - * multiplied with, not the - * distribution of the elements that - * actually appear in the matrix. - * - * The parameter @p - * n_max_entries_per_row defines how - * much memory will be allocated for - * each row. This number does not - * need to be accurate, as the - * structure is reorganized in the - * compress() call. - */ - SparseMatrix (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const unsigned int n_max_entries_per_row = 0); - - /** - * This constructor is similar to the - * one above, but it now takes two - * different Epetra maps for rows and - * columns. This interface is meant - * to be used for generating - * rectangular matrices, where one - * map specifies the %parallel - * distribution of degrees of freedom - * associated with matrix rows and - * the second one specifies the - * %parallel distribution the dofs - * associated with columns in the - * matrix. The second map also - * provides information for the - * internal arrangement in matrix - * vector products (i.e., the - * distribution of vector this matrix - * is to be multiplied with), but is - * not used for the distribution of - * the columns – rather, all - * column elements of a row are - * stored on the same processor in - * any case. The vector - * n_entries_per_row - * specifies the number of entries in - * each row of the newly generated - * matrix. - */ - SparseMatrix (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, - const std::vector &n_entries_per_row); - - /** - * This function is initializes the - * Trilinos Epetra matrix according - * to the specified sparsity_pattern, - * and also reassigns the matrix rows - * to different processes according - * to a user-supplied index set and - * %parallel communicator. In - * programs following the style of - * the tutorial programs, this - * function (and the respective call - * for a rectangular matrix) are the - * natural way to initialize the - * matrix size, its distribution - * among the MPI processes (if run in - * %parallel) as well as the locatoin - * of non-zero elements. Trilinos - * stores the sparsity pattern - * internally, so it won't be needed - * any more after this call, in - * contrast to the deal.II own - * object. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. 
This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. If - * the flag is not set, each - * processor just sets the elements - * in the sparsity pattern that - * belong to its rows. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const IndexSet ¶llel_partitioning, - const SparsityType &sparsity_pattern, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const bool exchange_data = false); - - /** - * This function is similar to the - * other initialization function - * above, but now also reassigns the - * matrix rows and columns according - * to two user-supplied index sets. - * To be used for rectangular - * matrices. The optional argument @p - * exchange_data can be used for - * reinitialization with a sparsity - * pattern that is not fully - * constructed. This feature is only - * implemented for input sparsity - * patterns of type - * CompressedSimpleSparsityPattern. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const SparsityType &sparsity_pattern, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const bool exchange_data = false); - - /** - * This function initializes the - * Trilinos matrix using the deal.II - * sparse matrix and the entries - * stored therein. It uses a - * threshold to copy only elements - * with modulus larger than the - * threshold (so zeros in the deal.II - * matrix can be filtered away). In - * contrast to the other reinit - * function with deal.II sparse - * matrix argument, this function - * takes a %parallel partitioning - * specified by the user instead of - * internally generating it. - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix - * entries should be copied, too. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const IndexSet ¶llel_partitioning, - const ::dealii::SparseMatrix &dealii_sparse_matrix, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const double drop_tolerance=1e-13, - const bool copy_values=true, - const ::dealii::SparsityPattern *use_this_sparsity=0); - - /** - * This function is similar to the - * other initialization function with - * deal.II sparse matrix input above, - * but now takes index sets for both - * the rows and the columns of the - * matrix. Chosen for rectangular - * matrices. - * - * The optional parameter - * copy_values decides - * whether only the sparsity - * structure of the input matrix - * should be used or the matrix - * entries should be copied, too. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - template - void reinit (const IndexSet &row_parallel_partitioning, - const IndexSet &col_parallel_partitioning, - const ::dealii::SparseMatrix &dealii_sparse_matrix, - const MPI_Comm &communicator = MPI_COMM_WORLD, - const double drop_tolerance=1e-13, - const bool copy_values=true, - const ::dealii::SparsityPattern *use_this_sparsity=0); + /** + * Constructor using an IndexSet and + * an MPI communicator to describe + * the %parallel partitioning. 
The + * parameter @p n_max_entries_per_row + * sets the number of nonzero entries + * in each row that will be + * allocated. Note that this number + * does not need to be exact, and it + * is even allowed that the actual + * matrix structure has more nonzero + * entries than specified in the + * constructor. However it is still + * advantageous to provide good + * estimates here since this will + * considerably increase the + * performance of the matrix + * setup. However, there is no effect + * in the performance of + * matrix-vector products, since + * Trilinos reorganizes the matrix + * memory prior to use (in the + * compress() step). + */ + SparseMatrix (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_max_entries_per_row = 0); + + /** + * Same as before, but now set the + * number of nonzeros in each matrix + * row separately. Since we know the + * number of elements in the matrix + * exactly in this case, we can + * already allocate the right amount + * of memory, which makes the + * creation process including the + * insertion of nonzero elements by + * the respective + * SparseMatrix::reinit call + * considerably faster. + */ + SparseMatrix (const IndexSet ¶llel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); + + /** + * This constructor is similar to the + * one above, but it now takes two + * different IndexSet partitions for + * row and columns. This interface is + * meant to be used for generating + * rectangular matrices, where the + * first index set describes the + * %parallel partitioning of the + * degrees of freedom associated with + * the matrix rows and the second one + * the partitioning of the matrix + * columns. The second index set + * specifies the partitioning of the + * vectors this matrix is to be + * multiplied with, not the + * distribution of the elements that + * actually appear in the matrix. + * + * The parameter @p + * n_max_entries_per_row defines how + * much memory will be allocated for + * each row. This number does not + * need to be accurate, as the + * structure is reorganized in the + * compress() call. + */ + SparseMatrix (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const unsigned int n_max_entries_per_row = 0); + + /** + * This constructor is similar to the + * one above, but it now takes two + * different Epetra maps for rows and + * columns. This interface is meant + * to be used for generating + * rectangular matrices, where one + * map specifies the %parallel + * distribution of degrees of freedom + * associated with matrix rows and + * the second one specifies the + * %parallel distribution the dofs + * associated with columns in the + * matrix. The second map also + * provides information for the + * internal arrangement in matrix + * vector products (i.e., the + * distribution of vector this matrix + * is to be multiplied with), but is + * not used for the distribution of + * the columns – rather, all + * column elements of a row are + * stored on the same processor in + * any case. The vector + * n_entries_per_row + * specifies the number of entries in + * each row of the newly generated + * matrix. 
+ */ + SparseMatrix (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, + const MPI_Comm &communicator, + const std::vector &n_entries_per_row); + + /** + * This function is initializes the + * Trilinos Epetra matrix according + * to the specified sparsity_pattern, + * and also reassigns the matrix rows + * to different processes according + * to a user-supplied index set and + * %parallel communicator. In + * programs following the style of + * the tutorial programs, this + * function (and the respective call + * for a rectangular matrix) are the + * natural way to initialize the + * matrix size, its distribution + * among the MPI processes (if run in + * %parallel) as well as the locatoin + * of non-zero elements. Trilinos + * stores the sparsity pattern + * internally, so it won't be needed + * any more after this call, in + * contrast to the deal.II own + * object. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. If + * the flag is not set, each + * processor just sets the elements + * in the sparsity pattern that + * belong to its rows. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const IndexSet ¶llel_partitioning, - const SparsityType &sparsity_pattern, ++ const SparsityType &sparsity_pattern, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool exchange_data = false); + + /** + * This function is similar to the + * other initialization function + * above, but now also reassigns the + * matrix rows and columns according + * to two user-supplied index sets. + * To be used for rectangular + * matrices. The optional argument @p + * exchange_data can be used for + * reinitialization with a sparsity + * pattern that is not fully + * constructed. This feature is only + * implemented for input sparsity + * patterns of type + * CompressedSimpleSparsityPattern. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, - const SparsityType &sparsity_pattern, ++ const SparsityType &sparsity_pattern, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool exchange_data = false); + + /** + * This function initializes the + * Trilinos matrix using the deal.II + * sparse matrix and the entries + * stored therein. It uses a + * threshold to copy only elements + * with modulus larger than the + * threshold (so zeros in the deal.II + * matrix can be filtered away). In + * contrast to the other reinit + * function with deal.II sparse + * matrix argument, this function + * takes a %parallel partitioning + * specified by the user instead of + * internally generating it. + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. 
+ */ + template + void reinit (const IndexSet ¶llel_partitioning, + const ::dealii::SparseMatrix &dealii_sparse_matrix, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const double drop_tolerance=1e-13, + const bool copy_values=true, + const ::dealii::SparsityPattern *use_this_sparsity=0); + + /** + * This function is similar to the + * other initialization function with + * deal.II sparse matrix input above, + * but now takes index sets for both + * the rows and the columns of the + * matrix. Chosen for rectangular + * matrices. + * + * The optional parameter + * copy_values decides + * whether only the sparsity + * structure of the input matrix + * should be used or the matrix + * entries should be copied, too. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + template + void reinit (const IndexSet &row_parallel_partitioning, + const IndexSet &col_parallel_partitioning, - const ::dealii::SparseMatrix &dealii_sparse_matrix, ++ const ::dealii::SparseMatrix &dealii_sparse_matrix, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const double drop_tolerance=1e-13, + const bool copy_values=true, + const ::dealii::SparsityPattern *use_this_sparsity=0); //@} - /** - * @name Information on the matrix - */ + /** + * @name Information on the matrix + */ //@{ - /** - * Return the number of rows in - * this matrix. - */ - unsigned int m () const; - - /** - * Return the number of columns - * in this matrix. - */ - unsigned int n () const; - - /** - * Return the local dimension - * of the matrix, i.e. the - * number of rows stored on the - * present MPI process. For - * sequential matrices, this - * number is the same as m(), - * but for %parallel matrices it - * may be smaller. - * - * To figure out which elements - * exactly are stored locally, - * use local_range(). - */ - unsigned int local_size () const; - - /** - * Return a pair of indices - * indicating which rows of - * this matrix are stored - * locally. The first number is - * the index of the first row - * stored, the second the index - * of the one past the last one - * that is stored locally. If - * this is a sequential matrix, - * then the result will be the - * pair (0,m()), otherwise it - * will be a pair (i,i+n), - * where - * n=local_size(). - */ - std::pair - local_range () const; - - /** - * Return whether @p index is - * in the local range or not, - * see also local_range(). - */ - bool in_local_range (const unsigned int index) const; - - /** - * Return the number of nonzero - * elements of this matrix. - */ - unsigned int n_nonzero_elements () const; - - /** - * Number of entries in a - * specific row. - */ - unsigned int row_length (const unsigned int row) const; - - /** - * Returns the state of the matrix, - * i.e., whether compress() needs to - * be called after an operation - * requiring data exchange. A call to - * compress() is also needed when the - * method set() has been called (even - * when working in serial). - */ - bool is_compressed () const; - - /** - * Determine an estimate for the memory - * consumption (in bytes) of this - * object. Note that only the memory - * reserved on the current processor is - * returned in case this is called in - * an MPI-based program. - */ - std::size_t memory_consumption () const; + /** + * Return the number of rows in + * this matrix. + */ + unsigned int m () const; + + /** + * Return the number of columns + * in this matrix. 
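The same kind of initialization expressed with the IndexSet interface documented above, again only a sketch under the assumption of an MPI-enabled build; my_rank, n_ranks and the contiguous row split are illustrative.

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/compressed_simple_sparsity_pattern.h>
    #include <deal.II/lac/trilinos_sparse_matrix.h>

    void index_set_usage_sketch (const unsigned int n,
                                 const unsigned int my_rank,
                                 const unsigned int n_ranks)
    {
      using namespace dealii;

      // locally owned rows: a simple contiguous range [begin, end)
      const unsigned int begin = (n *  my_rank)    / n_ranks;
      const unsigned int end   = (n * (my_rank+1)) / n_ranks;

      IndexSet locally_owned (n);
      locally_owned.add_range (begin, end);
      locally_owned.compress ();

      // each process announces only the entries of its own rows
      CompressedSimpleSparsityPattern csp (n, n);
      for (unsigned int i=begin; i<end; ++i)
        csp.add (i, i);

      TrilinosWrappers::SparseMatrix matrix;
      matrix.reinit (locally_owned, csp, MPI_COMM_WORLD);   // collective

      for (unsigned int i=begin; i<end; ++i)
        matrix.set (i, i, 1.0);
      matrix.compress ();
    }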
+ */ + unsigned int n () const; + + /** + * Return the local dimension + * of the matrix, i.e. the + * number of rows stored on the + * present MPI process. For + * sequential matrices, this + * number is the same as m(), + * but for %parallel matrices it + * may be smaller. + * + * To figure out which elements + * exactly are stored locally, + * use local_range(). + */ + unsigned int local_size () const; + + /** + * Return a pair of indices + * indicating which rows of + * this matrix are stored + * locally. The first number is + * the index of the first row + * stored, the second the index + * of the one past the last one + * that is stored locally. If + * this is a sequential matrix, + * then the result will be the + * pair (0,m()), otherwise it + * will be a pair (i,i+n), + * where + * n=local_size(). + */ + std::pair + local_range () const; + + /** + * Return whether @p index is + * in the local range or not, + * see also local_range(). + */ + bool in_local_range (const unsigned int index) const; + + /** + * Return the number of nonzero + * elements of this matrix. + */ + unsigned int n_nonzero_elements () const; + + /** + * Number of entries in a + * specific row. + */ + unsigned int row_length (const unsigned int row) const; + + /** + * Returns the state of the matrix, + * i.e., whether compress() needs to + * be called after an operation + * requiring data exchange. A call to + * compress() is also needed when the + * method set() has been called (even + * when working in serial). + */ + bool is_compressed () const; + + /** + * Determine an estimate for the memory + * consumption (in bytes) of this + * object. Note that only the memory + * reserved on the current processor is + * returned in case this is called in + * an MPI-based program. + */ + std::size_t memory_consumption () const; //@} - /** - * @name Modifying entries - */ + /** + * @name Modifying entries + */ //@{ - /** - * This operator assigns a scalar to - * a matrix. Since this does usually - * not make much sense (should we set - * all matrix entries to this value? - * Only the nonzero entries of the - * sparsity pattern?), this operation - * is only allowed if the actual - * value to be assigned is zero. This - * operator only exists to allow for - * the obvious notation - * matrix=0, which sets all - * elements of the matrix to zero, - * but keeps the sparsity pattern - * previously used. - */ - SparseMatrix & - operator = (const double d); - - /** - * Release all memory and return to a - * state just like after having - * called the default constructor. - * - * This is a collective operation - * that needs to be called on all - * processors in order to avoid a - * dead lock. - */ - void clear (); - - /** - * This command does two things: - *
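A short sketch (not part of this patch) using the query functions of the "Information on the matrix" group documented above; matrix is assumed to be an already initialized object.

    #include <deal.II/lac/trilinos_sparse_matrix.h>
    #include <iostream>

    void print_matrix_info (const dealii::TrilinosWrappers::SparseMatrix &matrix)
    {
      const std::pair<unsigned int, unsigned int> range = matrix.local_range ();

      std::cout << "global size       : " << matrix.m () << " x " << matrix.n () << '\n'
                << "locally stored    : rows [" << range.first << ',' << range.second
                << "), i.e. " << matrix.local_size () << " rows\n"
                << "nonzero entries   : " << matrix.n_nonzero_elements () << '\n'
                << "compressed        : " << (matrix.is_compressed () ? "yes" : "no") << '\n'
                << "memory, this CPU  : " << matrix.memory_consumption () << " bytes"
                << std::endl;

      if (matrix.in_local_range (0))
        std::cout << "row 0 holds " << matrix.row_length (0) << " entries" << std::endl;
    }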
    - *
  • If the matrix was initialized - * without a sparsity pattern, - * elements have been added manually - * using the set() command. When this - * process is completed, a call to - * compress() reorganizes the - * internal data structures (aparsity - * pattern) so that a fast access to - * data is possible in matrix-vector - * products. - *
  • If the matrix structure has - * already been fixed (either by - * initialization with a sparsity - * pattern or by calling compress() - * during the setup phase), this - * command does the %parallel - * exchange of data. This is - * necessary when we perform assembly - * on more than one (MPI) process, - * because then some non-local row - * data will accumulate on nodes that - * belong to the current's processor - * element, but are actually held by - * another. This command is usually - * called after all elements have - * been traversed. - *
- * - * In both cases, this function - * compresses the data structures and - * allows the resulting matrix to be - * used in all other operations like - * matrix-vector products. This is a - * collective operation, i.e., it - * needs to be run on all processors - * when used in %parallel. - * - * See @ref GlossCompress "Compressing distributed objects" - * for more information. - */ - void compress (::dealii::VectorOperation::values operation - =::dealii::VectorOperation::unknown); - - /** - * Set the element (i,j) - * to @p value. - * - * This function is able to insert new - * elements into the matrix as long as - * compress() has not been called, so - * the sparsity pattern will be - * extended. When compress() is called - * for the first time, then this is no - * longer possible and an insertion of - * elements at positions which have not - * been initialized will throw an - * exception. Note that in case - * elements need to be inserted, it is - * mandatory that elements are inserted - * only once. Otherwise, the elements - * will actually be added in the end - * (since it is not possible to - * efficiently find values to the same - * entry before compress() has been - * called). In the case that an element - * is set more than once, initialize - * the matrix with a sparsity pattern - * first. - */ - void set (const unsigned int i, - const unsigned int j, - const TrilinosScalar value); - - /** - * Set all elements given in a - * FullMatrix into the sparse - * matrix locations given by - * indices. In other words, - * this function writes the elements - * in full_matrix into the - * calling matrix, using the - * local-to-global indexing specified - * by indices for both the - * rows and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * This function is able to insert - * new elements into the matrix as - * long as compress() has not been - * called, so the sparsity pattern - * will be extended. When compress() - * is called for the first time, then - * this is no longer possible and an - * insertion of elements at positions - * which have not been initialized - * will throw an exception. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. - */ - void set (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - void set (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = false); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. - * - * This function is able to insert - * new elements into the matrix as - * long as compress() has not been - * called, so the sparsity pattern - * will be extended. When compress() - * is called for the first time, then - * this is no longer possible and an - * insertion of elements at positions - * which have not been initialized - * will throw an exception. 
- * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. - */ - void set (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = false); - - /** - * Set several elements to values - * given by values in a - * given row in columns given by - * col_indices into the sparse - * matrix. - * - * This function is able to insert - * new elements into the matrix as - * long as compress() has not been - * called, so the sparsity pattern - * will be extended. When compress() - * is called for the first time, then - * this is no longer possible and an - * insertion of elements at positions - * which have not been initialized - * will throw an exception. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be inserted anyway - * or they should be filtered - * away. The default value is - * false, i.e., even zero - * values are inserted/replaced. - */ - void set (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const TrilinosScalar *values, - const bool elide_zero_values = false); - - /** - * Add @p value to the element - * (i,j). - * - * Just as the respective call in - * deal.II SparseMatrix - * class (but in contrast to the - * situation for PETSc based - * matrices), this function - * throws an exception if an - * entry does not exist in the - * sparsity pattern. Moreover, if - * value is not a finite - * number an exception is thrown. - */ - void add (const unsigned int i, - const unsigned int j, - const TrilinosScalar value); - - /** - * Add all elements given in a - * FullMatrix into sparse - * matrix locations given by - * indices. In other words, - * this function adds the elements in - * full_matrix to the - * respective entries in calling - * matrix, using the local-to-global - * indexing specified by - * indices for both the rows - * and the columns of the - * matrix. This function assumes a - * quadratic sparse matrix and a - * quadratic full_matrix, the usual - * situation in FE calculations. - * - * Just as the respective call in - * deal.II SparseMatrix - * class (but in contrast to the - * situation for PETSc based - * matrices), this function - * throws an exception if an - * entry does not exist in the - * sparsity pattern. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - void add (const std::vector &indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Same function as before, but now - * including the possibility to use - * rectangular full_matrices and - * different local-to-global indexing - * on rows and columns, respectively. - */ - void add (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &full_matrix, - const bool elide_zero_values = true); - - /** - * Set several elements in the - * specified row of the matrix with - * column indices as given by - * col_indices to the - * respective value. 
- * - * Just as the respective call in - * deal.II SparseMatrix - * class (but in contrast to the - * situation for PETSc based - * matrices), this function - * throws an exception if an - * entry does not exist in the - * sparsity pattern. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - void add (const unsigned int row, - const std::vector &col_indices, - const std::vector &values, - const bool elide_zero_values = true); - - /** - * Add an array of values given by - * values in the given - * global matrix row at columns - * specified by col_indices in the - * sparse matrix. - * - * Just as the respective call in - * deal.II SparseMatrix class - * (but in contrast to the situation - * for PETSc based matrices), this - * function throws an exception if an - * entry does not exist in the - * sparsity pattern. - * - * The optional parameter - * elide_zero_values can be - * used to specify whether zero - * values should be added anyway or - * these should be filtered away and - * only non-zero data is added. The - * default value is true, - * i.e., zero values won't be added - * into the matrix. - */ - void add (const unsigned int row, - const unsigned int n_cols, - const unsigned int *col_indices, - const TrilinosScalar *values, - const bool elide_zero_values = true, - const bool col_indices_are_sorted = false); - - /** - * Multiply the entire matrix - * by a fixed factor. - */ - SparseMatrix & operator *= (const TrilinosScalar factor); - - /** - * Divide the entire matrix by - * a fixed factor. - */ - SparseMatrix & operator /= (const TrilinosScalar factor); - - /** - * Copy the given (Trilinos) matrix - * (sparsity pattern and entries). - */ - void copy_from (const SparseMatrix &source); - - /** - * Add matrix scaled by - * factor to this matrix, - * i.e. the matrix - * factor*matrix is added to - * this. If the sparsity - * pattern of the calling matrix does - * not contain all the elements in - * the sparsity pattern of the input - * matrix, this function will throw - * an exception. - */ - void add (const TrilinosScalar factor, - const SparseMatrix &matrix); - - /** - * Remove all elements from - * this row by setting - * them to zero. The function - * does not modify the number - * of allocated nonzero - * entries, it only sets some - * entries to zero. It may drop - * them from the sparsity - * pattern, though (but retains - * the allocated memory in case - * new entries are again added - * later). Note that this is a - * global operation, so this - * needs to be done on all MPI - * processes. - * - * This operation is used in - * eliminating constraints - * (e.g. due to hanging nodes) - * and makes sure that we can - * write this modification to - * the matrix without having to - * read entries (such as the - * locations of non-zero - * elements) from it — - * without this operation, - * removing constraints on - * %parallel matrices is a - * rather complicated - * procedure. - * - * The second parameter can be - * used to set the diagonal - * entry of this row to a value - * different from zero. The - * default is to set it to - * zero. - */ - void clear_row (const unsigned int row, - const TrilinosScalar new_diag_value = 0); - - /** - * Same as clear_row(), except - * that it works on a number of - * rows at once. 
- * - * The second parameter can be - * used to set the diagonal - * entries of all cleared rows - * to something different from - * zero. Note that all of these - * diagonal entries get the - * same value -- if you want - * different values for the - * diagonal entries, you have - * to set them by hand. - */ - void clear_rows (const std::vector &rows, - const TrilinosScalar new_diag_value = 0); - - /** - * Make an in-place transpose - * of a matrix. - */ - void transpose (); + /** + * This operator assigns a scalar to + * a matrix. Since this does usually + * not make much sense (should we set + * all matrix entries to this value? + * Only the nonzero entries of the + * sparsity pattern?), this operation + * is only allowed if the actual + * value to be assigned is zero. This + * operator only exists to allow for + * the obvious notation + * matrix=0, which sets all + * elements of the matrix to zero, + * but keeps the sparsity pattern + * previously used. + */ + SparseMatrix & + operator = (const double d); + + /** + * Release all memory and return to a + * state just like after having + * called the default constructor. + * + * This is a collective operation + * that needs to be called on all + * processors in order to avoid a + * dead lock. + */ + void clear (); + + /** + * This command does two things: + *
    + *
  • If the matrix was initialized + * without a sparsity pattern, + * elements have been added manually + * using the set() command. When this + * process is completed, a call to + * compress() reorganizes the + * internal data structures (sparsity + * pattern) so that fast access to + * data is possible in matrix-vector + * products. + *
  • If the matrix structure has + * already been fixed (either by + * initialization with a sparsity + * pattern or by calling compress() + * during the setup phase), this + * command does the %parallel + * exchange of data. This is + * necessary when we perform assembly + * on more than one (MPI) process, + * because then some non-local row + * data will accumulate on nodes that + * belong to the current processor's + * elements, but are actually held by + * another. This command is usually + * called after all elements have + * been traversed. + *
+ * + * In both cases, this function + * compresses the data structures and + * allows the resulting matrix to be + * used in all other operations like + * matrix-vector products. This is a + * collective operation, i.e., it + * needs to be run on all processors + * when used in %parallel. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + */ + void compress (::dealii::VectorOperation::values operation + =::dealii::VectorOperation::unknown); + + /** + * Set the element (i,j) + * to @p value. + * + * This function is able to insert new + * elements into the matrix as long as + * compress() has not been called, so + * the sparsity pattern will be + * extended. When compress() is called + * for the first time, then this is no + * longer possible and an insertion of + * elements at positions which have not + * been initialized will throw an + * exception. Note that in case + * elements need to be inserted, it is + * mandatory that elements are inserted + * only once. Otherwise, the elements + * will actually be added in the end + * (since it is not possible to + * efficiently find values to the same + * entry before compress() has been + * called). In the case that an element + * is set more than once, initialize + * the matrix with a sparsity pattern + * first. + */ + void set (const unsigned int i, + const unsigned int j, + const TrilinosScalar value); + + /** + * Set all elements given in a + * FullMatrix into the sparse + * matrix locations given by + * indices. In other words, + * this function writes the elements + * in full_matrix into the + * calling matrix, using the + * local-to-global indexing specified + * by indices for both the + * rows and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * This function is able to insert + * new elements into the matrix as + * long as compress() has not been + * called, so the sparsity pattern + * will be extended. When compress() + * is called for the first time, then + * this is no longer possible and an + * insertion of elements at positions + * which have not been initialized + * will throw an exception. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ - void set (const std::vector &indices, ++ void set (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. + */ - void set (const std::vector &row_indices, - const std::vector &col_indices, ++ void set (const std::vector &row_indices, ++ const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = false); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. + * + * This function is able to insert + * new elements into the matrix as + * long as compress() has not been + * called, so the sparsity pattern + * will be extended. 
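A sketch (not part of this patch) of the workflow described above when no sparsity pattern is given up front: set() both creates and fills entries until the first compress() fixes the structure, and matrix=0 and clear() behave as documented. Sizes and values are illustrative.

    #include <deal.II/lac/trilinos_sparse_matrix.h>

    void build_without_pattern_sketch ()
    {
      using namespace dealii;

      // at most 3 entries per row will be allocated up front
      TrilinosWrappers::SparseMatrix matrix (5U, 5U, 3U);

      // before the first compress(), set() may create new entries
      for (unsigned int i=0; i<5; ++i)
        {
          matrix.set (i, i, 4.0);
          if (i+1 < 5)
            matrix.set (i, i+1, -1.0);
        }

      // fixes the sparsity pattern; required even in serial after set()
      matrix.compress ();

      matrix = 0;        // keep the pattern, zero all stored values
      matrix.clear ();   // collective: release everything, back to the empty state
    }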
When compress() + * is called for the first time, then + * this is no longer possible and an + * insertion of elements at positions + * which have not been initialized + * will throw an exception. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ + void set (const unsigned int row, + const std::vector &col_indices, + const std::vector &values, + const bool elide_zero_values = false); + + /** + * Set several elements to values + * given by values in a + * given row in columns given by + * col_indices into the sparse + * matrix. + * + * This function is able to insert + * new elements into the matrix as + * long as compress() has not been + * called, so the sparsity pattern + * will be extended. When compress() + * is called for the first time, then + * this is no longer possible and an + * insertion of elements at positions + * which have not been initialized + * will throw an exception. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be inserted anyway + * or they should be filtered + * away. The default value is + * false, i.e., even zero + * values are inserted/replaced. + */ + void set (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, + const TrilinosScalar *values, + const bool elide_zero_values = false); + + /** + * Add @p value to the element + * (i,j). + * + * Just as the respective call in + * deal.II SparseMatrix + * class (but in contrast to the + * situation for PETSc based + * matrices), this function + * throws an exception if an + * entry does not exist in the + * sparsity pattern. Moreover, if + * value is not a finite + * number an exception is thrown. + */ + void add (const unsigned int i, + const unsigned int j, + const TrilinosScalar value); + + /** + * Add all elements given in a + * FullMatrix into sparse + * matrix locations given by + * indices. In other words, + * this function adds the elements in + * full_matrix to the + * respective entries in calling + * matrix, using the local-to-global + * indexing specified by + * indices for both the rows + * and the columns of the + * matrix. This function assumes a + * quadratic sparse matrix and a + * quadratic full_matrix, the usual + * situation in FE calculations. + * + * Just as the respective call in + * deal.II SparseMatrix + * class (but in contrast to the + * situation for PETSc based + * matrices), this function + * throws an exception if an + * entry does not exist in the + * sparsity pattern. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ - void add (const std::vector &indices, ++ void add (const std::vector &indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Same function as before, but now + * including the possibility to use + * rectangular full_matrices and + * different local-to-global indexing + * on rows and columns, respectively. 
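A sketch (not part of this patch) of the finite-element style accumulation supported by the add() variants documented above: a local FullMatrix is added into the global matrix through a local-to-global index map. The indices and values are made up, and every position touched must already be part of the sparsity pattern.

    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/trilinos_sparse_matrix.h>
    #include <vector>

    void add_local_contribution (dealii::TrilinosWrappers::SparseMatrix &matrix)
    {
      using namespace dealii;

      FullMatrix<double> cell_matrix (2, 2);
      cell_matrix(0,0) =  1.;  cell_matrix(0,1) = -1.;
      cell_matrix(1,0) = -1.;  cell_matrix(1,1) =  1.;

      std::vector<unsigned int> local_dof_indices (2);
      local_dof_indices[0] = 3;
      local_dof_indices[1] = 7;

      // add() throws if one of (3,3), (3,7), (7,3), (7,7) is not in the pattern;
      // with the default elide_zero_values=true, zero entries would be skipped
      matrix.add (local_dof_indices, cell_matrix);

      // once every process has contributed, exchange the non-local data
      matrix.compress ();
    }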
+ */ - void add (const std::vector &row_indices, - const std::vector &col_indices, ++ void add (const std::vector &row_indices, ++ const std::vector &col_indices, + const FullMatrix &full_matrix, + const bool elide_zero_values = true); + + /** + * Set several elements in the + * specified row of the matrix with + * column indices as given by + * col_indices to the + * respective value. + * + * Just as the respective call in + * deal.II SparseMatrix + * class (but in contrast to the + * situation for PETSc based + * matrices), this function + * throws an exception if an + * entry does not exist in the + * sparsity pattern. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + void add (const unsigned int row, + const std::vector &col_indices, + const std::vector &values, + const bool elide_zero_values = true); + + /** + * Add an array of values given by + * values in the given + * global matrix row at columns + * specified by col_indices in the + * sparse matrix. + * + * Just as the respective call in + * deal.II SparseMatrix class + * (but in contrast to the situation + * for PETSc based matrices), this + * function throws an exception if an + * entry does not exist in the + * sparsity pattern. + * + * The optional parameter + * elide_zero_values can be + * used to specify whether zero + * values should be added anyway or + * these should be filtered away and + * only non-zero data is added. The + * default value is true, + * i.e., zero values won't be added + * into the matrix. + */ + void add (const unsigned int row, + const unsigned int n_cols, + const unsigned int *col_indices, + const TrilinosScalar *values, + const bool elide_zero_values = true, + const bool col_indices_are_sorted = false); + + /** + * Multiply the entire matrix + * by a fixed factor. + */ + SparseMatrix &operator *= (const TrilinosScalar factor); + + /** + * Divide the entire matrix by + * a fixed factor. + */ + SparseMatrix &operator /= (const TrilinosScalar factor); + + /** + * Copy the given (Trilinos) matrix + * (sparsity pattern and entries). + */ + void copy_from (const SparseMatrix &source); + + /** + * Add matrix scaled by + * factor to this matrix, + * i.e. the matrix + * factor*matrix is added to + * this. If the sparsity + * pattern of the calling matrix does + * not contain all the elements in + * the sparsity pattern of the input + * matrix, this function will throw + * an exception. + */ + void add (const TrilinosScalar factor, + const SparseMatrix &matrix); + + /** + * Remove all elements from + * this row by setting + * them to zero. The function + * does not modify the number + * of allocated nonzero + * entries, it only sets some + * entries to zero. It may drop + * them from the sparsity + * pattern, though (but retains + * the allocated memory in case + * new entries are again added + * later). Note that this is a + * global operation, so this + * needs to be done on all MPI + * processes. + * + * This operation is used in + * eliminating constraints + * (e.g. 
due to hanging nodes) + * and makes sure that we can + * write this modification to + * the matrix without having to + * read entries (such as the + * locations of non-zero + * elements) from it — + * without this operation, + * removing constraints on + * %parallel matrices is a + * rather complicated + * procedure. + * + * The second parameter can be + * used to set the diagonal + * entry of this row to a value + * different from zero. The + * default is to set it to + * zero. + */ + void clear_row (const unsigned int row, + const TrilinosScalar new_diag_value = 0); + + /** + * Same as clear_row(), except + * that it works on a number of + * rows at once. + * + * The second parameter can be + * used to set the diagonal + * entries of all cleared rows + * to something different from + * zero. Note that all of these + * diagonal entries get the + * same value -- if you want + * different values for the + * diagonal entries, you have + * to set them by hand. + */ + void clear_rows (const std::vector &rows, + const TrilinosScalar new_diag_value = 0); + + /** + * Make an in-place transpose + * of a matrix. + */ + void transpose (); //@} - /** - * @name Entry Access - */ + /** + * @name Entry Access + */ //@{ - /** - * Return the value of the - * entry (i,j). This - * may be an expensive - * operation and you should - * always take care where to - * call this function. As in - * the deal.II sparse matrix - * class, we throw an exception - * if the respective entry - * doesn't exist in the - * sparsity pattern of this - * class, which is requested - * from Trilinos. Moreover, an - * exception will be thrown - * when the requested element - * is not saved on the calling - * process. - */ - TrilinosScalar operator () (const unsigned int i, - const unsigned int j) const; - - /** - * Return the value of the - * matrix entry - * (i,j). If this entry - * does not exist in the - * sparsity pattern, then zero - * is returned. While this may - * be convenient in some cases, - * note that it is simple to - * write algorithms that are - * slow compared to an optimal - * solution, since the sparsity - * of the matrix is not used. - * On the other hand, if you - * want to be sure the entry - * exists, you should use - * operator() instead. - * - * The lack of error checking - * in this function can also - * yield surprising results if - * you have a parallel - * matrix. In that case, just - * because you get a zero - * result from this function - * does not mean that either - * the entry does not exist in - * the sparsity pattern or that - * it does but has a value of - * zero. Rather, it could also - * be that it simply isn't - * stored on the current - * processor; in that case, it - * may be stored on a different - * processor, and possibly so - * with a nonzero value. - */ - TrilinosScalar el (const unsigned int i, - const unsigned int j) const; - - /** - * Return the main diagonal - * element in the ith - * row. This function throws an - * error if the matrix is not - * quadratic and it also throws - * an error if (i,i) is not - * element of the local matrix. - * See also the comment in - * trilinos_sparse_matrix.cc. - */ - TrilinosScalar diag_element (const unsigned int i) const; + /** + * Return the value of the + * entry (i,j). This + * may be an expensive + * operation and you should + * always take care where to + * call this function. 
As in + * the deal.II sparse matrix + * class, we throw an exception + * if the respective entry + * doesn't exist in the + * sparsity pattern of this + * class, which is requested + * from Trilinos. Moreover, an + * exception will be thrown + * when the requested element + * is not saved on the calling + * process. + */ + TrilinosScalar operator () (const unsigned int i, + const unsigned int j) const; + + /** + * Return the value of the + * matrix entry + * (i,j). If this entry + * does not exist in the + * sparsity pattern, then zero + * is returned. While this may + * be convenient in some cases, + * note that it is simple to + * write algorithms that are + * slow compared to an optimal + * solution, since the sparsity + * of the matrix is not used. + * On the other hand, if you + * want to be sure the entry + * exists, you should use + * operator() instead. + * + * The lack of error checking + * in this function can also + * yield surprising results if + * you have a parallel + * matrix. In that case, just + * because you get a zero + * result from this function + * does not mean that either + * the entry does not exist in + * the sparsity pattern or that + * it does but has a value of + * zero. Rather, it could also + * be that it simply isn't + * stored on the current + * processor; in that case, it + * may be stored on a different + * processor, and possibly so + * with a nonzero value. + */ + TrilinosScalar el (const unsigned int i, + const unsigned int j) const; + + /** + * Return the main diagonal + * element in the ith + * row. This function throws an + * error if the matrix is not + * quadratic and it also throws + * an error if (i,i) is not + * element of the local matrix. + * See also the comment in + * trilinos_sparse_matrix.cc. + */ + TrilinosScalar diag_element (const unsigned int i) const; //@} - /** - * @name Multiplications - */ + /** + * @name Multiplications + */ //@{ - /** - * Matrix-vector multiplication: - * let dst = M*src with - * M being this matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. - */ - void vmult (VectorBase &dst, - const VectorBase &src) const; - - /** - * Same as before, but working with - * deal.II's own distributed vector - * class. - */ - void vmult (parallel::distributed::Vector &dst, - const parallel::distributed::Vector &src) const; - - /** - * Matrix-vector multiplication: - * let dst = - * MT*src with - * M being this - * matrix. This function does the - * same as vmult() but takes the - * transposed matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). 
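// A small sketch contrasting the checked and the unchecked entry access
// described in this section. The matrix name and the assumption that row i
// is stored on the calling process are illustrative only.
#include <deal.II/lac/trilinos_sparse_matrix.h>

double checked_entry (const dealii::TrilinosWrappers::SparseMatrix &matrix,
                      const unsigned int i,
                      const unsigned int j)
{
  // operator() throws if (i,j) is not in the sparsity pattern or is not
  // stored locally ...
  return matrix (i, j);
}

double unchecked_entry (const dealii::TrilinosWrappers::SparseMatrix &matrix,
                        const unsigned int i,
                        const unsigned int j)
{
  // ... whereas el() silently returns zero in those cases.
  return matrix.el (i, j);
}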
- * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. - */ - void Tvmult (VectorBase &dst, - const VectorBase &src) const; - - /** - * Same as before, but working with - * deal.II's own distributed vector - * class. - */ - void Tvmult (parallel::distributed::Vector &dst, - const parallel::distributed::Vector &src) const; - - /** - * Adding Matrix-vector - * multiplication. Add - * M*src on dst - * with M being this - * matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. - */ - void vmult_add (VectorBase &dst, - const VectorBase &src) const; - - /** - * Adding Matrix-vector - * multiplication. Add - * MT*src to - * dst with M being - * this matrix. This function - * does the same as vmult_add() - * but takes the transposed - * matrix. - * - * Source and destination must - * not be the same vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. - */ - void Tvmult_add (VectorBase &dst, - const VectorBase &src) const; - - /** - * Return the square of the norm - * of the vector $v$ with respect - * to the norm induced by this - * matrix, i.e., - * $\left(v,Mv\right)$. This is - * useful, e.g. in the finite - * element context, where the - * $L_2$ norm of a function - * equals the matrix norm with - * respect to the mass matrix of - * the vector representing the - * nodal values of the finite - * element function. - * - * Obviously, the matrix needs to - * be quadratic for this - * operation. - * - * The implementation of this - * function is not as efficient - * as the one in the @p - * SparseMatrix class used in - * deal.II (i.e. the original - * one, not the Trilinos wrapper - * class) since Trilinos doesn't - * support this operation and - * needs a temporary vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. 
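// A sketch of the matrix-vector products documented here. All objects are
// assumed to be initialized with compatible Epetra maps, as required
// above; the function and variable names are illustrative.
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector_base.h>

void apply (const dealii::TrilinosWrappers::SparseMatrix &M,
            const dealii::TrilinosWrappers::VectorBase   &src,
            dealii::TrilinosWrappers::VectorBase         &dst,
            dealii::TrilinosWrappers::VectorBase         &dst_T)
{
  M.vmult     (dst,   src);   // dst    = M * src
  M.Tvmult    (dst_T, src);   // dst_T  = M^T * src
  M.vmult_add (dst,   src);   // dst   += M * src
}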
- */ - TrilinosScalar matrix_norm_square (const VectorBase &v) const; - - /** - * Compute the matrix scalar - * product $\left(u,Mv\right)$. - * - * The implementation of this - * function is not as efficient - * as the one in the @p - * SparseMatrix class used in - * deal.II (i.e. the original - * one, not the Trilinos - * wrapper class) since - * Trilinos doesn't support - * this operation and needs a - * temporary vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. - */ - TrilinosScalar matrix_scalar_product (const VectorBase &u, - const VectorBase &v) const; - - /** - * Compute the residual of an - * equation Mx=b, where - * the residual is defined to - * be r=b-Mx. Write the - * residual into @p dst. The - * l2 norm of - * the residual vector is - * returned. - * - * Source x and - * destination dst must - * not be the same vector. - * - * Note that both vectors have to - * be distributed vectors - * generated using the same Map - * as was used for the matrix in - * case you work on a distributed - * memory architecture, using the - * interface in the - * TrilinosWrappers::VectorBase - * class (or one of the two - * derived classes Vector and - * MPI::Vector). - * - * In case of a localized Vector, - * this function will only work - * when running on one processor, - * since the matrix object is - * inherently - * distributed. Otherwise, and - * exception will be thrown. - */ - TrilinosScalar residual (VectorBase &dst, - const VectorBase &x, - const VectorBase &b) const; - - /** - * Perform the matrix-matrix - * multiplication C = A * B, - * or, if an optional vector argument - * is given, C = A * diag(V) * - * B, where diag(V) - * defines a diagonal matrix with the - * vector entries. - * - * This function assumes that the - * calling matrix A and - * B have compatible - * sizes. The size of C will - * be set within this function. - * - * The content as well as the sparsity - * pattern of the matrix C will be - * changed by this function, so make - * sure that the sparsity pattern is - * not used somewhere else in your - * program. This is an expensive - * operation, so think twice before you - * use this function. - */ + /** + * Matrix-vector multiplication: + * let dst = M*src with + * M being this matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that both vectors have to + * be distributed vectors + * generated using the same Map + * as was used for the matrix in + * case you work on a distributed + * memory architecture, using the + * interface in the + * TrilinosWrappers::VectorBase + * class (or one of the two + * derived classes Vector and + * MPI::Vector). + * + * In case of a localized Vector, + * this function will only work + * when running on one processor, + * since the matrix object is + * inherently + * distributed. Otherwise, and + * exception will be thrown. + */ + void vmult (VectorBase &dst, + const VectorBase &src) const; + + /** + * Same as before, but working with + * deal.II's own distributed vector + * class. 
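// A sketch of residual() and matrix_norm_square() as documented here; x, b
// and the scratch vector r are assumed to share the matrix's partitioning,
// and M is assumed to be square. Names are illustrative.
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector_base.h>

double check_solution (const dealii::TrilinosWrappers::SparseMatrix &M,
                       const dealii::TrilinosWrappers::VectorBase   &x,
                       const dealii::TrilinosWrappers::VectorBase   &b,
                       dealii::TrilinosWrappers::VectorBase         &r)
{
  const double res_norm = M.residual (r, x, b);       // r = b - M*x, returns ||r||_2
  const double energy   = M.matrix_norm_square (x);   // (x, M*x)
  (void)energy;                                       // not used further in this sketch
  return res_norm;
}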
+ */ + void vmult (parallel::distributed::Vector &dst, + const parallel::distributed::Vector &src) const; + + /** + * Matrix-vector multiplication: + * let dst = + * MT*src with + * M being this + * matrix. This function does the + * same as vmult() but takes the + * transposed matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that both vectors have to + * be distributed vectors + * generated using the same Map + * as was used for the matrix in + * case you work on a distributed + * memory architecture, using the + * interface in the + * TrilinosWrappers::VectorBase + * class (or one of the two + * derived classes Vector and + * MPI::Vector). + * + * In case of a localized Vector, + * this function will only work + * when running on one processor, + * since the matrix object is + * inherently + * distributed. Otherwise, and + * exception will be thrown. + */ + void Tvmult (VectorBase &dst, + const VectorBase &src) const; + + /** + * Same as before, but working with + * deal.II's own distributed vector + * class. + */ + void Tvmult (parallel::distributed::Vector &dst, + const parallel::distributed::Vector &src) const; + + /** + * Adding Matrix-vector + * multiplication. Add + * M*src on dst + * with M being this + * matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that both vectors have to + * be distributed vectors + * generated using the same Map + * as was used for the matrix in + * case you work on a distributed + * memory architecture, using the + * interface in the + * TrilinosWrappers::VectorBase + * class (or one of the two + * derived classes Vector and + * MPI::Vector). + * + * In case of a localized Vector, + * this function will only work + * when running on one processor, + * since the matrix object is + * inherently + * distributed. Otherwise, and + * exception will be thrown. + */ + void vmult_add (VectorBase &dst, + const VectorBase &src) const; + + /** + * Adding Matrix-vector + * multiplication. Add + * MT*src to + * dst with M being + * this matrix. This function + * does the same as vmult_add() + * but takes the transposed + * matrix. + * + * Source and destination must + * not be the same vector. + * + * Note that both vectors have to + * be distributed vectors + * generated using the same Map + * as was used for the matrix in + * case you work on a distributed + * memory architecture, using the + * interface in the + * TrilinosWrappers::VectorBase + * class (or one of the two + * derived classes Vector and + * MPI::Vector). + * + * In case of a localized Vector, + * this function will only work + * when running on one processor, + * since the matrix object is + * inherently + * distributed. Otherwise, and + * exception will be thrown. + */ + void Tvmult_add (VectorBase &dst, + const VectorBase &src) const; + + /** + * Return the square of the norm + * of the vector $v$ with respect + * to the norm induced by this + * matrix, i.e., + * $\left(v,Mv\right)$. This is + * useful, e.g. in the finite + * element context, where the + * $L_2$ norm of a function + * equals the matrix norm with + * respect to the mass matrix of + * the vector representing the + * nodal values of the finite + * element function. + * + * Obviously, the matrix needs to + * be quadratic for this + * operation. + * + * The implementation of this + * function is not as efficient + * as the one in the @p + * SparseMatrix class used in + * deal.II (i.e. 
the original + * one, not the Trilinos wrapper + * class) since Trilinos doesn't + * support this operation and + * needs a temporary vector. + * + * Note that both vectors have to + * be distributed vectors + * generated using the same Map + * as was used for the matrix in + * case you work on a distributed + * memory architecture, using the + * interface in the + * TrilinosWrappers::VectorBase + * class (or one of the two + * derived classes Vector and + * MPI::Vector). + * + * In case of a localized Vector, + * this function will only work + * when running on one processor, + * since the matrix object is + * inherently + * distributed. Otherwise, and + * exception will be thrown. + */ + TrilinosScalar matrix_norm_square (const VectorBase &v) const; + + /** + * Compute the matrix scalar + * product $\left(u,Mv\right)$. + * + * The implementation of this + * function is not as efficient + * as the one in the @p + * SparseMatrix class used in + * deal.II (i.e. the original + * one, not the Trilinos + * wrapper class) since + * Trilinos doesn't support + * this operation and needs a + * temporary vector. + * + * Note that both vectors have to + * be distributed vectors + * generated using the same Map + * as was used for the matrix in + * case you work on a distributed + * memory architecture, using the + * interface in the + * TrilinosWrappers::VectorBase + * class (or one of the two + * derived classes Vector and + * MPI::Vector). + * + * In case of a localized Vector, + * this function will only work + * when running on one processor, + * since the matrix object is + * inherently + * distributed. Otherwise, and + * exception will be thrown. + */ + TrilinosScalar matrix_scalar_product (const VectorBase &u, + const VectorBase &v) const; + + /** + * Compute the residual of an + * equation Mx=b, where + * the residual is defined to + * be r=b-Mx. Write the + * residual into @p dst. The + * l2 norm of + * the residual vector is + * returned. + * + * Source x and + * destination dst must + * not be the same vector. + * + * Note that both vectors have to + * be distributed vectors + * generated using the same Map + * as was used for the matrix in + * case you work on a distributed + * memory architecture, using the + * interface in the + * TrilinosWrappers::VectorBase + * class (or one of the two + * derived classes Vector and + * MPI::Vector). + * + * In case of a localized Vector, + * this function will only work + * when running on one processor, + * since the matrix object is + * inherently + * distributed. Otherwise, and + * exception will be thrown. + */ + TrilinosScalar residual (VectorBase &dst, + const VectorBase &x, + const VectorBase &b) const; + + /** + * Perform the matrix-matrix + * multiplication C = A * B, + * or, if an optional vector argument + * is given, C = A * diag(V) * + * B, where diag(V) + * defines a diagonal matrix with the + * vector entries. + * + * This function assumes that the + * calling matrix A and + * B have compatible + * sizes. The size of C will + * be set within this function. + * + * The content as well as the sparsity + * pattern of the matrix C will be + * changed by this function, so make + * sure that the sparsity pattern is + * not used somewhere else in your + * program. This is an expensive + * operation, so think twice before you + * use this function. 
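// A sketch of the matrix-matrix product described above. Since mmult()
// sets the size and sparsity pattern of C itself, C only needs to be a
// default-constructed matrix; the names are illustrative.
#include <deal.II/lac/trilinos_sparse_matrix.h>

void multiply (const dealii::TrilinosWrappers::SparseMatrix &A,
               const dealii::TrilinosWrappers::SparseMatrix &B,
               dealii::TrilinosWrappers::SparseMatrix       &C)
{
  A.mmult (C, B);   // C = A * B
}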
+ */ void mmult (SparseMatrix &C, const SparseMatrix &B, const VectorBase &V = VectorBase()) const; diff --cc deal.II/include/deal.II/lac/trilinos_vector_base.h index 9fe33b8806,6e386f86bf..76bee84634 --- a/deal.II/include/deal.II/lac/trilinos_vector_base.h +++ b/deal.II/include/deal.II/lac/trilinos_vector_base.h @@@ -977,64 -193,848 +193,848 @@@ namespace TrilinosWrapper << arg2 << " through " << arg3 << " are stored locally and can be accessed."); - private: - /** - * Trilinos doesn't allow to - * mix additions to matrix - * entries and overwriting them - * (to make synchronisation of - * parallel computations - * simpler). The way we do it - * is to, for each access - * operation, store whether it - * is an insertion or an - * addition. If the previous - * one was of different type, - * then we first have to flush - * the Trilinos buffers; - * otherwise, we can simply go - * on. Luckily, Trilinos has - * an object for this which - * does already all the - * parallel communications in - * such a case, so we simply - * use their model, which - * stores whether the last - * operation was an addition or - * an insertion. - */ - Epetra_CombineMode last_action; - - /** - * A boolean variable to hold - * information on whether the - * vector is compressed or not. - */ - bool compressed; + /** + * Point to the vector we are + * referencing. + */ + VectorBase &vector; /** - * Whether this vector has ghost elements. This is true - * on all processors even if only one of them has any - * ghost elements. + * Index of the referenced element + * of the vector. */ - bool has_ghosts; - - /** - * An Epetra distibuted vector - * type. Requires an existing - * Epetra_Map for storing data. - */ - std_cxx1x::shared_ptr vector; - - - /** - * Make the reference class a - * friend. - */ - friend class internal::VectorReference; - friend class Vector; - friend class MPI::Vector; + const unsigned int index; + + /** + * Make the vector class a + * friend, so that it can + * create objects of the + * present type. + */ + friend class ::dealii::TrilinosWrappers::VectorBase; + }; + } + /** + * @endcond + */ + + + /** + * Base class for the two types of Trilinos vectors, the distributed + * memory vector MPI::Vector and a localized vector Vector. The latter + * is designed for use in either serial implementations or as a + * localized copy on each processor. The implementation of this class + * is based on the Trilinos vector class Epetra_FEVector, the (parallel) + * partitioning of which is governed by an Epetra_Map. This means that + * the vector type is generic and can be done in this base class, while + * the definition of the partition map (and hence, the constructor and + * reinit function) will have to be done in the derived classes. The + * Epetra_FEVector is precisely the kind of vector we deal with all the + * time - we probably get it from some assembly process, where also + * entries not locally owned might need to written and hence need to be + * forwarded to the owner. The only requirement for this class to work + * is that Trilinos is installed with the same compiler as is used for + * compilation of deal.II. + * + * The interface of this class is modeled after the existing Vector + * class in deal.II. It has almost the same member functions, and is + * often exchangable. However, since Trilinos only supports a single + * scalar type (double), it is not templated, and only works with that + * type. 
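// An illustration of the drop-in character described above: apart from the
// fixed scalar type (double), a TrilinosWrappers vector is used much like
// dealii::Vector<double>. The vector v is assumed to be initialized and
// compressed; the function name is illustrative.
#include <deal.II/lac/trilinos_vector_base.h>

double scaled_norm (dealii::TrilinosWrappers::VectorBase &v)
{
  v *= 2.0;               // same scaling operator as dealii::Vector
  return v.l2_norm ();    // same norm interface as well
}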
+ * + * Note that Trilinos only guarantees that operations do what you expect + * if the function @p GlobalAssemble has been called after vector + * assembly in order to distribute the data. Therefore, you need to call + * Vector::compress() before you actually use the vectors. + * + * @ingroup TrilinosWrappers + * @ingroup Vectors + * @author Martin Kronbichler, 2008 + */ + class VectorBase : public Subscriptor + { + public: + /** + * Declare some of the standard + * types used in all + * containers. These types + * parallel those in the + * C standard libraries + * vector<...> class. + */ + typedef TrilinosScalar value_type; + typedef TrilinosScalar real_type; + typedef std::size_t size_type; + typedef internal::VectorReference reference; + typedef const internal::VectorReference const_reference; + + /** + * @name 1: Basic Object-handling + */ + //@{ + + /** + * Default constructor that + * generates an empty (zero size) + * vector. The function + * reinit() will have to + * give the vector the correct + * size and distribution among + * processes in case of an MPI + * run. + */ + VectorBase (); + + /** + * Copy constructor. Sets the + * dimension to that of the given + * vector, and copies all the + * elements. + */ + VectorBase (const VectorBase &v); + + /** + * Destructor + */ + virtual ~VectorBase (); + + /** + * Release all memory and return + * to a state just like after + * having called the default + * constructor. + */ + void clear (); + + /** + * Reinit functionality, sets the + * dimension and possibly the + * parallel partitioning (Epetra_Map) + * of the calling vector to the + * settings of the input vector. + */ + void reinit (const VectorBase &v, + const bool fast = false); + + /** + * Compress the underlying + * representation of the Trilinos + * object, i.e. flush the buffers + * of the vector object if it has + * any. This function is + * necessary after writing into a + * vector element-by-element and + * before anything else can be + * done on it. + * + * The (defaulted) argument can + * be used to specify the + * compress mode + * (Add or + * Insert) in case + * the vector has not been + * written to since the last + * time this function was + * called. The argument is + * ignored if the vector has + * been added or written to + * since the last time + * compress() was called. + * + * See @ref GlossCompress "Compressing distributed objects" + * for more information. + */ + void compress (::dealii::VectorOperation::values operation + =::dealii::VectorOperation::unknown); + + /** + * @deprecated + */ + void compress (const Epetra_CombineMode last_action); + + /** + * Returns the state of the + * vector, i.e., whether + * compress() has already been + * called after an operation + * requiring data exchange. + */ + bool is_compressed () const; + + /** + * Set all components of the + * vector to the given number @p + * s. Simply pass this down to + * the Trilinos Epetra object, + * but we still need to declare + * this function to make the + * example given in the + * discussion about making the + * constructor explicit work. + * + * Since the semantics of + * assigning a scalar to a vector + * are not immediately clear, + * this operator should really + * only be used if you want to + * set the entire vector to + * zero. This allows the + * intuitive notation + * v=0. Assigning other + * values is deprecated and may + * be disallowed in the future. + */ + VectorBase & + operator = (const TrilinosScalar s); + + /** + * Copy function. 
This function takes + * a VectorBase vector and copies all + * the elements. The target vector + * will have the same parallel + * distribution as the calling + * vector. + */ + VectorBase & + operator = (const VectorBase &v); + + /** + * Another copy function. This + * one takes a deal.II vector and + * copies it into a + * TrilinosWrapper vector. Note + * that since we do not provide + * any Epetra_map that tells + * about the partitioning of the + * vector among the MPI + * processes, the size of the + * TrilinosWrapper vector has to + * be the same as the size of the + * input vector. In order to + * change the map, use the + * reinit(const Epetra_Map + * &input_map) function. + */ + template + VectorBase & + operator = (const ::dealii::Vector &v); + + /** + * Test for equality. This + * function assumes that the + * present vector and the one to + * compare with have the same + * size already, since comparing + * vectors of different sizes + * makes not much sense anyway. + */ + bool operator == (const VectorBase &v) const; + + /** + * Test for inequality. This + * function assumes that the + * present vector and the one to + * compare with have the same + * size already, since comparing + * vectors of different sizes + * makes not much sense anyway. + */ + bool operator != (const VectorBase &v) const; + + /** + * Return the global dimension of + * the vector. + */ + unsigned int size () const; + + /** + * Return the local dimension of + * the vector, i.e. the number of + * elements stored on the present + * MPI process. For sequential + * vectors, this number is the + * same as size(), but for + * parallel vectors it may be + * smaller. + * + * To figure out which elements + * exactly are stored locally, + * use local_range(). + * + * If the vector contains ghost + * elements, they are included in + * this number. + */ + unsigned int local_size () const; + + /** + * Return a pair of indices + * indicating which elements of + * this vector are stored + * locally. The first number is + * the index of the first element + * stored, the second the index + * of the one past the last one + * that is stored locally. If + * this is a sequential vector, + * then the result will be the + * pair (0,N), otherwise it will + * be a pair (i,i+n), where + * n=local_size(). + */ + std::pair local_range () const; + + /** + * Return whether @p index is in + * the local range or not, see + * also local_range(). + */ + bool in_local_range (const unsigned int index) const; + + /** + * Return if the vector contains ghost + * elements. This answer is true if there + * are ghost elements on at least one + * process. + */ + bool has_ghost_elements() const; + + /** + * Return the scalar (inner) + * product of two vectors. The + * vectors must have the same + * size. + */ + TrilinosScalar operator * (const VectorBase &vec) const; + + /** + * Return square of the + * $l_2$-norm. + */ + real_type norm_sqr () const; + + /** + * Mean value of the elements of + * this vector. + */ + TrilinosScalar mean_value () const; + + /** + * Compute the minimal value of + * the elements of this vector. + */ + TrilinosScalar minimal_value () const; + + /** + * $l_1$-norm of the vector. The + * sum of the absolute values. + */ + real_type l1_norm () const; + + /** + * $l_2$-norm of the vector. The + * square root of the sum of the + * squares of the elements. + */ + real_type l2_norm () const; + + /** + * $l_p$-norm of the vector. The + * pth root of the sum of + * the pth powers of the + * absolute values of the + * elements. 
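// A sketch of the ownership queries documented above: operate only on the
// locally stored range of a (possibly distributed) vector. The function
// name is illustrative.
#include <deal.II/lac/trilinos_vector_base.h>

#include <utility>

double local_sum (const dealii::TrilinosWrappers::VectorBase &v)
{
  const std::pair<unsigned int, unsigned int> range = v.local_range ();
  double sum = 0;
  for (unsigned int i = range.first; i < range.second; ++i)
    sum += v (i);         // element access is safe for locally stored entries
  return sum;             // note: only the processor-local contribution
}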
+ */ + real_type lp_norm (const TrilinosScalar p) const; + + /** + * Maximum absolute value of the + * elements. + */ + real_type linfty_norm () const; + + /** + * Return whether the vector + * contains only elements with + * value zero. This function is + * mainly for internal + * consistency checks and should + * seldom be used when not in + * debug mode since it uses quite + * some time. + */ + bool all_zero () const; + + /** + * Return @p true if the vector + * has no negative entries, + * i.e. all entries are zero or + * positive. This function is + * used, for example, to check + * whether refinement indicators + * are really all positive (or + * zero). + */ + bool is_non_negative () const; + //@} + + + /** + * @name 2: Data-Access + */ + //@{ + + /** + * Provide access to a given + * element, both read and write. + */ + reference + operator () (const unsigned int index); + + /** + * Provide read-only access to an + * element. This is equivalent to + * the el() command. + */ + TrilinosScalar + operator () (const unsigned int index) const; + + /** + * Provide access to a given + * element, both read and write. + * + * Exactly the same as operator(). + */ + reference + operator [] (const unsigned int index); + + /** + * Provide read-only access to an + * element. This is equivalent to + * the el() command. + * + * Exactly the same as operator(). + */ + TrilinosScalar + operator [] (const unsigned int index) const; + + /** + * Return the value of the vector + * entry i. Note that this + * function does only work + * properly when we request a + * data stored on the local + * processor. The function will + * throw an exception in case the + * elements sits on another + * process. + */ + TrilinosScalar el (const unsigned int index) const; + + /** + * A collective set operation: + * instead of setting individual + * elements of a vector, this + * function allows to set a whole + * set of elements at once. The + * indices of the elements to be + * set are stated in the first + * argument, the corresponding + * values in the second. + */ + void set (const std::vector &indices, - const std::vector &values); ++ const std::vector &values); + + /** + * This is a second collective + * set operation. As a + * difference, this function + * takes a deal.II vector of + * values. + */ + void set (const std::vector &indices, + const ::dealii::Vector &values); + //@} + + + /** + * @name 3: Modification of vectors + */ + //@{ + + /** + * This collective set operation + * is of lower level and can + * handle anything else — + * the only thing you have to + * provide is an address where + * all the indices are stored and + * the number of elements to be + * set. + */ + void set (const unsigned int n_elements, + const unsigned int *indices, + const TrilinosScalar *values); + + /** + * A collective add operation: + * This funnction adds a whole + * set of values stored in @p + * values to the vector + * components specified by @p + * indices. + */ + void add (const std::vector &indices, + const std::vector &values); + + /** + * This is a second collective + * add operation. As a + * difference, this function + * takes a deal.II vector of + * values. + */ + void add (const std::vector &indices, + const ::dealii::Vector &values); + + /** + * Take an address where + * n_elements are stored + * contiguously and add them into + * the vector. Handles all cases + * which are not covered by the + * other two add() + * functions above. 
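// A sketch of the collective set()/add() operations documented above,
// together with the compress() calls that finalize the communication. The
// vector is assumed to be initialized and the indices to be owned by the
// calling process; all names and index values are illustrative.
#include <deal.II/lac/trilinos_vector_base.h>

#include <vector>

void write_values (dealii::TrilinosWrappers::VectorBase &v)
{
  std::vector<unsigned int> indices;
  std::vector<double>       values;     // TrilinosScalar is double
  indices.push_back (0);  values.push_back (1.0);
  indices.push_back (1);  values.push_back (2.0);

  v.set (indices, values);                          // overwrite entries
  v.compress (dealii::VectorOperation::insert);     // flush insertions

  v.add (indices, values);                          // now accumulate into them
  v.compress (dealii::VectorOperation::add);        // flush additions
}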
+ */ + void add (const unsigned int n_elements, + const unsigned int *indices, + const TrilinosScalar *values); + + /** + * Multiply the entire vector by + * a fixed factor. + */ + VectorBase &operator *= (const TrilinosScalar factor); + + /** + * Divide the entire vector by a + * fixed factor. + */ + VectorBase &operator /= (const TrilinosScalar factor); + + /** + * Add the given vector to the + * present one. + */ + VectorBase &operator += (const VectorBase &V); + + /** + * Subtract the given vector from + * the present one. + */ + VectorBase &operator -= (const VectorBase &V); + + /** + * Addition of @p s to all + * components. Note that @p s is + * a scalar and not a vector. + */ + void add (const TrilinosScalar s); + + /** + * Simple vector addition, equal + * to the operator + * +=. + * + * Though, if the second argument + * allow_different_maps + * is set, then it is possible to + * add data from a different map. + */ + void add (const VectorBase &V, + const bool allow_different_maps = false); + + /** + * Simple addition of a multiple + * of a vector, i.e. *this = + * a*V. + */ + void add (const TrilinosScalar a, + const VectorBase &V); + + /** + * Multiple addition of scaled + * vectors, i.e. *this = a*V + + * b*W. + */ + void add (const TrilinosScalar a, + const VectorBase &V, + const TrilinosScalar b, + const VectorBase &W); + + /** + * Scaling and simple vector + * addition, i.e. *this = + * s*(*this) + V. + */ + void sadd (const TrilinosScalar s, + const VectorBase &V); + + /** + * Scaling and simple addition, + * i.e. *this = s*(*this) + + * a*V. + */ + void sadd (const TrilinosScalar s, + const TrilinosScalar a, + const VectorBase &V); + + /** + * Scaling and multiple addition. + */ + void sadd (const TrilinosScalar s, + const TrilinosScalar a, + const VectorBase &V, + const TrilinosScalar b, + const VectorBase &W); + + /** + * Scaling and multiple addition. + * *this = s*(*this) + a*V + + * b*W + c*X. + */ + void sadd (const TrilinosScalar s, + const TrilinosScalar a, + const VectorBase &V, + const TrilinosScalar b, + const VectorBase &W, + const TrilinosScalar c, + const VectorBase &X); + + /** + * Scale each element of this + * vector by the corresponding + * element in the argument. This + * function is mostly meant to + * simulate multiplication (and + * immediate re-assignment) by a + * diagonal scaling matrix. + */ + void scale (const VectorBase &scaling_factors); + + /** + * Assignment *this = + * a*V. + */ + void equ (const TrilinosScalar a, + const VectorBase &V); + + /** + * Assignment *this = a*V + + * b*W. + */ + void equ (const TrilinosScalar a, + const VectorBase &V, + const TrilinosScalar b, + const VectorBase &W); + + /** + * Compute the elementwise ratio + * of the two given vectors, that + * is let this[i] = + * a[i]/b[i]. This is useful + * for example if you want to + * compute the cellwise ratio of + * true to estimated error. + * + * This vector is appropriately + * scaled to hold the result. + * + * If any of the b[i] is + * zero, the result is + * undefined. No attempt is made + * to catch such situations. + */ + void ratio (const VectorBase &a, + const VectorBase &b); + //@} + + + /** + * @name 4: Mixed stuff + */ + //@{ + + /** + * Return a const reference to the + * underlying Trilinos + * Epetra_MultiVector class. + */ + const Epetra_MultiVector &trilinos_vector () const; + + /** + * Return a (modifyable) reference to + * the underlying Trilinos + * Epetra_FEVector class. 
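// A sketch of the BLAS-like update operations documented above, e.g. the
// kind of linear combination that appears in a time stepping loop. All
// vectors are assumed to share the same map; names are illustrative.
#include <deal.II/lac/trilinos_vector_base.h>

void combine (dealii::TrilinosWrappers::VectorBase       &u,
              const dealii::TrilinosWrappers::VectorBase &v,
              const dealii::TrilinosWrappers::VectorBase &w)
{
  u.equ  (2.0, v);          // u  = 2*v
  u.add  (0.5, w);          // u += 0.5*w
  u.sadd (0.1, 1.0, v);     // u  = 0.1*u + 1.0*v
}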
+ */ + Epetra_FEVector &trilinos_vector (); + + /** + * Return a const reference to the + * underlying Trilinos Epetra_Map + * that sets the parallel + * partitioning of the vector. + */ + const Epetra_Map &vector_partitioner () const; + + /** + * Output of vector in + * user-defined format in analogy + * to the dealii::Vector + * class. + */ + void print (const char *format = 0) const; + + /** + * Print to a stream. @p + * precision denotes the desired + * precision with which values + * shall be printed, @p + * scientific whether scientific + * notation shall be used. If @p + * across is @p true then the + * vector is printed in a line, + * while if @p false then the + * elements are printed on a + * separate line each. + */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Swap the contents of this + * vector and the other vector @p + * v. One could do this operation + * with a temporary variable and + * copying over the data + * elements, but this function is + * significantly more efficient + * since it only swaps the + * pointers to the data of the + * two vectors and therefore does + * not need to allocate temporary + * storage and move data + * around. Note that the vectors + * need to be of the same size + * and base on the same map. + * + * This function is analog to the + * the @p swap function of all C + * standard containers. Also, + * there is a global function + * swap(u,v) that simply + * calls u.swap(v), + * again in analogy to standard + * functions. + */ + void swap (VectorBase &v); + + /** + * Estimate for the memory + * consumption in bytes. + */ + std::size_t memory_consumption () const; + //@} + + /** + * Exception + */ + DeclException0 (ExcGhostsPresent); + + /** + * Exception + */ + DeclException0 (ExcDifferentParallelPartitioning); + + /** + * Exception + */ + DeclException1 (ExcTrilinosError, + int, + << "An error with error number " << arg1 + << " occurred while calling a Trilinos function"); + + /** + * Exception + */ + DeclException3 (ExcAccessToNonlocalElement, + int, int, int, + << "You tried to access element " << arg1 + << " of a distributed vector, but only entries " + << arg2 << " through " << arg3 + << " are stored locally and can be accessed."); + + + private: + /** + * Trilinos doesn't allow to + * mix additions to matrix + * entries and overwriting them + * (to make synchronisation of + * parallel computations + * simpler). The way we do it + * is to, for each access + * operation, store whether it + * is an insertion or an + * addition. If the previous + * one was of different type, + * then we first have to flush + * the Trilinos buffers; + * otherwise, we can simply go + * on. Luckily, Trilinos has + * an object for this which + * does already all the + * parallel communications in + * such a case, so we simply + * use their model, which + * stores whether the last + * operation was an addition or + * an insertion. + */ + Epetra_CombineMode last_action; + + /** + * A boolean variable to hold + * information on whether the + * vector is compressed or not. + */ + bool compressed; + + /** + * Whether this vector has ghost elements. This is true + * on all processors even if only one of them has any + * ghost elements. + */ + bool has_ghosts; + + /** + * An Epetra distibuted vector + * type. Requires an existing + * Epetra_Map for storing data. + */ + std_cxx1x::shared_ptr vector; + + + /** + * Make the reference class a + * friend. 
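// A sketch of the stream output documented above; prints the locally
// stored elements of v on a single line with six digits of precision. The
// function name is illustrative.
#include <deal.II/lac/trilinos_vector_base.h>

#include <iostream>

void dump (const dealii::TrilinosWrappers::VectorBase &v)
{
  v.print (std::cout,
           /* precision  = */ 6,
           /* scientific = */ true,
           /* across     = */ true);
}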
+ */ + friend class internal::VectorReference; + friend class Vector; + friend class MPI::Vector; }; @@@ -1312,10 -1312,10 +1312,10 @@@ inline void VectorBase::set (const std::vector &indices, - const std::vector &values) + const std::vector &values) { - // if we have ghost values, do not allow - // writing to this vector at all. + // if we have ghost values, do not allow + // writing to this vector at all. Assert (!has_ghost_elements(), ExcGhostsPresent()); Assert (indices.size() == values.size(), @@@ -1381,10 -1381,10 +1381,10 @@@ inline void VectorBase::add (const std::vector &indices, - const std::vector &values) + const std::vector &values) { - // if we have ghost values, do not allow - // writing to this vector at all. + // if we have ghost values, do not allow + // writing to this vector at all. Assert (!has_ghost_elements(), ExcGhostsPresent()); Assert (indices.size() == values.size(), ExcDimensionMismatch(indices.size(),values.size())); diff --cc deal.II/include/deal.II/lac/vector.h index 7135bd0cd0,fccc27c794..f4a65a839a --- a/deal.II/include/deal.II/lac/vector.h +++ b/deal.II/include/deal.II/lac/vector.h @@@ -463,596 -463,596 +463,596 @@@ public #ifdef DEAL_II_USE_TRILINOS - /** - * Another copy operator: copy - * the values from a (sequential - * or parallel, depending on the - * underlying compiler) Trilinos - * wrapper vector class. This - * operator is only available if - * Trilinos was detected during - * configuration time. - * - * Note that due to the - * communication model used in MPI, - * this operation can only succeed - * if all processes do it at the - * same time. I.e., it is not - * possible for only one process to - * obtain a copy of a parallel - * vector while the other jobs do - * something else. - */ - Vector & - operator = (const TrilinosWrappers::MPI::Vector &v); - - /** - * Another copy operator: copy the - * values from a sequential - * Trilinos wrapper vector - * class. This operator is only - * available if Trilinos was - * detected during configuration - * time. - */ - Vector & - operator = (const TrilinosWrappers::Vector &v); + /** + * Another copy operator: copy + * the values from a (sequential + * or parallel, depending on the + * underlying compiler) Trilinos + * wrapper vector class. This + * operator is only available if + * Trilinos was detected during + * configuration time. + * + * Note that due to the + * communication model used in MPI, + * this operation can only succeed + * if all processes do it at the + * same time. I.e., it is not + * possible for only one process to + * obtain a copy of a parallel + * vector while the other jobs do + * something else. + */ + Vector & + operator = (const TrilinosWrappers::MPI::Vector &v); + + /** + * Another copy operator: copy the + * values from a sequential + * Trilinos wrapper vector + * class. This operator is only + * available if Trilinos was + * detected during configuration + * time. + */ + Vector & + operator = (const TrilinosWrappers::Vector &v); #endif - /** - * Test for equality. This function - * assumes that the present vector - * and the one to compare with have - * the same size already, since - * comparing vectors of different - * sizes makes not much sense - * anyway. - */ - template - bool operator == (const Vector &v) const; - - /** - * Test for inequality. This function - * assumes that the present vector and - * the one to compare with have the same - * size already, since comparing vectors - * of different sizes makes not much - * sense anyway. 
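// A sketch of the Trilinos-to-deal.II copy operator documented above
// (available only if deal.II was configured with Trilinos). As noted in
// the documentation, all MPI processes have to perform this copy at the
// same time; the names are illustrative.
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>

void pull_to_deal_vector (const dealii::TrilinosWrappers::MPI::Vector &trilinos_v,
                          dealii::Vector<double>                      &deal_v)
{
  deal_v = trilinos_v;   // collective operation, see the note above
}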
- */ - template - bool operator != (const Vector &v) const; - - /** - * Return the scalar product of - * two vectors. The return type - * is the underlying type of - * @p this vector, so the return - * type and the accuracy with - * which it the result is - * computed depend on the order - * of the arguments of this - * vector. - * - * For complex vectors, the - * scalar product is implemented - * as $\left=\sum_i - * v_i \bar{w_i}$. - */ - template - Number operator * (const Vector &V) const; - - /** - * Return square of the $l_2$-norm. - */ - real_type norm_sqr () const; - - /** - * Mean value of the elements of - * this vector. - */ - Number mean_value () const; - - /** - * $l_1$-norm of the vector. - * The sum of the absolute values. - */ - real_type l1_norm () const; - - /** - * $l_2$-norm of the vector. The - * square root of the sum of the - * squares of the elements. - */ - real_type l2_norm () const; - - /** - * $l_p$-norm of the vector. The - * pth root of the sum of the pth - * powers of the absolute values - * of the elements. - */ - real_type lp_norm (const real_type p) const; - - /** - * Maximum absolute value of the - * elements. - */ - real_type linfty_norm () const; - - /** - * Return dimension of the vector. - */ - unsigned int size () const; - - /** - * Return whether the vector contains only - * elements with value zero. This function - * is mainly for internal consistency - * checks and should seldom be used when - * not in debug mode since it uses quite - * some time. - */ - bool all_zero () const; - - /** - * Return @p true if the vector has no - * negative entries, i.e. all entries are - * zero or positive. This function is - * used, for example, to check whether - * refinement indicators are really all - * positive (or zero). - * - * The function obviously only makes - * sense if the template argument of this - * class is a real type. If it is a - * complex type, then an exception is - * thrown. - */ - bool is_non_negative () const; - - /** - * Make the @p Vector class a bit like - * the vector<> class of the C++ - * standard library by returning - * iterators to the start and end of the - * elements of this vector. - */ - iterator begin (); - - /** - * Return constant iterator to the start of - * the vectors. - */ - const_iterator begin () const; - - /** - * Return an iterator pointing to the - * element past the end of the array. - */ - iterator end (); - - /** - * Return a constant iterator pointing to - * the element past the end of the array. - */ - const_iterator end () const; - //@} - - - /** - * @name 2: Data-Access - */ - //@{ - /** - * Access the value of the @p ith - * component. - */ - Number operator() (const unsigned int i) const; - - /** - * Access the @p ith component - * as a writeable reference. - */ - Number& operator() (const unsigned int i); - - /** - * Access the value of the @p ith - * component. - * - * Exactly the same as operator(). - */ - Number operator[] (const unsigned int i) const; - - /** - * Access the @p ith component - * as a writeable reference. - * - * Exactly the same as operator(). - */ - Number& operator[] (const unsigned int i); - //@} - - - /** - * @name 3: Modification of vectors - */ - //@{ - - /** - * Add the given vector to the present - * one. - */ - Vector & operator += (const Vector &V); - - /** - * Subtract the given vector from the - * present one. 
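// A sketch using the +=/-= operators documented above to form a residual
// r = b - A*x with deal.II's own matrix and vector classes; all objects
// are assumed to be initialized with matching sizes, and the names are
// illustrative.
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

double residual_norm (const dealii::SparseMatrix<double> &A,
                      const dealii::Vector<double>       &x,
                      const dealii::Vector<double>       &b,
                      dealii::Vector<double>             &r)
{
  A.vmult (r, x);          // r = A*x
  r -= b;                  // r = A*x - b
  r *= -1.;                // r = b - A*x
  return r.l2_norm ();
}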
- */ - Vector & operator -= (const Vector &V); - - /** - * A collective add operation: - * This funnction adds a whole - * set of values stored in @p - * values to the vector - * components specified by @p - * indices. - */ - template - void add (const std::vector &indices, - const std::vector &values); - - /** - * This is a second collective - * add operation. As a - * difference, this function - * takes a deal.II vector of - * values. - */ - template - void add (const std::vector &indices, - const Vector &values); - - /** - * Take an address where - * n_elements are stored - * contiguously and add them into - * the vector. Handles all cases - * which are not covered by the - * other two add() - * functions above. - */ - template - void add (const unsigned int n_elements, - const unsigned int *indices, - const OtherNumber *values); - - /** - * Addition of @p s to all - * components. Note that @p s is a - * scalar and not a vector. - */ - void add (const Number s); - - /** - * Simple vector addition, equal to the - * operator +=. - */ - void add (const Vector &V); - - /** - * Simple addition of a multiple of a - * vector, i.e. *this += a*V. - */ - void add (const Number a, const Vector &V); - - /** - * Multiple addition of scaled vectors, - * i.e. *this += a*V+b*W. - */ - void add (const Number a, const Vector &V, - const Number b, const Vector &W); - - /** - * Scaling and simple vector addition, - * i.e. - * *this = s*(*this)+V. - */ - void sadd (const Number s, - const Vector &V); - - /** - * Scaling and simple addition, i.e. - * *this = s*(*this)+a*V. - */ - void sadd (const Number s, - const Number a, - const Vector &V); - - /** - * Scaling and multiple addition. - */ - void sadd (const Number s, - const Number a, - const Vector &V, - const Number b, - const Vector &W); - - /** - * Scaling and multiple addition. - * *this = s*(*this)+a*V + b*W + c*X. - */ - void sadd (const Number s, - const Number a, - const Vector &V, - const Number b, - const Vector &W, - const Number c, - const Vector &X); - - /** - * Scale each element of the - * vector by the given factor. - * - * This function is deprecated - * and will be removed in a - * future version. Use - * operator *= and - * operator /= instead. - */ - void scale (const Number factor); - - - /** - * Scale each element of the - * vector by a constant - * value. - */ - Vector & operator *= (const Number factor); - - /** - * Scale each element of the - * vector by the inverse of the - * given value. - */ - Vector & operator /= (const Number factor); - - /** - * Scale each element of this - * vector by the corresponding - * element in the argument. This - * function is mostly meant to - * simulate multiplication (and - * immediate re-assignment) by a - * diagonal scaling matrix. - */ - void scale (const Vector &scaling_factors); - - /** - * Scale each element of this - * vector by the corresponding - * element in the argument. This - * function is mostly meant to - * simulate multiplication (and - * immediate re-assignment) by a - * diagonal scaling matrix. - */ - template - void scale (const Vector &scaling_factors); - - /** - * Assignment *this = a*u. - */ - void equ (const Number a, const Vector& u); - - /** - * Assignment *this = a*u. - */ - template - void equ (const Number a, const Vector& u); - - /** - * Assignment *this = a*u + b*v. - */ - void equ (const Number a, const Vector& u, - const Number b, const Vector& v); - - /** - * Assignment *this = a*u + b*v + b*w. 
- */ - void equ (const Number a, const Vector& u, - const Number b, const Vector& v, - const Number c, const Vector& w); - - /** - * Compute the elementwise ratio of the - * two given vectors, that is let - * this[i] = a[i]/b[i]. This is - * useful for example if you want to - * compute the cellwise ratio of true to - * estimated error. - * - * This vector is appropriately - * scaled to hold the result. - * - * If any of the b[i] is - * zero, the result is - * undefined. No attempt is made - * to catch such situations. - */ - void ratio (const Vector &a, - const Vector &b); - - /** - * This function does nothing but is - * there for compatibility with the - * @p PETScWrappers::Vector class. - * - * For the PETSc vector wrapper class, - * this function updates the ghost - * values of the PETSc vector. This - * is necessary after any modification - * before reading ghost values. - * - * However, for the implementation of - * this class, it is immaterial and thus - * an empty function. - */ - void update_ghost_values () const; - //@} - - - /** - * @name 4: Mixed stuff - */ - //@{ - /** - * Output of vector in user-defined - * format. For complex-valued vectors, - * the format should include specifiers - * for both the real and imaginary - * parts. - */ - void print (const char* format = 0) const; - - /** - * Print to a - * stream. @p precision denotes - * the desired precision with - * which values shall be printed, - * @p scientific whether - * scientific notation shall be - * used. If @p across is - * @p true then the vector is - * printed in a line, while if - * @p false then the elements - * are printed on a separate line - * each. - */ - void print (std::ostream& out, - const unsigned int precision = 3, - const bool scientific = true, - const bool across = true) const; - - /** - * Print to a - * LogStream. width is - * used as argument to the - * std::setw manipulator, if - * printing across. If @p - * across is @p true then the - * vector is printed in a line, - * while if @p false then the - * elements are printed on a - * separate line each. - */ - void print (LogStream& out, - const unsigned int width = 6, - const bool across = true) const; - - /** - * Write the vector en bloc to a - * file. This is done in a binary - * mode, so the output is neither - * readable by humans nor - * (probably) by other computers - * using a different operating - * system or number format. - */ - void block_write (std::ostream &out) const; - - /** - * Read a vector en block from a - * file. This is done using the - * inverse operations to the - * above function, so it is - * reasonably fast because the - * bitstream is not interpreted. - * - * The vector is resized if - * necessary. - * - * A primitive form of error - * checking is performed which - * will recognize the bluntest - * attempts to interpret some - * data as a vector stored - * bitwise to a file, but not - * more. - */ - void block_read (std::istream &in); - - /** - * Determine an estimate for the - * memory consumption (in bytes) - * of this object. - */ - std::size_t memory_consumption () const; - //@} - - /** - * Write the data of this object to - * a stream for the purpose of serialization. - */ - template - void save (Archive & ar, const unsigned int version) const; - - /** - * Read the data of this object - * from a stream for the purpose of serialization. - */ - template - void load (Archive & ar, const unsigned int version); - - BOOST_SERIALIZATION_SPLIT_MEMBER() - - protected: - - /** - * Dimension. 
Actual number of - * components contained in the - * vector. Get this number by - * calling size(). - */ - unsigned int vec_size; - - /** - * Amount of memory actually - * reserved for this vector. This - * number may be greater than - * @p vec_size if a @p reinit was - * called with less memory - * requirements than the vector - * needed last time. At present - * @p reinit does not free - * memory when the number of - * needed elements is reduced. - */ - unsigned int max_vec_size; - - /** - * Pointer to the array of - * elements of this vector. - */ - Number *val; - - /** - * Make all other vector types - * friends. - */ - template friend class Vector; - - /** - * LAPACK matrices need access to - * the data. - */ - friend class LAPACKFullMatrix; - - /** - * VectorView will access the - * pointer. - */ - friend class VectorView; + /** + * Test for equality. This function + * assumes that the present vector + * and the one to compare with have + * the same size already, since + * comparing vectors of different + * sizes makes not much sense + * anyway. + */ + template + bool operator == (const Vector &v) const; + + /** + * Test for inequality. This function + * assumes that the present vector and + * the one to compare with have the same + * size already, since comparing vectors + * of different sizes makes not much + * sense anyway. + */ + template + bool operator != (const Vector &v) const; + + /** + * Return the scalar product of + * two vectors. The return type + * is the underlying type of + * @p this vector, so the return + * type and the accuracy with + * which it the result is + * computed depend on the order + * of the arguments of this + * vector. + * + * For complex vectors, the + * scalar product is implemented + * as $\left=\sum_i + * v_i \bar{w_i}$. + */ + template + Number operator * (const Vector &V) const; + + /** + * Return square of the $l_2$-norm. + */ + real_type norm_sqr () const; + + /** + * Mean value of the elements of + * this vector. + */ + Number mean_value () const; + + /** + * $l_1$-norm of the vector. + * The sum of the absolute values. + */ + real_type l1_norm () const; + + /** + * $l_2$-norm of the vector. The + * square root of the sum of the + * squares of the elements. + */ + real_type l2_norm () const; + + /** + * $l_p$-norm of the vector. The + * pth root of the sum of the pth + * powers of the absolute values + * of the elements. + */ + real_type lp_norm (const real_type p) const; + + /** + * Maximum absolute value of the + * elements. + */ + real_type linfty_norm () const; + + /** + * Return dimension of the vector. + */ + unsigned int size () const; + + /** + * Return whether the vector contains only + * elements with value zero. This function + * is mainly for internal consistency + * checks and should seldom be used when + * not in debug mode since it uses quite + * some time. + */ + bool all_zero () const; + + /** + * Return @p true if the vector has no + * negative entries, i.e. all entries are + * zero or positive. This function is + * used, for example, to check whether + * refinement indicators are really all + * positive (or zero). + * + * The function obviously only makes + * sense if the template argument of this + * class is a real type. If it is a + * complex type, then an exception is + * thrown. + */ + bool is_non_negative () const; + + /** + * Make the @p Vector class a bit like + * the vector<> class of the C++ + * standard library by returning + * iterators to the start and end of the + * elements of this vector. 
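
As a usage sketch for the iterator interface documented above (begin()/end() return plain pointers into the element array), assuming the usual size constructor of dealii::Vector; the entries and the std::accumulate call are purely illustrative:

  #include <deal.II/lac/vector.h>
  #include <numeric>    // std::accumulate
  #include <iostream>

  int main ()
  {
    dealii::Vector<double> v (3);          // three entries, zero-initialized
    v(0) = 1.;  v(1) = 2.;  v(2) = 3.;     // write access via operator()

    // begin()/end() are plain Number* iterators, so standard algorithms work
    const double sum = std::accumulate (v.begin(), v.end(), 0.);
    std::cout << "sum = " << sum << std::endl;   // prints "sum = 6"
    return 0;
  }
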
+ */ + iterator begin (); + + /** + * Return constant iterator to the start of + * the vectors. + */ + const_iterator begin () const; + + /** + * Return an iterator pointing to the + * element past the end of the array. + */ + iterator end (); + + /** + * Return a constant iterator pointing to + * the element past the end of the array. + */ + const_iterator end () const; + //@} + + + /** + * @name 2: Data-Access + */ + //@{ + /** + * Access the value of the @p ith + * component. + */ + Number operator() (const unsigned int i) const; + + /** + * Access the @p ith component + * as a writeable reference. + */ + Number &operator() (const unsigned int i); + + /** + * Access the value of the @p ith + * component. + * + * Exactly the same as operator(). + */ + Number operator[] (const unsigned int i) const; + + /** + * Access the @p ith component + * as a writeable reference. + * + * Exactly the same as operator(). + */ + Number &operator[] (const unsigned int i); + //@} + + + /** + * @name 3: Modification of vectors + */ + //@{ + + /** + * Add the given vector to the present + * one. + */ + Vector &operator += (const Vector &V); + + /** + * Subtract the given vector from the + * present one. + */ + Vector &operator -= (const Vector &V); + + /** + * A collective add operation: + * This funnction adds a whole + * set of values stored in @p + * values to the vector + * components specified by @p + * indices. + */ + template + void add (const std::vector &indices, - const std::vector &values); ++ const std::vector &values); + + /** + * This is a second collective + * add operation. As a + * difference, this function + * takes a deal.II vector of + * values. + */ + template + void add (const std::vector &indices, + const Vector &values); + + /** + * Take an address where + * n_elements are stored + * contiguously and add them into + * the vector. Handles all cases + * which are not covered by the + * other two add() + * functions above. + */ + template + void add (const unsigned int n_elements, + const unsigned int *indices, - const OtherNumber *values); ++ const OtherNumber *values); + + /** + * Addition of @p s to all + * components. Note that @p s is a + * scalar and not a vector. + */ + void add (const Number s); + + /** + * Simple vector addition, equal to the + * operator +=. + */ + void add (const Vector &V); + + /** + * Simple addition of a multiple of a + * vector, i.e. *this += a*V. + */ + void add (const Number a, const Vector &V); + + /** + * Multiple addition of scaled vectors, + * i.e. *this += a*V+b*W. + */ + void add (const Number a, const Vector &V, + const Number b, const Vector &W); + + /** + * Scaling and simple vector addition, + * i.e. + * *this = s*(*this)+V. + */ + void sadd (const Number s, + const Vector &V); + + /** + * Scaling and simple addition, i.e. + * *this = s*(*this)+a*V. + */ + void sadd (const Number s, + const Number a, + const Vector &V); + + /** + * Scaling and multiple addition. + */ + void sadd (const Number s, + const Number a, + const Vector &V, + const Number b, + const Vector &W); + + /** + * Scaling and multiple addition. + * *this = s*(*this)+a*V + b*W + c*X. + */ + void sadd (const Number s, + const Number a, + const Vector &V, + const Number b, + const Vector &W, + const Number c, + const Vector &X); + + /** + * Scale each element of the + * vector by the given factor. + * + * This function is deprecated + * and will be removed in a + * future version. Use + * operator *= and + * operator /= instead. 
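
A minimal sketch of the update operations declared above, using only members shown in this header (add(), sadd(), operator*=, operator/=); since scale(Number) is deprecated, the operators are used in its place. Sizes and factors are made up:

  #include <deal.II/lac/vector.h>

  int main ()
  {
    dealii::Vector<double> v (10), w (10);   // zero-initialized
    v.add (1.);            // add the scalar 1 to every component -> v_i = 1
    w.add (2.);            //                                      -> w_i = 2

    v *= 3.;               // scale all entries; replaces the deprecated scale(3.)
    v /= 2.;               // scale by the inverse of the given factor
    v.add  (0.5, w);       // v += 0.5 * w
    v.sadd (2., 1., w);    // v  = 2.*v + 1.*w in a single pass
    return 0;
  }
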
+ */ + void scale (const Number factor); + + + /** + * Scale each element of the + * vector by a constant + * value. + */ + Vector &operator *= (const Number factor); + + /** + * Scale each element of the + * vector by the inverse of the + * given value. + */ + Vector &operator /= (const Number factor); + + /** + * Scale each element of this + * vector by the corresponding + * element in the argument. This + * function is mostly meant to + * simulate multiplication (and + * immediate re-assignment) by a + * diagonal scaling matrix. + */ + void scale (const Vector &scaling_factors); + + /** + * Scale each element of this + * vector by the corresponding + * element in the argument. This + * function is mostly meant to + * simulate multiplication (and + * immediate re-assignment) by a + * diagonal scaling matrix. + */ + template + void scale (const Vector &scaling_factors); + + /** + * Assignment *this = a*u. + */ + void equ (const Number a, const Vector &u); + + /** + * Assignment *this = a*u. + */ + template + void equ (const Number a, const Vector &u); + + /** + * Assignment *this = a*u + b*v. + */ + void equ (const Number a, const Vector &u, + const Number b, const Vector &v); + + /** + * Assignment *this = a*u + b*v + b*w. + */ + void equ (const Number a, const Vector &u, + const Number b, const Vector &v, + const Number c, const Vector &w); + + /** + * Compute the elementwise ratio of the + * two given vectors, that is let + * this[i] = a[i]/b[i]. This is + * useful for example if you want to + * compute the cellwise ratio of true to + * estimated error. + * + * This vector is appropriately + * scaled to hold the result. + * + * If any of the b[i] is + * zero, the result is + * undefined. No attempt is made + * to catch such situations. + */ + void ratio (const Vector &a, + const Vector &b); + + /** + * This function does nothing but is + * there for compatibility with the + * @p PETScWrappers::Vector class. + * + * For the PETSc vector wrapper class, + * this function updates the ghost + * values of the PETSc vector. This + * is necessary after any modification + * before reading ghost values. + * + * However, for the implementation of + * this class, it is immaterial and thus + * an empty function. + */ + void update_ghost_values () const; + //@} + + + /** + * @name 4: Mixed stuff + */ + //@{ + /** + * Output of vector in user-defined + * format. For complex-valued vectors, + * the format should include specifiers + * for both the real and imaginary + * parts. + */ + void print (const char *format = 0) const; + + /** + * Print to a + * stream. @p precision denotes + * the desired precision with + * which values shall be printed, + * @p scientific whether + * scientific notation shall be + * used. If @p across is + * @p true then the vector is + * printed in a line, while if + * @p false then the elements + * are printed on a separate line + * each. + */ + void print (std::ostream &out, + const unsigned int precision = 3, + const bool scientific = true, + const bool across = true) const; + + /** + * Print to a + * LogStream. width is + * used as argument to the + * std::setw manipulator, if + * printing across. If @p + * across is @p true then the + * vector is printed in a line, + * while if @p false then the + * elements are printed on a + * separate line each. + */ + void print (LogStream &out, + const unsigned int width = 6, + const bool across = true) const; + + /** + * Write the vector en bloc to a + * file. 
This is done in a binary + * mode, so the output is neither + * readable by humans nor + * (probably) by other computers + * using a different operating + * system or number format. + */ + void block_write (std::ostream &out) const; + + /** + * Read a vector en block from a + * file. This is done using the + * inverse operations to the + * above function, so it is + * reasonably fast because the + * bitstream is not interpreted. + * + * The vector is resized if + * necessary. + * + * A primitive form of error + * checking is performed which + * will recognize the bluntest + * attempts to interpret some + * data as a vector stored + * bitwise to a file, but not + * more. + */ + void block_read (std::istream &in); + + /** + * Determine an estimate for the + * memory consumption (in bytes) + * of this object. + */ + std::size_t memory_consumption () const; + //@} + + /** + * Write the data of this object to + * a stream for the purpose of serialization. + */ + template + void save (Archive &ar, const unsigned int version) const; + + /** + * Read the data of this object + * from a stream for the purpose of serialization. + */ + template + void load (Archive &ar, const unsigned int version); + + BOOST_SERIALIZATION_SPLIT_MEMBER() + + protected: + + /** + * Dimension. Actual number of + * components contained in the + * vector. Get this number by + * calling size(). + */ + unsigned int vec_size; + + /** + * Amount of memory actually + * reserved for this vector. This + * number may be greater than + * @p vec_size if a @p reinit was + * called with less memory + * requirements than the vector + * needed last time. At present + * @p reinit does not free + * memory when the number of + * needed elements is reduced. + */ + unsigned int max_vec_size; + + /** + * Pointer to the array of + * elements of this vector. + */ + Number *val; + + /** + * Make all other vector types + * friends. + */ + template friend class Vector; + + /** + * LAPACK matrices need access to + * the data. + */ + friend class LAPACKFullMatrix; + + /** + * VectorView will access the + * pointer. 
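
A small round-trip sketch for block_write()/block_read() as documented above; the file name is arbitrary and, as the documentation notes, the binary format is not portable across platforms:

  #include <deal.II/lac/vector.h>
  #include <fstream>

  int main ()
  {
    dealii::Vector<double> v (5);
    v.add (3.14);                          // every entry becomes 3.14

    {
      std::ofstream out ("vector.dat", std::ios::binary);   // file name is arbitrary
      v.block_write (out);                 // raw binary dump of the entries
    }

    dealii::Vector<double> w;              // empty; block_read() resizes it
    std::ifstream in ("vector.dat", std::ios::binary);
    w.block_read (in);                     // now w.size()==v.size(), entries identical
    return 0;
  }
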
+ */ + friend class VectorView; }; /*@}*/ diff --cc deal.II/include/deal.II/lac/vector.templates.h index a40967a664,60fb6cac10..8d7daf85b1 --- a/deal.II/include/deal.II/lac/vector.templates.h +++ b/deal.II/include/deal.II/lac/vector.templates.h @@@ -498,7 -498,7 +498,7 @@@ namespace interna struct InnerProd { Number - operator() (const Number*&X, const Number2*&Y, const Number &) const - operator() (const Number *&X, const Number2 *&Y, const Number &) const ++ operator() (const Number *&X, const Number2 *&Y, const Number &) const { return *X++ * Number(numbers::NumberTraits::conjugate(*Y++)); } @@@ -508,7 -508,7 +508,7 @@@ struct Norm2 { RealType - operator() (const Number*&X, const Number* &, const RealType &) const - operator() (const Number *&X, const Number *&, const RealType &) const ++ operator() (const Number *&X, const Number *&, const RealType &) const { return numbers::NumberTraits::abs_square(*X++); } @@@ -518,7 -518,7 +518,7 @@@ struct Norm1 { RealType - operator() (const Number*&X, const Number* &, const RealType &) const - operator() (const Number *&X, const Number *&, const RealType &) const ++ operator() (const Number *&X, const Number *&, const RealType &) const { return numbers::NumberTraits::abs(*X++); } @@@ -528,7 -528,7 +528,7 @@@ struct NormP { RealType - operator() (const Number*&X, const Number* &, const RealType &p) const - operator() (const Number *&X, const Number *&, const RealType &p) const ++ operator() (const Number *&X, const Number *&, const RealType &p) const { return std::pow(numbers::NumberTraits::abs(*X++), p); } @@@ -538,7 -538,7 +538,7 @@@ struct MeanValue { Number - operator() (const Number*&X, const Number* &, const Number &) const - operator() (const Number *&X, const Number *&, const Number &) const ++ operator() (const Number *&X, const Number *&, const Number &) const { return *X++; } diff --cc deal.II/include/deal.II/matrix_free/fe_evaluation.h index 6da19f1c8b,4d81957bdc..0cafb452cb --- a/deal.II/include/deal.II/matrix_free/fe_evaluation.h +++ b/deal.II/include/deal.II/matrix_free/fe_evaluation.h @@@ -2706,7 -2706,7 +2706,7 @@@ template - ::distribute_local_to_global (std::vector &dst, -::distribute_local_to_global (std::vector &dst, ++::distribute_local_to_global (std::vector &dst, const unsigned int first_index) const { AssertIndexRange (first_index, dst.size()); @@@ -2786,7 -2786,7 +2786,7 @@@ template - ::set_dof_values (std::vector &dst, -::set_dof_values (std::vector &dst, ++::set_dof_values (std::vector &dst, const unsigned int first_index) const { AssertIndexRange (first_index, dst.size()); diff --cc deal.II/include/deal.II/matrix_free/mapping_info.h index d8913efc2b,6a46b630ff..bfd47e83c0 --- a/deal.II/include/deal.II/matrix_free/mapping_info.h +++ b/deal.II/include/deal.II/matrix_free/mapping_info.h @@@ -30,294 -30,94 +30,94 @@@ DEAL_II_NAMESPACE_OPE namespace internal { - namespace MatrixFreeFunctions - { - /** - * The class that stores all geometry-dependent data related with cell - * interiors for use in the matrix-free class. - * - * @author Katharina Kormann and Martin Kronbichler, 2010, 2011 - */ - template - struct MappingInfo + namespace MatrixFreeFunctions { - /** - * Determines how many bits of an unsigned int - * are used to distinguish the cell types - * (Cartesian, with constant Jacobian, or - * general) - */ - static const std::size_t n_cell_type_bits = 2; - - /** - * Determines how many types of different - * cells can be detected at most. Corresponds - * to the number of bits we reserved for it. 
- */ - static const unsigned int n_cell_types = 1U< &tria, - const std::vector > &cells, - const std::vector &active_fe_index, - const Mapping &mapping, - const std::vector > &quad, - const UpdateFlags update_flags); - - /** - * Helper function to determine which update - * flags must be set in the internal functions - * to initialize all data as requested by the - * user. - */ - UpdateFlags - compute_update_flags (const UpdateFlags update_flags, - const std::vector > &quad) const; - - /** - * Returns the type of a given cell as - * detected during initialization. - */ - CellType get_cell_type (const unsigned int cell_chunk_no) const; - - /** - * Returns the type of a given cell as - * detected during initialization. - */ - unsigned int get_cell_data_index (const unsigned int cell_chunk_no) const; - - /** - * Clears all data fields in this class. - */ - void clear (); - - /** - * Returns the memory consumption of this - * class in bytes. - */ - std::size_t memory_consumption() const; - - /** - * Prints a detailed summary of memory - * consumption in the different structures of - * this class to the given output stream. - */ - template - void print_memory_consumption(STREAM &out, - const SizeInfo &size_info) const; - - /** - * Stores whether a cell is Cartesian, has - * constant transform data (Jacobians) or is - * general. cell_type % 4 gives this - * information (0: Cartesian, 1: constant - * Jacobian throughout cell, 2: general cell), - * and cell_type / 4 gives the index in the - * data field of where to find the information - * in the fields Jacobian and JxW values - * (except for quadrature points, for which - * the index runs as usual). - */ - std::vector cell_type; - - /** - * The first field stores the inverse Jacobian - * for Cartesian cells: There, it is a - * diagonal rank-2 tensor, so we actually just - * store a rank-1 tensor. It is the same on - * all cells, therefore we only store it once - * per cell, and use similarities from one - * cell to another, too (on structured meshes, - * there are usually many cells with the same - * Jacobian). - * - * The second field stores the Jacobian - * determinant for Cartesian cells (without - * the quadrature weight, which depends on the - * quadrature point, whereas the determinant - * is the same on each quadrature point). - */ - AlignedVector >, - VectorizedArray > > cartesian_data; - - /** - * The first field stores the Jacobian for - * non-Cartesian cells where all the Jacobians - * on the cell are the same (i.e., constant, - * which comes from a linear transformation - * from unit to real cell). Also use - * similarities from one cell to another (on - * structured meshes, there are usually many - * cells with the same Jacobian). - * - * The second field stores the Jacobian - * determinant for non-Cartesian cells with - * constant Jacobian throughout the cell - * (without the quadrature weight, which - * depends on the quadrature point, whereas - * the determinant is the same on each - * quadrature point). - */ - AlignedVector >, - VectorizedArray > > affine_data; - - /** - * Definition of a structure that stores data - * that depends on the quadrature formula (if - * we have more than one quadrature formula on - * a given problem, these fields will be - * different) - */ - struct MappingInfoDependent + /** + * The class that stores all geometry-dependent data related with cell + * interiors for use in the matrix-free class. 
+ * + * @author Katharina Kormann and Martin Kronbichler, 2010, 2011 + */ + template + struct MappingInfo { - /** - * This field stores the row starts for the - * inverse Jacobian transformations, - * quadrature weights and second derivatives. - */ - std::vector rowstart_jacobians; - - /** - * This field stores the inverse Jacobian - * transformation from unit to real cell, - * which is needed for most gradient - * transformations (corresponds to - * FEValues::inverse_jacobian) for general - * cells. - */ - AlignedVector > > jacobians; - - /** - * This field stores the Jacobian - * determinant times the quadrature weights - * (JxW in deal.II speak) for general cells. - */ - AlignedVector > JxW_values; - - /** - * Stores the diagonal part of the gradient of - * the inverse Jacobian transformation. The - * first index runs over the derivatives - * $\partial^2/\partial x_i^2$, the second - * over the space coordinate. Needed for - * computing the Laplacian of FE functions on - * the real cell. Uses a separate storage from - * the off-diagonal part $\partial^2/\partial - * x_i \partial x_j, i\neq j$ because that is - * only needed for computing a full Hessian. - */ - AlignedVector > > jacobians_grad_diag; - - /** - * Stores the off-diagonal part of the - * gradient of the inverse Jacobian - * transformation. Because of symmetry, only - * the upper diagonal part is needed. The - * first index runs through the derivatives - * row-wise, i.e., $\partial^2/\partial x_1 - * \partial x_2$ first, then - * $\partial^2/\partial x_1 \partial x_3$, and - * so on. The second index is the spatial - * coordinate. Not filled currently. - */ - AlignedVector1?dim*(dim-1)/2:1), - Tensor<1,dim,VectorizedArray > > > jacobians_grad_upper; - - /** - * Stores the row start for quadrature points - * in real coordinates for both types of - * cells. Note that Cartesian cells will have - * shorter fields (length is @p n_q_points_1d) - * than non-Cartesian cells (length is @p - * n_q_points). - */ - std::vector rowstart_q_points; - - /** - * Stores the quadrature points in real - * coordinates for Cartesian cells (does not - * need to store the full data on all points) - */ - AlignedVector > > quadrature_points; - - /** - * The dim-dimensional quadrature formula - * underlying the problem (constructed from a - * 1D tensor product quadrature formula). - */ - dealii::hp::QCollection quadrature; - - /** - * The (dim-1)-dimensional quadrature formula - * corresponding to face evaluation - * (constructed from a 1D tensor product - * quadrature formula). - */ - dealii::hp::QCollection face_quadrature; - - /** - * The number of quadrature points for the - * current quadrature formula. - */ - std::vector n_q_points; - - /** - * The number of quadrature points for the - * current quadrature formula when applied to - * a face. Only set if the quadrature formula - * is derived from a tensor product, since it - * is not defined from the full quadrature - * formula otherwise. - */ - std::vector n_q_points_face; - - /** - * The quadrature weights (vectorized data - * format) on the unit cell. - */ - std::vector > > quadrature_weights; - - /** - * This variable stores the number of - * quadrature points for all quadrature - * indices in the underlying element for - * easier access to data in the hp case. - */ - std::vector quad_index_conversion; - - /** - * Returns the quadrature index for a given - * number of quadrature points. If not in hp - * mode or if the index is not found, this - * function always returns index 0. 
Hence, - * this function does not check whether the - * given degree is actually present. - */ - unsigned int - quad_index_from_n_q_points (const unsigned int n_q_points) const; - - - /** - * Prints a detailed summary of memory - * consumption in the different structures of - * this class to the given output stream. - */ + /** + * Determines how many bits of an unsigned int + * are used to distinguish the cell types + * (Cartesian, with constant Jacobian, or + * general) + */ + static const std::size_t n_cell_type_bits = 2; + + /** + * Determines how many types of different + * cells can be detected at most. Corresponds + * to the number of bits we reserved for it. + */ + static const unsigned int n_cell_types = 1U< &tria, + const std::vector > &cells, + const std::vector &active_fe_index, + const Mapping &mapping, - const std::vector > &quad, ++ const std::vector > &quad, + const UpdateFlags update_flags); + + /** + * Helper function to determine which update + * flags must be set in the internal functions + * to initialize all data as requested by the + * user. + */ + UpdateFlags + compute_update_flags (const UpdateFlags update_flags, - const std::vector > &quad) const; ++ const std::vector > &quad) const; + + /** + * Returns the type of a given cell as + * detected during initialization. + */ + CellType get_cell_type (const unsigned int cell_chunk_no) const; + + /** + * Returns the type of a given cell as + * detected during initialization. + */ + unsigned int get_cell_data_index (const unsigned int cell_chunk_no) const; + + /** + * Clears all data fields in this class. + */ + void clear (); + + /** + * Returns the memory consumption of this + * class in bytes. + */ + std::size_t memory_consumption() const; + + /** + * Prints a detailed summary of memory + * consumption in the different structures of + * this class to the given output stream. 
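
Based on the description of MappingInfo::cell_type (two bits for the cell type, the remaining bits for the index into the data fields), a stand-alone sketch of the decoding that get_cell_type() and get_cell_data_index() presumably perform; the enum names and helper functions are assumptions of this sketch, not taken from the library:

  #include <iostream>

  // Assumed encoding, following the documentation of MappingInfo::cell_type:
  // the two low bits hold the cell type, the remaining bits the data index.
  enum CellType { cartesian = 0, affine = 1, general = 2 };

  CellType     decode_cell_type  (const unsigned int packed) { return static_cast<CellType> (packed % 4); }
  unsigned int decode_data_index (const unsigned int packed) { return packed / 4; }

  int main ()
  {
    const unsigned int packed = 4*7 + 1;   // data index 7, type 'affine'
    std::cout << "type = "       << decode_cell_type (packed)
              << ", data index = " << decode_data_index (packed) << std::endl;
    return 0;
  }
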
+ */ template void print_memory_consumption(STREAM &out, const SizeInfo &size_info) const; diff --cc deal.II/include/deal.II/matrix_free/matrix_free.h index bf75c4a7b8,8351f2e8dc..6caf13fd32 --- a/deal.II/include/deal.II/matrix_free/matrix_free.h +++ b/deal.II/include/deal.II/matrix_free/matrix_free.h @@@ -1920,8 -1920,8 +1920,8 @@@ template void MatrixFree:: reinit(const Mapping &mapping, - const std::vector &dof_handler, - const std::vector &constraint, - const std::vector &dof_handler, ++ const std::vector &dof_handler, + const std::vector &constraint, const std::vector &quad, const MatrixFree::AdditionalData additional_data) { @@@ -1988,9 -1988,9 +1988,9 @@@ namespace interna template inline - void update_ghost_values_start (const std::vector &src) + void update_ghost_values_start (const std::vector &src) { - for(unsigned int comp=0;comp inline - void update_ghost_values_start (const std::vector &src) - void update_ghost_values_start (const std::vector &src) ++ void update_ghost_values_start (const std::vector &src) { - for(unsigned int comp=0;comp inline - void update_ghost_values_finish (const std::vector &src) + void update_ghost_values_finish (const std::vector &src) { - for(unsigned int comp=0;comp inline - void update_ghost_values_finish (const std::vector &src) - void update_ghost_values_finish (const std::vector &src) ++ void update_ghost_values_finish (const std::vector &src) { - for(unsigned int comp=0;comp::cell_loop (const std_cxx1x::function &, OutVector &, - const InVector&, + const InVector &, const std::pair &)> &cell_operation, + unsigned int> &)> &cell_operation, OutVector &dst, - const InVector &src) const + const InVector &src) const { #if DEAL_II_USE_MT==1 diff --cc deal.II/include/deal.II/multigrid/mg_coarse.h index a50182ae3c,8cfc4eb7d2..fc31e5e29e --- a/deal.II/include/deal.II/multigrid/mg_coarse.h +++ b/deal.II/include/deal.II/multigrid/mg_coarse.h @@@ -207,11 -207,11 +207,11 @@@ MGCoarseGridLACIteration template MGCoarseGridLACIteration - ::MGCoarseGridLACIteration(SOLVER& s, + ::MGCoarseGridLACIteration(SOLVER &s, - const MATRIX &m, + const MATRIX &m, const PRECOND &p) - : - solver(&s, typeid(*this).name()) + : + solver(&s, typeid(*this).name()) { matrix = new PointerMatrix(&m); precondition = new PointerMatrix(&p); @@@ -230,8 -230,8 +230,8 @@@ template void MGCoarseGridLACIteration - ::initialize(SOLVER& s, + ::initialize(SOLVER &s, - const MATRIX &m, + const MATRIX &m, const PRECOND &p) { solver = &s; diff --cc deal.II/include/deal.II/multigrid/mg_constrained_dofs.h index 5b90025982,29fcf76ca1..19e30b8957 --- a/deal.II/include/deal.II/multigrid/mg_constrained_dofs.h +++ b/deal.II/include/deal.II/multigrid/mg_constrained_dofs.h @@@ -35,156 -34,151 +35,156 @@@ template struct FunctionMap */ class MGConstrainedDoFs : public Subscriptor { - public: - /** - * Fill the internal data - * structures with values - * extracted from the dof - * handler. - * - * This function leaves - * #boundary_indices empty, since - * no boundary values are - * provided. - */ - template - void initialize(const MGDoFHandler& dof); - - /** - * Fill the internal data - * structures with values - * extracted from the dof - * handler, applying the boundary - * values provided. 
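
A setup sketch for MGConstrainedDoFs::initialize() with boundary values, roughly following the interface above; the mesh, element, boundary indicator and include paths are assumptions of this example, not prescribed by the patch:

  #include <deal.II/base/function.h>
  #include <deal.II/dofs/function_map.h>
  #include <deal.II/fe/fe_q.h>
  #include <deal.II/grid/grid_generator.h>
  #include <deal.II/grid/tria.h>
  #include <deal.II/multigrid/mg_constrained_dofs.h>
  #include <deal.II/multigrid/mg_dof_handler.h>

  int main ()
  {
    const int dim = 2;

    dealii::Triangulation<dim> tria;
    dealii::GridGenerator::hyper_cube (tria);
    tria.refine_global (2);

    dealii::FE_Q<dim>         fe (1);
    dealii::MGDoFHandler<dim> mg_dof (tria);
    mg_dof.distribute_dofs (fe);              // distributes active and level dofs

    // homogeneous Dirichlet values on boundary indicator 0
    dealii::ZeroFunction<dim>      zero;
    dealii::FunctionMap<dim>::type boundary_values;
    boundary_values[0] = &zero;

    dealii::MGConstrainedDoFs mg_constrained_dofs;
    mg_constrained_dofs.initialize (mg_dof, boundary_values);
    return 0;
  }
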
- */ - template - void initialize(const MGDoFHandler& dof, - const typename FunctionMap::type& function_map, - const ComponentMask &component_mask = ComponentMask()); - - template - void initialize(const DoFHandler& dof, - const typename FunctionMap::type& function_map, - const std::vector& component_mask = std::vector()); - - /** - * Reset the data structures. - */ - void clear(); - - /** - * Determine whether a dof index - * is subject to a boundary - * constraint. - */ - bool is_boundary_index (const unsigned int level, - const unsigned int index) const; - - /** - * Determine whether a dof index - * is at an edge that is not - * a refinement edge. - */ - bool non_refinement_edge_index (const unsigned int level, + public: + /** + * Fill the internal data + * structures with values + * extracted from the dof + * handler. + * + * This function leaves + * #boundary_indices empty, since + * no boundary values are + * provided. + */ + template + void initialize(const MGDoFHandler &dof); + + /** + * Fill the internal data + * structures with values + * extracted from the dof + * handler, applying the boundary + * values provided. + */ + template + void initialize(const MGDoFHandler &dof, + const typename FunctionMap::type &function_map, + const ComponentMask &component_mask = ComponentMask()); + ++ template ++ void initialize(const DoFHandler &dof, ++ const typename FunctionMap::type &function_map, ++ const std::vector &component_mask = std::vector()); ++ + /** + * Reset the data structures. + */ + void clear(); + + /** + * Determine whether a dof index + * is subject to a boundary + * constraint. + */ + bool is_boundary_index (const unsigned int level, + const unsigned int index) const; + + /** + * Determine whether a dof index + * is at an edge that is not + * a refinement edge. + */ + bool non_refinement_edge_index (const unsigned int level, + const unsigned int index) const; + + /** + * Determine whether a dof index + * is at the refinement edge. + */ + bool at_refinement_edge (const unsigned int level, + const unsigned int index) const; + + /** + * Determine whether a dof index + * is at the refinement edge and + * subject to a boundary + * constraint . + */ + bool at_refinement_edge_boundary (const unsigned int level, const unsigned int index) const; - /** - * Determine whether a dof index - * is at the refinement edge. - */ - bool at_refinement_edge (const unsigned int level, - const unsigned int index) const; - - /** - * Determine whether a dof index - * is at the refinement edge and - * subject to a boundary - * constraint . - */ - bool at_refinement_edge_boundary (const unsigned int level, - const unsigned int index) const; - - /** - * Return the indices of dofs for each - * level that lie on the boundary of the - * domain. - */ - const std::vector > & - get_boundary_indices () const; - - /** - * Return the indices of dofs for each - * level that lie on the boundary of the - * domain. - */ - const std::vector > & - get_non_refinement_edge_indices () const; - - /** - * Return the indices of dofs for each - * level that lie on the refinement edge - * (i.e. are on faces between cells of - * this level and cells on the level - * below). - */ - const std::vector > & - get_refinement_edge_indices () const; - - /** - * Return the indices of dofs for each - * level that are in the intersection of - * the sets returned by - * get_boundary_indices() and - * get_refinement_edge_indices(). 
- */ - const std::vector > & - get_refinement_edge_boundary_indices () const; - - /** - * Return if boundary_indices need to - * be set or not. - */ - - bool set_boundary_values () const; - - /** - * Return if the finite element requires - * continuity across refinement edges. - */ - bool continuity_across_refinement_edges () const; - private: - - /** - * The indices of boundary dofs - * for each level. - */ - std::vector > boundary_indices; - - /** - * The degrees of freedom on egdges - * that are not a - * refinement edge between a - * level and coarser cells. - */ - std::vector > non_refinement_edge_indices; - - /** - * The degrees of freedom on the - * refinement edge between a - * level and coarser cells. - */ - std::vector > refinement_edge_indices; - - /** - * The degrees of freedom on the - * refinement edge between a - * level and coarser cells, which - * are also on the boundary. - * - * This is a subset of - * #refinement_edge_indices. - */ - std::vector > refinement_edge_boundary_indices; + /** + * Return the indices of dofs for each + * level that lie on the boundary of the + * domain. + */ + const std::vector > & + get_boundary_indices () const; + + /** + * Return the indices of dofs for each + * level that lie on the boundary of the + * domain. + */ + const std::vector > & + get_non_refinement_edge_indices () const; + + /** + * Return the indices of dofs for each + * level that lie on the refinement edge + * (i.e. are on faces between cells of + * this level and cells on the level + * below). + */ + const std::vector > & + get_refinement_edge_indices () const; + + /** + * Return the indices of dofs for each + * level that are in the intersection of + * the sets returned by + * get_boundary_indices() and + * get_refinement_edge_indices(). + */ + const std::vector > & + get_refinement_edge_boundary_indices () const; + + /** + * Return if boundary_indices need to + * be set or not. + */ + + bool set_boundary_values () const; + + /** + * Return if the finite element requires + * continuity across refinement edges. + */ + bool continuity_across_refinement_edges () const; + private: + + /** + * The indices of boundary dofs + * for each level. + */ + std::vector > boundary_indices; + + /** + * The degrees of freedom on egdges + * that are not a + * refinement edge between a + * level and coarser cells. + */ + std::vector > non_refinement_edge_indices; + + /** + * The degrees of freedom on the + * refinement edge between a + * level and coarser cells. + */ + std::vector > refinement_edge_indices; + + /** + * The degrees of freedom on the + * refinement edge between a + * level and coarser cells, which + * are also on the boundary. + * + * This is a subset of + * #refinement_edge_indices. 
+ */ + std::vector > refinement_edge_boundary_indices; }; @@@ -232,34 -226,8 +232,34 @@@ MGConstrainedDoFs::initialize MGTools::make_boundary_list (dof, function_map, boundary_indices, component_mask); MGTools::extract_inner_interface_dofs (dof, refinement_edge_indices, - refinement_edge_boundary_indices); + refinement_edge_boundary_indices); MGTools::extract_non_interface_dofs (dof, non_refinement_edge_indices); +} + + +template +inline +void +MGConstrainedDoFs::initialize( - const DoFHandler& dof, - const typename FunctionMap::type& function_map, - const std::vector& component_mask) ++ const DoFHandler &dof, ++ const typename FunctionMap::type &function_map, ++ const std::vector &component_mask) +{ + const unsigned int nlevels = dof.get_tria().n_levels(); + boundary_indices.resize(nlevels); + refinement_edge_indices.resize(nlevels); + refinement_edge_boundary_indices.resize(nlevels); + - for(unsigned int l=0; l class BlockS */ namespace MGTools { - /** - * Compute row length vector for - * multilevel methods. - */ + /** + * Compute row length vector for + * multilevel methods. + */ template void - compute_row_length_vector(const MGDoFHandler& dofs, + compute_row_length_vector(const MGDoFHandler &dofs, const unsigned int level, - std::vector& row_lengths, + std::vector &row_lengths, const DoFTools::Coupling flux_couplings = DoFTools::none); - /** - * Compute row length vector for - * multilevel methods with - * optimization for block - * couplings. - */ + /** + * Compute row length vector for + * multilevel methods with + * optimization for block + * couplings. + */ template void - compute_row_length_vector(const MGDoFHandler& dofs, + compute_row_length_vector(const MGDoFHandler &dofs, const unsigned int level, - std::vector& row_lengths, - const Table<2,DoFTools::Coupling>& couplings, - const Table<2,DoFTools::Coupling>& flux_couplings); + std::vector &row_lengths, + const Table<2,DoFTools::Coupling> &couplings, + const Table<2,DoFTools::Coupling> &flux_couplings); - /** - * Write the sparsity structure - * of the matrix belonging to the - * specified @p level. The sparsity pattern - * is not compressed, so before - * creating the actual matrix - * you have to compress the - * matrix yourself, using - * SparseMatrixStruct::compress(). - * - * There is no need to consider - * hanging nodes here, since only - * one level is considered. - */ + /** + * Write the sparsity structure + * of the matrix belonging to the + * specified @p level. The sparsity pattern + * is not compressed, so before + * creating the actual matrix + * you have to compress the + * matrix yourself, using + * SparseMatrixStruct::compress(). + * + * There is no need to consider + * hanging nodes here, since only + * one level is considered. + */ - template + template void - make_sparsity_pattern (const MGDoFHandler &dof_handler, + make_sparsity_pattern (const DH &dof_handler, SparsityPattern &sparsity, const unsigned int level); @@@ -163,32 -163,32 +163,32 @@@ const unsigned int level, const Table<2,DoFTools::Coupling> &flux_mask); - /** - * Count the dofs block-wise - * on each level. - * - * Result is a vector containing - * for each level a vector - * containing the number of dofs - * for each block (access is - * result[level][block]). - */ + /** + * Count the dofs block-wise + * on each level. + * + * Result is a vector containing + * for each level a vector + * containing the number of dofs + * for each block (access is + * result[level][block]). 
+ */ - template + template void - count_dofs_per_block (const DH& dof_handler, - std::vector >& dofs_per_block, - count_dofs_per_block (const MGDoFHandler &mg_dof, - std::vector > &result, - std::vector target_block = std::vector()); ++ count_dofs_per_block (const DH &dof_handler, ++ std::vector > &dofs_per_block, + std::vector target_block = std::vector()); - /** - * Count the dofs component-wise - * on each level. - * - * Result is a vector containing - * for each level a vector - * containing the number of dofs - * for each component (access is - * result[level][component]). - */ + /** + * Count the dofs component-wise + * on each level. + * + * Result is a vector containing + * for each level a vector + * containing the number of dofs + * for each component (access is + * result[level][component]). + */ template void count_dofs_per_component (const MGDoFHandler &mg_dof, @@@ -233,14 -233,14 +233,14 @@@ void make_boundary_list (const MGDoFHandler &mg_dof, const typename FunctionMap::type &function_map, - std::vector > &boundary_indices, + std::vector > &boundary_indices, const ComponentMask &component_mask = ComponentMask()); - /** - * The same function as above, but return - * an IndexSet rather than a - * std::set on each level. - */ + /** + * The same function as above, but return + * an IndexSet rather than a + * std::set on each level. + */ template void make_boundary_list (const MGDoFHandler &mg_dof, @@@ -286,13 -286,13 +286,13 @@@ template void extract_inner_interface_dofs (const MGDoFHandler &mg_dof_handler, - std::vector > &interface_dofs, - std::vector > &boundary_interface_dofs); + std::vector > &interface_dofs, + std::vector > &boundary_interface_dofs); - /** - * Does the same as the function above, - * but fills only the interface_dofs. - */ + /** + * Does the same as the function above, + * but fills only the interface_dofs. + */ template void extract_inner_interface_dofs (const MGDoFHandler &mg_dof_handler, @@@ -301,7 -301,7 +301,7 @@@ template void extract_non_interface_dofs (const MGDoFHandler &mg_dof_handler, - std::vector > &non_interface_dofs); - std::vector > &non_interface_dofs); ++ std::vector > &non_interface_dofs); } /* @} */ diff --cc deal.II/include/deal.II/multigrid/mg_transfer.h index d32ca265b7,c2be8fe6fe..d1ee97bf27 --- a/deal.II/include/deal.II/multigrid/mg_transfer.h +++ b/deal.II/include/deal.II/multigrid/mg_transfer.h @@@ -59,212 -58,191 +59,212 @@@ template class template class MGTransferPrebuilt : public MGTransferBase { - public: - /** - * Constructor without constraint - * matrices. Use this constructor - * only with discontinuous finite - * elements or with no local - * refinement. - */ - MGTransferPrebuilt (); - /** - * Constructor with constraint matrices as well as mg_constrained_dofs. - */ - MGTransferPrebuilt (const ConstraintMatrix& constraints, - const MGConstrainedDoFs& mg_constrained_dofs); - /** - * Destructor. - */ - virtual ~MGTransferPrebuilt (); - /** - * Actually build the prolongation - * matrices for each level. - */ - template - void build_matrices (const MGDoFHandler &mg_dof); - - template - void build_matrices (const DoFHandler &dof_handler); - - virtual void prolongate (const unsigned int to_level, - VECTOR &dst, - const VECTOR &src) const; - - virtual void restrict_and_add (const unsigned int from_level, - VECTOR &dst, - const VECTOR &src) const; - - /** - * Transfer from a vector on the - * global grid to vectors defined - * on each of the levels - * separately, i.a. an @p MGVector. 
- */ - template - void - copy_to_mg (const MGDoFHandler& mg_dof, - MGLevelObject& dst, - const InVector& src) const; - - template - void - copy_to_mg (const DoFHandler& dof_handler, - MGLevelObject& dst, - const InVector& src) const; - - /** - * Transfer from multi-level vector to - * normal vector. - * - * Copies data from active - * portions of an MGVector into - * the respective positions of a - * Vector. In order to - * keep the result consistent, - * constrained degrees of freedom - * are set to zero. - */ - template - void - copy_from_mg (const MGDoFHandler& mg_dof, - OutVector& dst, - const MGLevelObject &src) const; - - template - void - copy_from_mg (const DoFHandler& dof_handler, - OutVector& dst, - const MGLevelObject &src) const; - - /** - * Add a multi-level vector to a - * normal vector. - * - * Works as the previous - * function, but probably not for - * continuous elements. - */ - template - void - copy_from_mg_add (const MGDoFHandler& mg_dof, - OutVector& dst, - const MGLevelObject& src) const; - - template - void - copy_from_mg_add (const DoFHandler& dof_handler, - OutVector& dst, - const MGLevelObject& src) const; - - /** - * If this object operates on - * BlockVector objects, we need - * to describe how the individual - * vector components are mapped - * to the blocks of a vector. For - * example, for a Stokes system, - * we have dim+1 vector - * components for velocity and - * pressure, but we may want to - * use block vectors with only - * two blocks for all velocities - * in one block, and the pressure - * variables in the other. - * - * By default, if this function - * is not called, block vectors - * have as many blocks as the - * finite element has vector - * components. However, this can - * be changed by calling this - * function with an array that - * describes how vector - * components are to be grouped - * into blocks. The meaning of - * the argument is the same as - * the one given to the - * DoFTools::count_dofs_per_component - * function. - */ - void - set_component_to_block_map (const std::vector &map); - - /** - * Finite element does not - * provide prolongation matrices. - */ - DeclException0(ExcNoProlongation); - - /** - * Call @p build_matrices - * function first. - */ - DeclException0(ExcMatricesNotBuilt); - - /** - * Memory used by this object. - */ - std::size_t memory_consumption () const; - - - private: - - /** - * Sizes of the multi-level vectors. - */ - std::vector sizes; - - /** - * Sparsity patterns for transfer - * matrices. - */ - std::vector > prolongation_sparsities; - - /** - * The actual prolongation matrix. - * column indices belong to the - * dof indices of the mother cell, - * i.e. the coarse level. - * while row indices belong to the - * child cell, i.e. the fine level. - */ - std::vector > > prolongation_matrices; - - /** - * Mapping for the - * copy_to/from_mg-functions. - * The data is first the global - * index, then the level index. - */ - std::vector > > - copy_indices; - - /** - * The vector that stores what - * has been given to the - * set_component_to_block_map() - * function. - */ - std::vector component_to_block_map; - - /** - * Degrees of freedom on the - * refinement edge excluding - * those on the boundary. - */ - std::vector > interface_dofs; - /** - * The constraints of the global - * system. - */ - SmartPointer > constraints; - /** - * The mg_constrained_dofs of the level - * systems. - */ - - SmartPointer > mg_constrained_dofs; + public: + /** + * Constructor without constraint + * matrices. 
Use this constructor + * only with discontinuous finite + * elements or with no local + * refinement. + */ + MGTransferPrebuilt (); + /** + * Constructor with constraint matrices as well as mg_constrained_dofs. + */ + MGTransferPrebuilt (const ConstraintMatrix &constraints, + const MGConstrainedDoFs &mg_constrained_dofs); + /** + * Destructor. + */ + virtual ~MGTransferPrebuilt (); + /** + * Actually build the prolongation + * matrices for each level. + */ + template + void build_matrices (const MGDoFHandler &mg_dof); + ++ template ++ void build_matrices (const DoFHandler &dof_handler); ++ + virtual void prolongate (const unsigned int to_level, + VECTOR &dst, + const VECTOR &src) const; + + virtual void restrict_and_add (const unsigned int from_level, + VECTOR &dst, + const VECTOR &src) const; + + /** + * Transfer from a vector on the + * global grid to vectors defined + * on each of the levels + * separately, i.a. an @p MGVector. + */ + template + void + copy_to_mg (const MGDoFHandler &mg_dof, + MGLevelObject &dst, + const InVector &src) const; + ++ template ++ void ++ copy_to_mg (const DoFHandler &dof_handler, ++ MGLevelObject &dst, ++ const InVector &src) const; ++ + /** + * Transfer from multi-level vector to + * normal vector. + * + * Copies data from active + * portions of an MGVector into + * the respective positions of a + * Vector. In order to + * keep the result consistent, + * constrained degrees of freedom + * are set to zero. + */ + template + void + copy_from_mg (const MGDoFHandler &mg_dof, + OutVector &dst, + const MGLevelObject &src) const; + ++ template ++ void ++ copy_from_mg (const DoFHandler &dof_handler, ++ OutVector &dst, ++ const MGLevelObject &src) const; ++ + /** + * Add a multi-level vector to a + * normal vector. + * + * Works as the previous + * function, but probably not for + * continuous elements. + */ + template + void + copy_from_mg_add (const MGDoFHandler &mg_dof, + OutVector &dst, + const MGLevelObject &src) const; + ++ template ++ void ++ copy_from_mg_add (const DoFHandler &dof_handler, ++ OutVector &dst, ++ const MGLevelObject &src) const; ++ + /** + * If this object operates on + * BlockVector objects, we need + * to describe how the individual + * vector components are mapped + * to the blocks of a vector. For + * example, for a Stokes system, + * we have dim+1 vector + * components for velocity and + * pressure, but we may want to + * use block vectors with only + * two blocks for all velocities + * in one block, and the pressure + * variables in the other. + * + * By default, if this function + * is not called, block vectors + * have as many blocks as the + * finite element has vector + * components. However, this can + * be changed by calling this + * function with an array that + * describes how vector + * components are to be grouped + * into blocks. The meaning of + * the argument is the same as + * the one given to the + * DoFTools::count_dofs_per_component + * function. + */ + void + set_component_to_block_map (const std::vector &map); + + /** + * Finite element does not + * provide prolongation matrices. + */ + DeclException0(ExcNoProlongation); + + /** + * Call @p build_matrices + * function first. + */ + DeclException0(ExcMatricesNotBuilt); + + /** + * Memory used by this object. + */ + std::size_t memory_consumption () const; + + + private: + + /** + * Sizes of the multi-level vectors. + */ + std::vector sizes; + + /** + * Sparsity patterns for transfer + * matrices. 
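
A rough usage sketch of the transfer interface declared above (build_matrices(), copy_to_mg(), copy_from_mg()); the function and variable names are placeholders, and the handler is assumed to have its level degrees of freedom distributed already:

  #include <deal.II/base/mg_level_object.h>
  #include <deal.II/lac/vector.h>
  #include <deal.II/multigrid/mg_dof_handler.h>
  #include <deal.II/multigrid/mg_transfer.h>

  // Project a global solution vector onto the level hierarchy and back.
  template <int dim>
  void project_to_levels_and_back (const dealii::MGDoFHandler<dim> &mg_dof,
                                   dealii::Vector<double>          &u)
  {
    dealii::MGTransferPrebuilt<dealii::Vector<double> > transfer;
    transfer.build_matrices (mg_dof);        // build the level prolongation matrices

    // one vector per level; copy_to_mg() takes care of sizing them
    dealii::MGLevelObject<dealii::Vector<double> >
      level_u (0, mg_dof.get_tria ().n_levels () - 1);

    transfer.copy_to_mg   (mg_dof, level_u, u);   // global -> level vectors
    transfer.copy_from_mg (mg_dof, u, level_u);   // level vectors -> global
  }
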
+ */ + std::vector > prolongation_sparsities; + + /** + * The actual prolongation matrix. + * column indices belong to the + * dof indices of the mother cell, + * i.e. the coarse level. + * while row indices belong to the + * child cell, i.e. the fine level. + */ + std::vector > > prolongation_matrices; + + /** + * Mapping for the + * copy_to/from_mg-functions. + * The data is first the global + * index, then the level index. - */ ++ */ + std::vector > > + copy_indices; + + /** + * The vector that stores what + * has been given to the + * set_component_to_block_map() + * function. + */ + std::vector component_to_block_map; + + /** + * Degrees of freedom on the + * refinement edge excluding + * those on the boundary. + */ + std::vector > interface_dofs; + /** + * The constraints of the global + * system. + */ + SmartPointer > constraints; + /** + * The mg_constrained_dofs of the level + * systems. + */ + + SmartPointer > mg_constrained_dofs; }; diff --cc deal.II/include/deal.II/multigrid/mg_transfer.templates.h index 7e51e549cd,eb5bc9e514..2950fe2c3c --- a/deal.II/include/deal.II/multigrid/mg_transfer.templates.h +++ b/deal.II/include/deal.II/multigrid/mg_transfer.templates.h @@@ -54,33 -54,18 +54,33 @@@ namespac } } - + + template + void + reinit_vector (const dealii::DoFHandler &mg_dof, + std::vector , + MGLevelObject > &v) + { + for (unsigned int level=v.get_minlevel(); - level<=v.get_maxlevel();++level) ++ level<=v.get_maxlevel(); ++level) + { + unsigned int n = mg_dof.n_dofs (level); + v[level].reinit(n); + } + + } + - /** - * Adjust vectors on all levels to - * correct size. Here, we just - * count the numbers of degrees - * of freedom on each level and - * @p reinit each level vector - * to this length. The target_component - * is handed to MGTools::count_dofs_per_block. - * See for documentation there. - */ + /** + * Adjust vectors on all levels to + * correct size. Here, we just + * count the numbers of degrees + * of freedom on each level and + * @p reinit each level vector + * to this length. The target_component + * is handed to MGTools::count_dofs_per_block. + * See for documentation there. 
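
For reference, the calling convention used by the reinit_vector() helpers below, extracted into a small stand-alone sketch: the caller pre-sizes the result to one inner vector per level, and count_dofs_per_block() fills it as dofs_per_block[level][block]:

  #include <deal.II/multigrid/mg_tools.h>
  #include <vector>

  // Block-wise level dof counts, following the pattern of reinit_vector().
  // DH may be an MGDoFHandler or, with this branch, a DoFHandler with level dofs.
  template <class DH>
  std::vector<std::vector<unsigned int> >
  level_dofs_per_block (const DH &mg_dof)
  {
    const unsigned int n_blocks = mg_dof.get_fe ().n_blocks ();
    std::vector<std::vector<unsigned int> >
      dofs_per_block (mg_dof.get_tria ().n_levels (),
                      std::vector<unsigned int> (n_blocks));
    dealii::MGTools::count_dofs_per_block (mg_dof, dofs_per_block);
    return dofs_per_block;
  }
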
+ */ template void reinit_vector (const dealii::MGDoFHandler &mg_dof, @@@ -115,41 -100,6 +115,41 @@@ v[level].collect_sizes(); } } - ++ + template + void + reinit_vector (const dealii::DoFHandler &mg_dof, + std::vector target_component, + MGLevelObject > &v) + { + const unsigned int n_blocks = mg_dof.get_fe().n_blocks(); + if (target_component.size()==0) + { + target_component.resize(n_blocks); - for (unsigned int i=0;i > - ndofs(mg_dof.get_tria().n_levels(), - std::vector(n_target_blocks)); ++ ndofs(mg_dof.get_tria().n_levels(), ++ std::vector(n_target_blocks)); + MGTools::count_dofs_per_block (mg_dof, ndofs, target_component); + + for (unsigned int level=v.get_minlevel(); - level<=v.get_maxlevel();++level) ++ level<=v.get_maxlevel(); ++level) + { + v[level].reinit(n_target_blocks); + for (unsigned int b=0; b::copy_to_mg +template +template +void +MGTransferPrebuilt::copy_to_mg ( - const DoFHandler& dof_handler, - MGLevelObject& dst, - const InVector& src) const ++ const DoFHandler &dof_handler, ++ MGLevelObject &dst, ++ const InVector &src) const +{ + reinit_vector(dof_handler, component_to_block_map, dst); + bool first = true; - for (unsigned int level=dof_handler.get_tria().n_levels();level != 0;) ++ for (unsigned int level=dof_handler.get_tria().n_levels(); level != 0;) + { + --level; - VECTOR& dst_level = dst[level]; ++ VECTOR &dst_level = dst[level]; + + typedef std::vector >::const_iterator IT; + for (IT i= copy_indices[level].begin(); - i != copy_indices[level].end();++i) - dst_level(i->second) = src(i->first); - - // For non-DG: degrees of - // freedom in the refinement - // face may need special - // attention, since they belong - // to the coarse level, but - // have fine level basis - // functions ++ i != copy_indices[level].end(); ++i) ++ dst_level(i->second) = src(i->first); ++ ++ // For non-DG: degrees of ++ // freedom in the refinement ++ // face may need special ++ // attention, since they belong ++ // to the coarse level, but ++ // have fine level basis ++ // functions + if (!first) - restrict_and_add (level+1, dst[level], dst[level+1]); ++ restrict_and_add (level+1, dst[level], dst[level+1]); + first = false; + } +} + + + template template void @@@ -260,36 -175,6 +260,36 @@@ MGTransferPrebuilt::copy_from_m +template +template +void +MGTransferPrebuilt::copy_from_mg( - const DoFHandler& dof_handler, - OutVector& dst, - const MGLevelObject& src) const ++ const DoFHandler &dof_handler, ++ OutVector &dst, ++ const MGLevelObject &src) const +{ - // For non-DG: degrees of - // freedom in the refinement - // face may need special - // attention, since they belong - // to the coarse level, but - // have fine level basis - // functions ++ // For non-DG: degrees of ++ // freedom in the refinement ++ // face may need special ++ // attention, since they belong ++ // to the coarse level, but ++ // have fine level basis ++ // functions + dst = 0; - for (unsigned int level=0;level >::const_iterator IT; ++ for (unsigned int level=0; level >::const_iterator IT; + - for (IT i= copy_indices[level].begin(); - i != copy_indices[level].end();++i) - dst(i->first) = src[level](i->second); - } ++ for (IT i= copy_indices[level].begin(); ++ i != copy_indices[level].end(); ++i) ++ dst(i->first) = src[level](i->second); ++ } + if (constraints != 0) + constraints->condense(dst); +} + + + template template void @@@ -316,32 -201,6 +316,32 @@@ MGTransferPrebuilt::copy_from_m +template +template +void +MGTransferPrebuilt::copy_from_mg_add ( - const DoFHandler& dof_handler, ++ const DoFHandler 
&dof_handler, + OutVector &dst, + const MGLevelObject &src) const +{ - // For non-DG: degrees of - // freedom in the refinement - // face may need special - // attention, since they belong - // to the coarse level, but - // have fine level basis - // functions - for (unsigned int level=0;level >::const_iterator IT; - for (IT i= copy_indices[level].begin(); - i != copy_indices[level].end();++i) - dst(i->first) += src[level](i->second); - } ++ // For non-DG: degrees of ++ // freedom in the refinement ++ // face may need special ++ // attention, since they belong ++ // to the coarse level, but ++ // have fine level basis ++ // functions ++ for (unsigned int level=0; level >::const_iterator IT; ++ for (IT i= copy_indices[level].begin(); ++ i != copy_indices[level].end(); ++i) ++ dst(i->first) += src[level](i->second); ++ } +} + + + template void MGTransferPrebuilt:: diff --cc deal.II/include/deal.II/multigrid/multigrid.h index d3b20f401d,cf674c41f2..afbf957d29 --- a/deal.II/include/deal.II/multigrid/multigrid.h +++ b/deal.II/include/deal.II/multigrid/multigrid.h @@@ -63,441 -62,432 +63,441 @@@ DEAL_II_NAMESPACE_OPE template class Multigrid : public Subscriptor { - public: - /** - * List of implemented cycle types. - */ - enum Cycle - { - /// The V-cycle - v_cycle, - /// The W-cycle - w_cycle, - /// The F-cycle - f_cycle - }; - - typedef VECTOR vector_type; - typedef const VECTOR const_vector_type; - - /** - * Constructor. The - * MGDoFHandler is used to - * determine the highest possible - * level. transfer is an - * object performing prolongation - * and restriction. - * - * This function already - * initializes the vectors which - * will be used later in the - * course of the - * computations. You should - * therefore create objects of - * this type as late as possible. - */ - template - Multigrid(const MGDoFHandler& mg_dof_handler, - const MGMatrixBase& matrix, - const MGCoarseGridBase& coarse, - const MGTransferBase& transfer, - const MGSmootherBase& pre_smooth, - const MGSmootherBase& post_smooth, - Cycle cycle = v_cycle); - - template - Multigrid(const DoFHandler& mg_dof_handler, - const MGMatrixBase& matrix, - const MGCoarseGridBase& coarse, - const MGTransferBase& transfer, - const MGSmootherBase& pre_smooth, - const MGSmootherBase& post_smooth, - Cycle cycle = v_cycle); - - /** - * Experimental constructor for - * cases in which no MGDoFHandler - * is available. - * - * @warning Not intended for general use. - */ - Multigrid(const unsigned int minlevel, - const unsigned int maxlevel, - const MGMatrixBase& matrix, - const MGCoarseGridBase& coarse, - const MGTransferBase& transfer, - const MGSmootherBase& pre_smooth, - const MGSmootherBase& post_smooth, - Cycle cycle = v_cycle); - - /** - * Reinit this class according to - * #minlevel and #maxlevel. - */ - void reinit (const unsigned int minlevel, - const unsigned int maxlevel); - - /** - * Execute one multigrid - * cycle. The type of cycle is - * selected by the constructor - * argument cycle. See the enum - * Cycle for available types. - */ - void cycle (); - - /** - * Execute one step of the - * V-cycle algorithm. This - * function assumes, that the - * multilevel vector #defect is - * filled with the residual of an - * outer defect correction - * scheme. This is usually taken - * care of by - * PreconditionMG). After - * vcycle(), the result is in the - * multilevel vector - * #solution. See - * copy_*_mg in class - * MGTools if you want to use - * these vectors yourself. 
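
A rough sketch of how the pieces are typically wired together so that the cycle is driven by an outer Krylov solver through PreconditionMG, as mentioned above; all multigrid ingredients (level matrices, smoother, coarse solver, transfer) are assumed to be assembled elsewhere, and the solver tolerance is arbitrary:

  #include <deal.II/lac/solver_cg.h>
  #include <deal.II/lac/solver_control.h>
  #include <deal.II/lac/sparse_matrix.h>
  #include <deal.II/lac/vector.h>
  #include <deal.II/multigrid/mg_base.h>
  #include <deal.II/multigrid/mg_dof_handler.h>
  #include <deal.II/multigrid/mg_transfer.h>
  #include <deal.II/multigrid/multigrid.h>

  // Use the multigrid cycle as a preconditioner for an outer CG iteration.
  template <int dim>
  void solve_with_mg (const dealii::MGDoFHandler<dim>                           &mg_dof,
                      const dealii::SparseMatrix<double>                        &system_matrix,
                      const dealii::MGMatrixBase<dealii::Vector<double> >       &mg_matrix,
                      const dealii::MGCoarseGridBase<dealii::Vector<double> >   &mg_coarse,
                      const dealii::MGSmootherBase<dealii::Vector<double> >     &mg_smoother,
                      const dealii::MGTransferPrebuilt<dealii::Vector<double> > &mg_transfer,
                      dealii::Vector<double>                                    &solution,
                      const dealii::Vector<double>                              &rhs)
  {
    // the defect and solution level vectors live inside the Multigrid object
    dealii::Multigrid<dealii::Vector<double> >
      mg (mg_dof, mg_matrix, mg_coarse, mg_transfer, mg_smoother, mg_smoother);

    dealii::PreconditionMG<dim, dealii::Vector<double>,
                           dealii::MGTransferPrebuilt<dealii::Vector<double> > >
      preconditioner (mg_dof, mg, mg_transfer);

    dealii::SolverControl                     control (1000, 1e-12 * rhs.l2_norm ());
    dealii::SolverCG<dealii::Vector<double> > cg (control);
    // each application of the preconditioner runs one multigrid cycle
    cg.solve (system_matrix, solution, rhs, preconditioner);
  }
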
- * - * The actual work for this - * function is done in - * level_v_step(). - */ - void vcycle (); - - /** - * @deprecated This function is - * purely experimental and will - * probably never be implemented - * in a way that it can be - * released. - * - * Perform a multigrid cycle with - * a vector which is already a - * level vector. Use of this - * function assumes that there is - * NO local refinement and that - * both vectors are on the finest - * level of this Multigrid - * object. - */ - void vmult(VECTOR& dst, const VECTOR& src) const; - - /** - * @deprecated This function is - * purely experimental and will - * probably never be implemented - * in a way that it can be - * released. - * - * Perform a multigrid cycle with - * a vector which is already a - * level vector. Use of this - * function assumes that there is - * NO local refinement and that - * both vectors are on the finest - * level of this Multigrid - * object. - */ - void vmult_add(VECTOR& dst, const VECTOR& src) const; - - /** - * @deprecated Even worse than - * vmult(), this function is not - * even implemented, but just - * declared such that certain - * objects relying on it can be - * constructed. - */ - void Tvmult(VECTOR& dst, const VECTOR& src) const; - - /** - * @deprecated Even worse than - * vmult(), this function is not - * even implemented, but just - * declared such that certain - * objects relying on it can be - * constructed. - */ - void Tvmult_add(VECTOR& dst, const VECTOR& src) const; - - /** - * Set additional matrices to - * correct residual computation - * at refinement edges. Since we - * only smoothen in the interior - * of the refined part of the - * mesh, the coupling across the - * refinement edge is - * missing. This coupling is - * provided by these two - * matrices. - * - * @note While - * edge_out.vmult is - * used, for the second argument, - * we use - * edge_in.Tvmult. Thus, - * edge_in should be - * assembled in transposed - * form. This saves a second - * sparsity pattern for - * edge_in. In - * particular, for symmetric - * operators, both arguments can - * refer to the same matrix, - * saving assembling of one of - * them. - */ - void set_edge_matrices (const MGMatrixBase& edge_out, - const MGMatrixBase& edge_in); - - /** - * Set additional matrices to - * correct residual computation - * at refinement edges. These - * matrices originate from - * discontinuous Galerkin methods - * (see FE_DGQ etc.), where they - * correspond to the edge fluxes - * at the refinement edge between - * two levels. - * - * @note While - * edge_down.vmult is - * used, for the second argument, - * we use - * edge_up.Tvmult. Thus, - * edge_up should be - * assembled in transposed - * form. This saves a second - * sparsity pattern for - * edge_up. In - * particular, for symmetric - * operators, both arguments can - * refer to the same matrix, - * saving assembling of one of - * them. - */ - void set_edge_flux_matrices (const MGMatrixBase& edge_down, - const MGMatrixBase& edge_up); - - /** - * Return the finest level for - * multigrid. - */ - unsigned int get_maxlevel() const; - - /** - * Return the coarsest level for - * multigrid. - */ - unsigned int get_minlevel() const; - - /** - * Set the highest level for - * which the multilevel method is - * performed. By default, this is - * the finest level of the - * Triangulation; therefore, this - * function will only accept - * arguments smaller than the - * current #maxlevel and not - * smaller than the current - * #minlevel. 
- */ - void set_maxlevel (const unsigned int); - - /** - * Set the coarse level for which - * the multilevel method is - * performed. By default, this is - * zero. Accepted are - * non-negative values not larger than - * than the current #maxlevel. - * - * If relative ist - * true, then this - * function determins the number - * of levels used, that is, it - * sets #minlevel to - * #maxlevel-level. - * - * @note The mesh on the coarsest - * level must cover the whole - * domain. There may not be - * hanging nodes on #minlevel. - * - * @note If #minlevel is set to a - * nonzero value, do not forget - * to adjust your coarse grid - * solver! - */ - void set_minlevel (const unsigned int level, - bool relative = false); - - /** - * Chance #cycle_type used in cycle(). - */ - void set_cycle(Cycle); - - /** - * Set the debug level. Higher - * values will create more - * debugging output during the - * multigrid cycles. - */ - void set_debug (const unsigned int); - - private: - - /** - * The V-cycle multigrid method. - * level is the level the - * function starts on. It - * will usually be called for the - * highest level from outside, - * but will then call itself - * recursively for level-1, - * unless we are on #minlevel - * where the coarse grid solver - * solves the problem exactly. - */ - void level_v_step (const unsigned int level); - - /** - * The actual W-cycle or F-cycle - * multigrid method. - * level is the level - * the function starts on. It - * will usually be called for the - * highest level from outside, - * but will then call itself - * recursively for - * level-1, unless we - * are on #minlevel where the - * coarse grid solver solves the - * problem exactly. - */ - void level_step (const unsigned int level, Cycle cycle); - - /** - * Cycle type performed by the method cycle(). - */ - Cycle cycle_type; - - /** - * Level for coarse grid solution. - */ - unsigned int minlevel; - - /** - * Highest level of cells. - */ - unsigned int maxlevel; - - public: - /** - * Input vector for the - * cycle. Contains the defect of - * the outer method projected to - * the multilevel vectors. - */ - MGLevelObject defect; - - /** - * The solution update after the - * multigrid step. - */ - MGLevelObject solution; - - private: - /** - * Auxiliary vector. - */ - MGLevelObject t; - - /** - * Auxiliary vector for W- and - * F-cycles. Left uninitialized - * in V-cycle. - */ - MGLevelObject defect2; - - - /** - * The matrix for each level. - */ - SmartPointer,Multigrid > matrix; - - /** - * The matrix for each level. - */ - SmartPointer,Multigrid > coarse; - - /** - * Object for grid tranfer. - */ - SmartPointer,Multigrid > transfer; - - /** - * The pre-smoothing object. - */ - SmartPointer,Multigrid > pre_smooth; - - /** - * The post-smoothing object. - */ - SmartPointer,Multigrid > post_smooth; - - /** - * Edge matrix from the interior - * of the refined part to the - * refinement edge. - * - * @note Only vmult is - * used for these matrices. - */ - SmartPointer > edge_out; - - /** - * Transpose edge matrix from the - * refinement edge to the - * interior of the refined part. - * - * @note Only Tvmult is - * used for these matrices. - */ - SmartPointer > edge_in; - - /** - * Edge matrix from fine to coarse. - * - * @note Only vmult is - * used for these matrices. - */ - SmartPointer,Multigrid > edge_down; - - /** - * Transpose edge matrix from coarse to fine. - * - * @note Only Tvmult is - * used for these matrices. - */ - SmartPointer,Multigrid > edge_up; - - /** - * Level for debug - * output. 
Defaults to zero and - * can be set by set_debug(). - */ - unsigned int debug; - - template friend class PreconditionMG; + public: + /** + * List of implemented cycle types. + */ + enum Cycle + { + /// The V-cycle + v_cycle, + /// The W-cycle + w_cycle, + /// The F-cycle + f_cycle + }; + + typedef VECTOR vector_type; + typedef const VECTOR const_vector_type; + + /** + * Constructor. The + * MGDoFHandler is used to + * determine the highest possible + * level. transfer is an + * object performing prolongation + * and restriction. + * + * This function already + * initializes the vectors which + * will be used later in the + * course of the + * computations. You should + * therefore create objects of + * this type as late as possible. + */ + template + Multigrid(const MGDoFHandler &mg_dof_handler, + const MGMatrixBase &matrix, + const MGCoarseGridBase &coarse, + const MGTransferBase &transfer, + const MGSmootherBase &pre_smooth, + const MGSmootherBase &post_smooth, + Cycle cycle = v_cycle); + ++ template ++ Multigrid(const DoFHandler &mg_dof_handler, ++ const MGMatrixBase &matrix, ++ const MGCoarseGridBase &coarse, ++ const MGTransferBase &transfer, ++ const MGSmootherBase &pre_smooth, ++ const MGSmootherBase &post_smooth, ++ Cycle cycle = v_cycle); ++ + /** + * Experimental constructor for + * cases in which no MGDoFHandler + * is available. + * + * @warning Not intended for general use. + */ + Multigrid(const unsigned int minlevel, + const unsigned int maxlevel, + const MGMatrixBase &matrix, + const MGCoarseGridBase &coarse, + const MGTransferBase &transfer, + const MGSmootherBase &pre_smooth, + const MGSmootherBase &post_smooth, + Cycle cycle = v_cycle); + + /** + * Reinit this class according to + * #minlevel and #maxlevel. + */ + void reinit (const unsigned int minlevel, + const unsigned int maxlevel); + + /** + * Execute one multigrid + * cycle. The type of cycle is + * selected by the constructor + * argument cycle. See the enum + * Cycle for available types. + */ + void cycle (); + + /** + * Execute one step of the + * V-cycle algorithm. This + * function assumes, that the + * multilevel vector #defect is + * filled with the residual of an + * outer defect correction + * scheme. This is usually taken + * care of by + * PreconditionMG). After + * vcycle(), the result is in the + * multilevel vector + * #solution. See + * copy_*_mg in class + * MGTools if you want to use + * these vectors yourself. + * + * The actual work for this + * function is done in + * level_v_step(). + */ + void vcycle (); + + /** + * @deprecated This function is + * purely experimental and will + * probably never be implemented + * in a way that it can be + * released. + * + * Perform a multigrid cycle with + * a vector which is already a + * level vector. Use of this + * function assumes that there is + * NO local refinement and that + * both vectors are on the finest + * level of this Multigrid + * object. + */ + void vmult(VECTOR &dst, const VECTOR &src) const; + + /** + * @deprecated This function is + * purely experimental and will + * probably never be implemented + * in a way that it can be + * released. + * + * Perform a multigrid cycle with + * a vector which is already a + * level vector. Use of this + * function assumes that there is + * NO local refinement and that + * both vectors are on the finest + * level of this Multigrid + * object. 
+ */ + void vmult_add(VECTOR &dst, const VECTOR &src) const; + + /** + * @deprecated Even worse than + * vmult(), this function is not + * even implemented, but just + * declared such that certain + * objects relying on it can be + * constructed. + */ + void Tvmult(VECTOR &dst, const VECTOR &src) const; + + /** + * @deprecated Even worse than + * vmult(), this function is not + * even implemented, but just + * declared such that certain + * objects relying on it can be + * constructed. + */ + void Tvmult_add(VECTOR &dst, const VECTOR &src) const; + + /** + * Set additional matrices to + * correct residual computation + * at refinement edges. Since we + * only smoothen in the interior + * of the refined part of the + * mesh, the coupling across the + * refinement edge is + * missing. This coupling is + * provided by these two + * matrices. + * + * @note While + * edge_out.vmult is + * used, for the second argument, + * we use + * edge_in.Tvmult. Thus, + * edge_in should be + * assembled in transposed + * form. This saves a second + * sparsity pattern for + * edge_in. In + * particular, for symmetric + * operators, both arguments can + * refer to the same matrix, + * saving assembling of one of + * them. + */ + void set_edge_matrices (const MGMatrixBase &edge_out, + const MGMatrixBase &edge_in); + + /** + * Set additional matrices to + * correct residual computation + * at refinement edges. These + * matrices originate from + * discontinuous Galerkin methods + * (see FE_DGQ etc.), where they + * correspond to the edge fluxes + * at the refinement edge between + * two levels. + * + * @note While + * edge_down.vmult is + * used, for the second argument, + * we use + * edge_up.Tvmult. Thus, + * edge_up should be + * assembled in transposed + * form. This saves a second + * sparsity pattern for + * edge_up. In + * particular, for symmetric + * operators, both arguments can + * refer to the same matrix, + * saving assembling of one of + * them. + */ + void set_edge_flux_matrices (const MGMatrixBase &edge_down, + const MGMatrixBase &edge_up); + + /** + * Return the finest level for + * multigrid. + */ + unsigned int get_maxlevel() const; + + /** + * Return the coarsest level for + * multigrid. + */ + unsigned int get_minlevel() const; + + /** + * Set the highest level for + * which the multilevel method is + * performed. By default, this is + * the finest level of the + * Triangulation; therefore, this + * function will only accept + * arguments smaller than the + * current #maxlevel and not + * smaller than the current + * #minlevel. + */ + void set_maxlevel (const unsigned int); + + /** + * Set the coarse level for which + * the multilevel method is + * performed. By default, this is + * zero. Accepted are + * non-negative values not larger than + * than the current #maxlevel. + * + * If relative ist + * true, then this + * function determins the number + * of levels used, that is, it + * sets #minlevel to + * #maxlevel-level. + * + * @note The mesh on the coarsest + * level must cover the whole + * domain. There may not be + * hanging nodes on #minlevel. + * + * @note If #minlevel is set to a + * nonzero value, do not forget + * to adjust your coarse grid + * solver! + */ + void set_minlevel (const unsigned int level, + bool relative = false); + + /** + * Chance #cycle_type used in cycle(). + */ + void set_cycle(Cycle); + + /** + * Set the debug level. Higher + * values will create more + * debugging output during the + * multigrid cycles. 
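Beyond the constructor arguments, the interface documented above allows tuning the cycle after construction. A hedged example of the typical calls; mg and mg_interface are assumed objects (mg_interface standing for an MGMatrixBase wrapper around the level interface matrices), and the values are purely illustrative:

    // Coupling across refinement edges on locally refined meshes; for a
    // symmetric operator the same object may be passed twice, since the
    // second argument is only used through Tvmult():
    mg.set_edge_matrices (mg_interface, mg_interface);

    // Use only the three finest levels (minlevel = maxlevel - 2), switch
    // to a W-cycle, and enable some debug output:
    mg.set_minlevel (2, /*relative=*/ true);
    mg.set_cycle (Multigrid<Vector<double> >::w_cycle);
    mg.set_debug (1);
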
+ */ + void set_debug (const unsigned int); + + private: + + /** + * The V-cycle multigrid method. + * level is the level the + * function starts on. It + * will usually be called for the + * highest level from outside, + * but will then call itself + * recursively for level-1, + * unless we are on #minlevel + * where the coarse grid solver + * solves the problem exactly. + */ + void level_v_step (const unsigned int level); + + /** + * The actual W-cycle or F-cycle + * multigrid method. + * level is the level + * the function starts on. It + * will usually be called for the + * highest level from outside, + * but will then call itself + * recursively for + * level-1, unless we + * are on #minlevel where the + * coarse grid solver solves the + * problem exactly. + */ + void level_step (const unsigned int level, Cycle cycle); + + /** + * Cycle type performed by the method cycle(). + */ + Cycle cycle_type; + + /** + * Level for coarse grid solution. + */ + unsigned int minlevel; + + /** + * Highest level of cells. + */ + unsigned int maxlevel; + + public: + /** + * Input vector for the + * cycle. Contains the defect of + * the outer method projected to + * the multilevel vectors. + */ + MGLevelObject defect; + + /** + * The solution update after the + * multigrid step. + */ + MGLevelObject solution; + + private: + /** + * Auxiliary vector. + */ + MGLevelObject t; + + /** + * Auxiliary vector for W- and + * F-cycles. Left uninitialized + * in V-cycle. + */ + MGLevelObject defect2; + + + /** + * The matrix for each level. + */ + SmartPointer,Multigrid > matrix; + + /** + * The matrix for each level. + */ + SmartPointer,Multigrid > coarse; + + /** + * Object for grid tranfer. + */ + SmartPointer,Multigrid > transfer; + + /** + * The pre-smoothing object. + */ + SmartPointer,Multigrid > pre_smooth; + + /** + * The post-smoothing object. + */ + SmartPointer,Multigrid > post_smooth; + + /** + * Edge matrix from the interior + * of the refined part to the + * refinement edge. + * + * @note Only vmult is + * used for these matrices. + */ + SmartPointer > edge_out; + + /** + * Transpose edge matrix from the + * refinement edge to the + * interior of the refined part. + * + * @note Only Tvmult is + * used for these matrices. + */ + SmartPointer > edge_in; + + /** + * Edge matrix from fine to coarse. + * + * @note Only vmult is + * used for these matrices. + */ + SmartPointer,Multigrid > edge_down; + + /** + * Transpose edge matrix from coarse to fine. + * + * @note Only Tvmult is + * used for these matrices. + */ + SmartPointer,Multigrid > edge_up; + + /** + * Level for debug + * output. Defaults to zero and + * can be set by set_debug(). + */ + unsigned int debug; + + template friend class PreconditionMG; }; @@@ -515,80 -505,80 +515,80 @@@ template class PreconditionMG : public Subscriptor { - public: - /** - * Constructor. - * Arguments are the multigrid object, - * pre-smoother, post-smoother and - * coarse grid solver. - */ - PreconditionMG(const DoFHandler& dof_handler, - Multigrid& mg, - const TRANSFER& transfer); - - /** - * Dummy function needed by other classes. - */ - bool empty () const; - - /** - * Preconditioning operator. - * Calls the @p vcycle function - * of the @p MG object passed to - * the constructor. - * - * This is the operator used by - * LAC iterative solvers. - */ - template - void vmult (VECTOR2 &dst, - const VECTOR2 &src) const; - - /** - * Preconditioning operator. - * Calls the @p vcycle function - * of the @p MG object passed to - * the constructor. 
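The private level_v_step() described here is the recursive heart of the V-cycle. The following is only a schematic of that recursion, not the library implementation (which in addition applies the edge matrices set above and honors set_debug()); it uses the member objects and level vectors declared in this class:

    // schematic V-cycle recursion on one level
    if (level == minlevel)
      {
        (*coarse)(level, solution[level], defect[level]);   // exact coarse solve
        return;
      }
    pre_smooth->smooth (level, solution[level], defect[level]);
    matrix->vmult (level, t[level], solution[level]);
    t[level].sadd (-1., defect[level]);                     // t = defect - A * solution
    defect[level-1] = 0.;
    transfer->restrict_and_add (level, defect[level-1], t[level]);
    level_v_step (level-1);                                 // recurse on the coarser level
    transfer->prolongate (level, t[level], solution[level-1]);
    solution[level] += t[level];
    post_smooth->smooth (level, solution[level], defect[level]);
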
- */ - template - void vmult_add (VECTOR2 &dst, - const VECTOR2 &src) const; - - /** - * Tranposed preconditioning operator. - * - * Not implemented, but the - * definition may be needed. - */ - template - void Tvmult (VECTOR2 &dst, - const VECTOR2 &src) const; - - /** - * Tranposed preconditioning operator. - * - * Not implemented, but the - * definition may be needed. - */ - template - void Tvmult_add (VECTOR2 &dst, - const VECTOR2 &src) const; - - private: - /** - * Associated @p MGDoFHandler. - */ - SmartPointer,PreconditionMG > dof_handler; - - /** - * The multigrid object. - */ - SmartPointer,PreconditionMG > multigrid; - - /** - * Object for grid tranfer. - */ - SmartPointer > transfer; + public: + /** + * Constructor. + * Arguments are the multigrid object, + * pre-smoother, post-smoother and + * coarse grid solver. + */ - PreconditionMG(const MGDoFHandler &mg_dof, ++ PreconditionMG(const DoFHandler &dof_handler, + Multigrid &mg, + const TRANSFER &transfer); + + /** + * Dummy function needed by other classes. + */ + bool empty () const; + + /** + * Preconditioning operator. + * Calls the @p vcycle function + * of the @p MG object passed to + * the constructor. + * + * This is the operator used by + * LAC iterative solvers. + */ + template + void vmult (VECTOR2 &dst, + const VECTOR2 &src) const; + + /** + * Preconditioning operator. + * Calls the @p vcycle function + * of the @p MG object passed to + * the constructor. + */ + template + void vmult_add (VECTOR2 &dst, + const VECTOR2 &src) const; + + /** + * Tranposed preconditioning operator. + * + * Not implemented, but the + * definition may be needed. + */ + template + void Tvmult (VECTOR2 &dst, + const VECTOR2 &src) const; + + /** + * Tranposed preconditioning operator. + * + * Not implemented, but the + * definition may be needed. + */ + template + void Tvmult_add (VECTOR2 &dst, + const VECTOR2 &src) const; + + private: + /** + * Associated @p MGDoFHandler. + */ - SmartPointer,PreconditionMG > mg_dof_handler; ++ SmartPointer,PreconditionMG > dof_handler; + + /** + * The multigrid object. + */ + SmartPointer,PreconditionMG > multigrid; + + /** + * Object for grid tranfer. 
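Taken together, the DoFHandler-based constructors added in this patch make the usual step-16 style setup possible without an MGDoFHandler. A sketch under the assumption that mg_matrix, mg_coarse and mg_smoother are suitable MGMatrixBase, MGCoarseGridBase and MGSmootherBase objects, and that system_matrix, solution and system_rhs exist:

    MGTransferPrebuilt<Vector<double> > mg_transfer;
    mg_transfer.build_matrices (dof_handler);   // assuming the branch accepts a plain DoFHandler here

    Multigrid<Vector<double> > mg (dof_handler,
                                   mg_matrix,
                                   mg_coarse,
                                   mg_transfer,
                                   mg_smoother,    // pre-smoother
                                   mg_smoother);   // post-smoother

    PreconditionMG<dim, Vector<double>, MGTransferPrebuilt<Vector<double> > >
      preconditioner (dof_handler, mg, mg_transfer);

    SolverControl solver_control (1000, 1e-12);
    SolverCG<>    cg (solver_control);
    cg.solve (system_matrix, solution, system_rhs, preconditioner);
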
+ */ + SmartPointer > transfer; }; /*@}*/ @@@ -625,34 -615,6 +625,34 @@@ Multigrid::Multigrid (const MGD {} +template +template - Multigrid::Multigrid (const DoFHandler& dof_handler, - const MGMatrixBase& matrix, - const MGCoarseGridBase& coarse, - const MGTransferBase& transfer, - const MGSmootherBase& pre_smooth, - const MGSmootherBase& post_smooth, - Cycle cycle) - : - cycle_type(cycle), - minlevel(0), - maxlevel(dof_handler.get_tria().n_levels()-1), - defect(minlevel,maxlevel), - solution(minlevel,maxlevel), - t(minlevel,maxlevel), - defect2(minlevel,maxlevel), - matrix(&matrix, typeid(*this).name()), - coarse(&coarse, typeid(*this).name()), - transfer(&transfer, typeid(*this).name()), - pre_smooth(&pre_smooth, typeid(*this).name()), - post_smooth(&post_smooth, typeid(*this).name()), - edge_down(0, typeid(*this).name()), - edge_up(0, typeid(*this).name()), - debug(0) ++Multigrid::Multigrid (const DoFHandler &dof_handler, ++ const MGMatrixBase &matrix, ++ const MGCoarseGridBase &coarse, ++ const MGTransferBase &transfer, ++ const MGSmootherBase &pre_smooth, ++ const MGSmootherBase &post_smooth, ++ Cycle cycle) ++ : ++ cycle_type(cycle), ++ minlevel(0), ++ maxlevel(dof_handler.get_tria().n_levels()-1), ++ defect(minlevel,maxlevel), ++ solution(minlevel,maxlevel), ++ t(minlevel,maxlevel), ++ defect2(minlevel,maxlevel), ++ matrix(&matrix, typeid(*this).name()), ++ coarse(&coarse, typeid(*this).name()), ++ transfer(&transfer, typeid(*this).name()), ++ pre_smooth(&pre_smooth, typeid(*this).name()), ++ post_smooth(&post_smooth, typeid(*this).name()), ++ edge_down(0, typeid(*this).name()), ++ edge_up(0, typeid(*this).name()), ++ debug(0) +{} + + template inline @@@ -678,13 -640,13 +678,13 @@@ Multigrid::get_minlevel () cons template PreconditionMG - ::PreconditionMG(const DoFHandler& dof_handler, - Multigrid& mg, - const TRANSFER& transfer) - : - dof_handler(&dof_handler), - multigrid(&mg), - transfer(&transfer) -::PreconditionMG(const MGDoFHandler &mg_dof_handler, ++::PreconditionMG(const DoFHandler &dof_handler, + Multigrid &mg, + const TRANSFER &transfer) + : - mg_dof_handler(&mg_dof_handler), ++ dof_handler(&dof_handler), + multigrid(&mg), + transfer(&transfer) {} template @@@ -698,17 -660,17 +698,17 @@@ template void PreconditionMG::vmult ( - VECTOR2& dst, - const VECTOR2& src) const + VECTOR2 &dst, + const VECTOR2 &src) const { - transfer->copy_to_mg(*mg_dof_handler, + transfer->copy_to_mg(*dof_handler, - multigrid->defect, - src); + multigrid->defect, + src); multigrid->cycle(); - transfer->copy_from_mg(*mg_dof_handler, + transfer->copy_from_mg(*dof_handler, - dst, - multigrid->solution); + dst, + multigrid->solution); } @@@ -716,16 -678,16 +716,16 @@@ template void PreconditionMG::vmult_add ( - VECTOR2& dst, - const VECTOR2& src) const + VECTOR2 &dst, + const VECTOR2 &src) const { - transfer->copy_to_mg(*mg_dof_handler, + transfer->copy_to_mg(*dof_handler, - multigrid->defect, - src); + multigrid->defect, + src); multigrid->cycle(); - transfer->copy_from_mg_add(*mg_dof_handler, + transfer->copy_from_mg_add(*dof_handler, - dst, - multigrid->solution); + dst, + multigrid->solution); } diff --cc deal.II/include/deal.II/numerics/derivative_approximation.h index 1e40043c95,2958010bd1..0aaa4cc2c5 --- a/deal.II/include/deal.II/numerics/derivative_approximation.h +++ b/deal.II/include/deal.II/numerics/derivative_approximation.h @@@ -178,520 -178,520 +178,520 @@@ namespace h */ class DerivativeApproximation { + public: + /** + * This function is used to + * obtain an approximation of the + * 
gradient. Pass it the DoF + * handler object that describes + * the finite element field, a + * nodal value vector, and + * receive the cell-wise + * Euclidian norm of the + * approximated gradient. + * + * The last parameter denotes the + * solution component, for which the + * gradient is to be computed. It + * defaults to the first component. For + * scalar elements, this is the only + * valid choice; for vector-valued ones, + * any component between zero and the + * number of vector components can be + * given here. + */ + template class DH, class InputVector, int spacedim> + static void + approximate_gradient (const Mapping &mapping, + const DH &dof, + const InputVector &solution, + Vector &derivative_norm, + const unsigned int component = 0); + + /** + * Calls the @p interpolate + * function, see above, with + * mapping=MappingQ1@(). + */ + template class DH, class InputVector, int spacedim> + static void + approximate_gradient (const DH &dof, + const InputVector &solution, + Vector &derivative_norm, + const unsigned int component = 0); + + /** + * This function is the analogue + * to the one above, computing + * finite difference + * approximations of the tensor + * of second derivatives. Pass it + * the DoF handler object that + * describes the finite element + * field, a nodal value vector, + * and receive the cell-wise + * spectral norm of the + * approximated tensor of second + * derivatives. The spectral norm + * is the matrix norm associated + * to the $l_2$ vector norm. + * + * The last parameter denotes the + * solution component, for which + * the gradient is to be + * computed. It defaults to the + * first component. For + * scalar elements, this is the only + * valid choice; for vector-valued ones, + * any component between zero and the + * number of vector components can be + * given here. + */ + template class DH, class InputVector, int spacedim> + static void + approximate_second_derivative (const Mapping &mapping, + const DH &dof, + const InputVector &solution, + Vector &derivative_norm, + const unsigned int component = 0); + + /** + * Calls the @p interpolate + * function, see above, with + * mapping=MappingQ1@(). + */ + template class DH, class InputVector, int spacedim> + static void + approximate_second_derivative (const DH &dof, + const InputVector &solution, + Vector &derivative_norm, + const unsigned int component = 0); + + /** + * This function calculates the + * order-th order approximate + * derivative and returns the full tensor + * for a single cell. + * + * The last parameter denotes the + * solution component, for which + * the gradient is to be + * computed. It defaults to the + * first component. For + * scalar elements, this is the only + * valid choice; for vector-valued ones, + * any component between zero and the + * number of vector components can be + * given here. + */ + + template class DH, class InputVector, int order, int spacedim> + static void + approximate_derivative_tensor (const Mapping &mapping, + const DH &dof, + const InputVector &solution, + const typename DH::active_cell_iterator &cell, + Tensor &derivative, + const unsigned int component = 0); + + /** + * Same as above, with + * mapping=MappingQ1@(). + */ + + template class DH, class InputVector, int order, int spacedim> + static void + approximate_derivative_tensor (const DH &dof, + const InputVector &solution, + const typename DH::active_cell_iterator &cell, + Tensor &derivative, + const unsigned int component = 0); + + /** + * Return the norm of the derivative. 
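For orientation, the most common use of these functions is as a cell-wise refinement indicator, as in step-9. A hedged sketch, assuming triangulation, mapping, dof_handler, solution and an active cell iterator cell exist in a <dim>-templated context:

    // cell-wise Euclidean norm of the approximated gradient
    Vector<float> gradient_indicator (triangulation.n_active_cells());
    DerivativeApproximation::approximate_gradient (mapping,
                                                   dof_handler,
                                                   solution,
                                                   gradient_indicator);

    // full approximate second-derivative tensor on a single cell
    Tensor<2,dim> second_derivative;
    DerivativeApproximation::approximate_derivative_tensor (mapping,
                                                            dof_handler,
                                                            solution,
                                                            cell,
                                                            second_derivative);
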
+ */ + template + static double + derivative_norm(const Tensor &derivative); + + /** + * Exception + */ + DeclException2 (ExcInvalidVectorLength, + int, int, + << "Vector has length " << arg1 << ", but should have " + << arg2); + /** + * Exception + */ + DeclException0 (ExcInsufficientDirections); + + private: + + /** + * The following class is used to + * describe the data needed to + * compute the finite difference + * approximation to the gradient + * on a cell. See the general + * documentation of this class + * for more information on + * implementational details. + * + * @author Wolfgang Bangerth, 2000 + */ + template + class Gradient + { public: - /** - * This function is used to - * obtain an approximation of the - * gradient. Pass it the DoF - * handler object that describes - * the finite element field, a - * nodal value vector, and - * receive the cell-wise - * Euclidian norm of the - * approximated gradient. - * - * The last parameter denotes the - * solution component, for which the - * gradient is to be computed. It - * defaults to the first component. For - * scalar elements, this is the only - * valid choice; for vector-valued ones, - * any component between zero and the - * number of vector components can be - * given here. - */ - template class DH, class InputVector, int spacedim> - static void - approximate_gradient (const Mapping &mapping, + /** + * Declare which data fields have + * to be updated for the function + * @p get_projected_derivative + * to work. + */ + static const UpdateFlags update_flags; + + /** + * Declare the data type which + * holds the derivative described + * by this class. + */ + typedef Tensor<1,dim> Derivative; + + /** + * Likewise declare the data type + * that holds the derivative + * projected to a certain + * directions. + */ + typedef double ProjectedDerivative; + + /** + * Given an FEValues object + * initialized to a cell, and a + * solution vector, extract the + * desired derivative at the + * first quadrature point (which + * is the only one, as we only + * evaluate the finite element + * field at the center of each + * cell). + */ + template + static ProjectedDerivative - get_projected_derivative (const FEValues &fe_values, ++ get_projected_derivative (const FEValues &fe_values, + const InputVector &solution, + const unsigned int component); + + /** + * Return the norm of the + * derivative object. Here, for + * the gradient, we choose the + * Euclidian norm of the gradient + * vector. + */ + static double derivative_norm (const Derivative &d); + + /** + * If for the present derivative + * order, symmetrization of the + * derivative tensor is + * necessary, then do so on the + * argument. + * + * For the first derivatives, no + * such thing is necessary, so + * this function is a no-op. + */ + static void symmetrize (Derivative &derivative_tensor); + }; + + + + /** + * The following class is used to + * describe the data needed to + * compute the finite difference + * approximation to the second + * derivatives on a cell. See the + * general documentation of this + * class for more information on + * implementational details. + * + * @author Wolfgang Bangerth, 2000 + */ + template + class SecondDerivative + { + public: + /** + * Declare which data fields have + * to be updated for the function + * @p get_projected_derivative + * to work. + */ + static const UpdateFlags update_flags; + + /** + * Declare the data type which + * holds the derivative described + * by this class. 
+ */ + typedef Tensor<2,dim> Derivative; + + /** + * Likewise declare the data type + * that holds the derivative + * projected to a certain + * directions. + */ + typedef Tensor<1,dim> ProjectedDerivative; + + /** + * Given an FEValues object + * initialized to a cell, and a + * solution vector, extract the + * desired derivative at the + * first quadrature point (which + * is the only one, as we only + * evaluate the finite element + * field at the center of each + * cell). + */ + template + static ProjectedDerivative - get_projected_derivative (const FEValues &fe_values, ++ get_projected_derivative (const FEValues &fe_values, + const InputVector &solution, + const unsigned int component); + + /** + * Return the norm of the + * derivative object. Here, for + * the (symmetric) tensor of + * second derivatives, we choose + * the absolute value of the + * largest eigenvalue, which is + * the matrix norm associated to + * the $l_2$ norm of vectors. It + * is also the largest value of + * the curvature of the solution. + */ + static double derivative_norm (const Derivative &d); + + /** + * If for the present derivative + * order, symmetrization of the + * derivative tensor is + * necessary, then do so on the + * argument. + * + * For the second derivatives, + * each entry of the tensor is + * set to the mean of its value + * and the value of the transpose + * element. + * + * Note that this function + * actually modifies its + * argument. + */ + static void symmetrize (Derivative &derivative_tensor); + }; + + template + class ThirdDerivative + { + public: + /** + * Declare which data fields have + * to be updated for the function + * @p get_projected_derivative + * to work. + */ + static const UpdateFlags update_flags; + + /** + * Declare the data type which + * holds the derivative described + * by this class. + */ + typedef Tensor<3,dim> Derivative; + + /** + * Likewise declare the data type + * that holds the derivative + * projected to a certain + * directions. + */ + typedef Tensor<2,dim> ProjectedDerivative; + + /** + * Given an FEValues object + * initialized to a cell, and a + * solution vector, extract the + * desired derivative at the + * first quadrature point (which + * is the only one, as we only + * evaluate the finite element + * field at the center of each + * cell). + */ + template + static ProjectedDerivative - get_projected_derivative (const FEValues &fe_values, ++ get_projected_derivative (const FEValues &fe_values, + const InputVector &solution, + const unsigned int component); + + /** + * Return the norm of the + * derivative object. Here, for + * the (symmetric) tensor of + * second derivatives, we choose + * the absolute value of the + * largest eigenvalue, which is + * the matrix norm associated to + * the $l_2$ norm of vectors. It + * is also the largest value of + * the curvature of the solution. + */ + static double derivative_norm (const Derivative &d); + + /** + * If for the present derivative + * order, symmetrization of the + * derivative tensor is + * necessary, then do so on the + * argument. + * + * For the second derivatives, + * each entry of the tensor is + * set to the mean of its value + * and the value of the transpose + * element. + * + * Note that this function + * actually modifies its + * argument. + */ + static void symmetrize (Derivative &derivative_tensor); + }; + + template + class DerivativeSelector + { + public: + /** + * typedef to select the + * DerivativeDescription corresponding + * to the orderth + * derivative. 
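The symmetrization described for this class simply averages each entry of the second-derivative tensor with its transposed counterpart. As a free-standing sketch of that rule (not the class member itself):

    template <int dim>
    void symmetrize_second_derivative (Tensor<2,dim> &d)
    {
      for (unsigned int i=0; i<dim; ++i)
        for (unsigned int j=i+1; j<dim; ++j)
          {
            const double mean = (d[i][j] + d[j][i]) / 2.;
            d[i][j] = d[j][i] = mean;
          }
    }
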
In this general template + * we set an unvalid typedef to void, + * the real typedefs have to be + * specialized. + */ + typedef void DerivDescr; + + }; + + template + class DerivativeSelector<1,dim> + { + public: + + typedef Gradient DerivDescr; + }; + + template + class DerivativeSelector<2,dim> + { + public: + + typedef SecondDerivative DerivDescr; + }; + + template + class DerivativeSelector<3,dim> + { + public: + + typedef ThirdDerivative DerivDescr; + }; + + + + + private: + + /** + * Convenience typedef denoting + * the range of indices on which + * a certain thread shall + * operate. + */ + typedef std::pair IndexInterval; + + /** + * Kind of the main function of + * this class. It is called by + * the public entry points to + * this class with the correct + * template first argument and + * then simply calls the + * @p approximate function, + * after setting up several + * threads and doing some + * administration that is + * independent of the actual + * derivative to be computed. + * + * The @p component argument + * denotes which component of the + * solution vector we are to work + * on. + */ + template class DH, class InputVector, int spacedim> + static void + approximate_derivative (const Mapping &mapping, const DH &dof, const InputVector &solution, - Vector &derivative_norm, - const unsigned int component = 0); - - /** - * Calls the @p interpolate - * function, see above, with - * mapping=MappingQ1@(). - */ - template class DH, class InputVector, int spacedim> - static void - approximate_gradient (const DH &dof, - const InputVector &solution, - Vector &derivative_norm, - const unsigned int component = 0); - - /** - * This function is the analogue - * to the one above, computing - * finite difference - * approximations of the tensor - * of second derivatives. Pass it - * the DoF handler object that - * describes the finite element - * field, a nodal value vector, - * and receive the cell-wise - * spectral norm of the - * approximated tensor of second - * derivatives. The spectral norm - * is the matrix norm associated - * to the $l_2$ vector norm. - * - * The last parameter denotes the - * solution component, for which - * the gradient is to be - * computed. It defaults to the - * first component. For - * scalar elements, this is the only - * valid choice; for vector-valued ones, - * any component between zero and the - * number of vector components can be - * given here. - */ - template class DH, class InputVector, int spacedim> - static void - approximate_second_derivative (const Mapping &mapping, - const DH &dof, - const InputVector &solution, - Vector &derivative_norm, - const unsigned int component = 0); - - /** - * Calls the @p interpolate - * function, see above, with - * mapping=MappingQ1@(). - */ - template class DH, class InputVector, int spacedim> - static void - approximate_second_derivative (const DH &dof, - const InputVector &solution, - Vector &derivative_norm, - const unsigned int component = 0); - - /** - * This function calculates the - * order-th order approximate - * derivative and returns the full tensor - * for a single cell. - * - * The last parameter denotes the - * solution component, for which - * the gradient is to be - * computed. It defaults to the - * first component. For - * scalar elements, this is the only - * valid choice; for vector-valued ones, - * any component between zero and the - * number of vector components can be - * given here. 
- */ - - template class DH, class InputVector, int order, int spacedim> - static void - approximate_derivative_tensor (const Mapping &mapping, - const DH &dof, - const InputVector &solution, - const typename DH::active_cell_iterator &cell, - Tensor &derivative, - const unsigned int component = 0); - - /** - * Same as above, with - * mapping=MappingQ1@(). - */ - - template class DH, class InputVector, int order, int spacedim> - static void - approximate_derivative_tensor (const DH &dof, - const InputVector &solution, - const typename DH::active_cell_iterator &cell, - Tensor &derivative, - const unsigned int component = 0); - - /** - * Return the norm of the derivative. - */ - template - static double - derivative_norm(const Tensor &derivative); - - /** - * Exception - */ - DeclException2 (ExcInvalidVectorLength, - int, int, - << "Vector has length " << arg1 << ", but should have " - << arg2); - /** - * Exception - */ - DeclException0 (ExcInsufficientDirections); - - private: - - /** - * The following class is used to - * describe the data needed to - * compute the finite difference - * approximation to the gradient - * on a cell. See the general - * documentation of this class - * for more information on - * implementational details. - * - * @author Wolfgang Bangerth, 2000 - */ - template - class Gradient - { - public: - /** - * Declare which data fields have - * to be updated for the function - * @p get_projected_derivative - * to work. - */ - static const UpdateFlags update_flags; - - /** - * Declare the data type which - * holds the derivative described - * by this class. - */ - typedef Tensor<1,dim> Derivative; - - /** - * Likewise declare the data type - * that holds the derivative - * projected to a certain - * directions. - */ - typedef double ProjectedDerivative; - - /** - * Given an FEValues object - * initialized to a cell, and a - * solution vector, extract the - * desired derivative at the - * first quadrature point (which - * is the only one, as we only - * evaluate the finite element - * field at the center of each - * cell). - */ - template - static ProjectedDerivative - get_projected_derivative (const FEValues &fe_values, - const InputVector &solution, - const unsigned int component); - - /** - * Return the norm of the - * derivative object. Here, for - * the gradient, we choose the - * Euclidian norm of the gradient - * vector. - */ - static double derivative_norm (const Derivative &d); - - /** - * If for the present derivative - * order, symmetrization of the - * derivative tensor is - * necessary, then do so on the - * argument. - * - * For the first derivatives, no - * such thing is necessary, so - * this function is a no-op. - */ - static void symmetrize (Derivative &derivative_tensor); - }; - - - - /** - * The following class is used to - * describe the data needed to - * compute the finite difference - * approximation to the second - * derivatives on a cell. See the - * general documentation of this - * class for more information on - * implementational details. - * - * @author Wolfgang Bangerth, 2000 - */ - template - class SecondDerivative - { - public: - /** - * Declare which data fields have - * to be updated for the function - * @p get_projected_derivative - * to work. - */ - static const UpdateFlags update_flags; - - /** - * Declare the data type which - * holds the derivative described - * by this class. - */ - typedef Tensor<2,dim> Derivative; - - /** - * Likewise declare the data type - * that holds the derivative - * projected to a certain - * directions. 
- */ - typedef Tensor<1,dim> ProjectedDerivative; - - /** - * Given an FEValues object - * initialized to a cell, and a - * solution vector, extract the - * desired derivative at the - * first quadrature point (which - * is the only one, as we only - * evaluate the finite element - * field at the center of each - * cell). - */ - template - static ProjectedDerivative - get_projected_derivative (const FEValues &fe_values, - const InputVector &solution, - const unsigned int component); - - /** - * Return the norm of the - * derivative object. Here, for - * the (symmetric) tensor of - * second derivatives, we choose - * the absolute value of the - * largest eigenvalue, which is - * the matrix norm associated to - * the $l_2$ norm of vectors. It - * is also the largest value of - * the curvature of the solution. - */ - static double derivative_norm (const Derivative &d); - - /** - * If for the present derivative - * order, symmetrization of the - * derivative tensor is - * necessary, then do so on the - * argument. - * - * For the second derivatives, - * each entry of the tensor is - * set to the mean of its value - * and the value of the transpose - * element. - * - * Note that this function - * actually modifies its - * argument. - */ - static void symmetrize (Derivative &derivative_tensor); - }; - - template - class ThirdDerivative - { - public: - /** - * Declare which data fields have - * to be updated for the function - * @p get_projected_derivative - * to work. - */ - static const UpdateFlags update_flags; - - /** - * Declare the data type which - * holds the derivative described - * by this class. - */ - typedef Tensor<3,dim> Derivative; - - /** - * Likewise declare the data type - * that holds the derivative - * projected to a certain - * directions. - */ - typedef Tensor<2,dim> ProjectedDerivative; - - /** - * Given an FEValues object - * initialized to a cell, and a - * solution vector, extract the - * desired derivative at the - * first quadrature point (which - * is the only one, as we only - * evaluate the finite element - * field at the center of each - * cell). - */ - template - static ProjectedDerivative - get_projected_derivative (const FEValues &fe_values, - const InputVector &solution, - const unsigned int component); - - /** - * Return the norm of the - * derivative object. Here, for - * the (symmetric) tensor of - * second derivatives, we choose - * the absolute value of the - * largest eigenvalue, which is - * the matrix norm associated to - * the $l_2$ norm of vectors. It - * is also the largest value of - * the curvature of the solution. - */ - static double derivative_norm (const Derivative &d); - - /** - * If for the present derivative - * order, symmetrization of the - * derivative tensor is - * necessary, then do so on the - * argument. - * - * For the second derivatives, - * each entry of the tensor is - * set to the mean of its value - * and the value of the transpose - * element. - * - * Note that this function - * actually modifies its - * argument. - */ - static void symmetrize (Derivative &derivative_tensor); - }; - - template - class DerivativeSelector - { - public: - /** - * typedef to select the - * DerivativeDescription corresponding - * to the orderth - * derivative. In this general template - * we set an unvalid typedef to void, - * the real typedefs have to be - * specialized. 
- */ - typedef void DerivDescr; - - }; - - template - class DerivativeSelector<1,dim> - { - public: - - typedef Gradient DerivDescr; - }; - - template - class DerivativeSelector<2,dim> - { - public: - - typedef SecondDerivative DerivDescr; - }; - - template - class DerivativeSelector<3,dim> - { - public: - - typedef ThirdDerivative DerivDescr; - }; - - - - - private: - - /** - * Convenience typedef denoting - * the range of indices on which - * a certain thread shall - * operate. - */ - typedef std::pair IndexInterval; - - /** - * Kind of the main function of - * this class. It is called by - * the public entry points to - * this class with the correct - * template first argument and - * then simply calls the - * @p approximate function, - * after setting up several - * threads and doing some - * administration that is - * independent of the actual - * derivative to be computed. - * - * The @p component argument - * denotes which component of the - * solution vector we are to work - * on. - */ - template class DH, class InputVector, int spacedim> - static void - approximate_derivative (const Mapping &mapping, - const DH &dof, - const InputVector &solution, - const unsigned int component, - Vector &derivative_norm); - - /** - * Compute the derivative - * approximation on the cells in - * the range given by the third - * parameter. - * Fill the @p derivative_norm vector with - * the norm of the computed derivative - * tensors on each cell. - */ - template class DH, class InputVector, int spacedim> - static void - approximate (const Mapping &mapping, - const DH &dof, - const InputVector &solution, - const unsigned int component, - const IndexInterval &index_interval, - Vector &derivative_norm); - - /** - * Compute the derivative approximation on - * one cell. This computes the full - * derivative tensor. - */ - template class DH, class InputVector, int spacedim> - static void - approximate_cell (const Mapping &mapping, - const DH &dof, - const InputVector &solution, - const unsigned int component, - const typename DH::active_cell_iterator &cell, - typename DerivativeDescription::Derivative &derivative); + const unsigned int component, + Vector &derivative_norm); + + /** + * Compute the derivative + * approximation on the cells in + * the range given by the third + * parameter. + * Fill the @p derivative_norm vector with + * the norm of the computed derivative + * tensors on each cell. + */ + template class DH, class InputVector, int spacedim> + static void + approximate (const Mapping &mapping, + const DH &dof, + const InputVector &solution, + const unsigned int component, + const IndexInterval &index_interval, + Vector &derivative_norm); + + /** + * Compute the derivative approximation on + * one cell. This computes the full + * derivative tensor. + */ + template class DH, class InputVector, int spacedim> + static void + approximate_cell (const Mapping &mapping, + const DH &dof, + const InputVector &solution, + const unsigned int component, - const typename DH::active_cell_iterator &cell, ++ const typename DH::active_cell_iterator &cell, + typename DerivativeDescription::Derivative &derivative); }; diff --cc deal.II/include/deal.II/numerics/error_estimator.h index fd78e11f4f,a17ce5ae70..5bdce1a2d1 --- a/deal.II/include/deal.II/numerics/error_estimator.h +++ b/deal.II/include/deal.II/numerics/error_estimator.h @@@ -554,250 -554,250 +554,250 @@@ public template class KellyErrorEstimator<1,spacedim> { - public: - /** - * Implementation of the error - * estimator described above. 
You - * may give a coefficient, but - * there is a default value which - * denotes the constant - * coefficient with value - * one. The coefficient function - * may either be a scalar one, in - * which case it is used for all - * components of the finite - * element, or a vector-valued - * one with as many components as - * there are in the finite - * element; in the latter case, - * each component is weighted by - * the respective component in - * the coefficient. - * - * You might give a list of components - * you want to evaluate, in case the - * finite element used by the DoFHandler - * object is vector-valued. You then have - * to set those entries to true in the - * bit-vector @p component_mask for which - * the respective component is to be used - * in the error estimator. The default is - * to use all components, which is done - * by either providing a bit-vector with - * all-set entries, or an empty - * bit-vector. All the other parameters - * are as in the general case used for 2d - * and higher. - * - * The estimator supports multithreading - * and splits the cells to - * multithread_info.n_default_threads - * (default) threads. The number of - * threads to be used in multithreaded - * mode can be set with the last - * parameter of the error estimator. - * Multithreading is not presently - * implemented for 1d, but we retain the - * respective parameter for compatibility - * with the function signature in the - * general case. - */ - template - static void estimate (const Mapping<1,spacedim> &mapping, - const DH &dof, - const Quadrature<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const InputVector &solution, - Vector &error, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - /** - * Calls the @p estimate - * function, see above, with - * mapping=MappingQ1<1>(). - */ - template - static void estimate (const DH &dof, - const Quadrature<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const InputVector &solution, - Vector &error, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - /** - * Same function as above, but - * accepts more than one solution - * vectors and returns one error - * vector for each solution - * vector. For the reason of - * existence of this function, - * see the general documentation - * of this class. - * - * Since we do not want to force - * the user of this function to - * copy around their solution - * vectors, the vector of - * solution vectors takes - * pointers to the solutions, - * rather than being a vector of - * vectors. This makes it simpler - * to have the solution vectors - * somewhere in memory, rather - * than to have them collected - * somewhere special. (Note that - * it is not possible to - * construct of vector of - * references, so we had to use a - * vector of pointers.) 
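The pointer-based interface described here is used by building two parallel vectors, one of solution pointers and one of error-vector pointers. A hedged sketch in a <dim>-templated context; solution_1/solution_2 are assumed solution vectors and error_1/error_2 are Vector<float> objects already sized to triangulation.n_active_cells():

    std::vector<const Vector<double> *> solutions;
    solutions.push_back (&solution_1);
    solutions.push_back (&solution_2);

    std::vector<Vector<float> *> errors;
    errors.push_back (&error_1);
    errors.push_back (&error_2);

    typename FunctionMap<dim>::type neumann_bc;   // empty map: no Neumann data
    KellyErrorEstimator<dim>::estimate (dof_handler,
                                        QGauss<dim-1>(2),
                                        neumann_bc,
                                        solutions,
                                        errors);
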
- */ - template - static void estimate (const Mapping<1,spacedim> &mapping, - const DH &dof, - const Quadrature<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - std::vector*> &errors, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - /** - * Calls the @p estimate - * function, see above, with - * mapping=MappingQ1<1>(). - */ - template - static void estimate (const DH &dof, - const Quadrature<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - std::vector*> &errors, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - - /** - * Equivalent to the set of functions - * above, except that this one takes a - * quadrature collection for hp finite - * element dof handlers. - */ - template - static void estimate (const Mapping<1,spacedim> &mapping, - const DH &dof, - const hp::QCollection<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const InputVector &solution, - Vector &error, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - - /** - * Equivalent to the set of functions - * above, except that this one takes a - * quadrature collection for hp finite - * element dof handlers. - */ - template - static void estimate (const DH &dof, - const hp::QCollection<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const InputVector &solution, - Vector &error, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - - /** - * Equivalent to the set of functions - * above, except that this one takes a - * quadrature collection for hp finite - * element dof handlers. - */ - template - static void estimate (const Mapping<1,spacedim> &mapping, - const DH &dof, - const hp::QCollection<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - std::vector*> &errors, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - - /** - * Equivalent to the set of functions - * above, except that this one takes a - * quadrature collection for hp finite - * element dof handlers. 
- */ - template - static void estimate (const DH &dof, - const hp::QCollection<0> &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - std::vector*> &errors, - const ComponentMask &component_mask = ComponentMask(), - const Function *coefficients = 0, - const unsigned int n_threads = multithread_info.n_default_threads, - const types::subdomain_id subdomain_id = types::invalid_subdomain_id, - const types::material_id material_id = numbers::invalid_material_id); - - /** - * Exception - */ - DeclException0 (ExcInvalidBoundaryIndicator); - /** - * Exception - */ - DeclException0 (ExcInvalidComponentMask); - /** - * Exception - */ - DeclException0 (ExcInvalidCoefficient); - /** - * Exception - */ - DeclException0 (ExcInvalidBoundaryFunction); - /** - * Exception - */ - DeclException2 (ExcIncompatibleNumberOfElements, - int, int, - << "The number of elements " << arg1 << " and " << arg2 - << " of the vectors do not match!"); - /** - * Exception - */ - DeclException0 (ExcInvalidSolutionVector); - /** - * Exception - */ - DeclException0 (ExcNoSolutions); + public: + /** + * Implementation of the error + * estimator described above. You + * may give a coefficient, but + * there is a default value which + * denotes the constant + * coefficient with value + * one. The coefficient function + * may either be a scalar one, in + * which case it is used for all + * components of the finite + * element, or a vector-valued + * one with as many components as + * there are in the finite + * element; in the latter case, + * each component is weighted by + * the respective component in + * the coefficient. + * + * You might give a list of components + * you want to evaluate, in case the + * finite element used by the DoFHandler + * object is vector-valued. You then have + * to set those entries to true in the + * bit-vector @p component_mask for which + * the respective component is to be used + * in the error estimator. The default is + * to use all components, which is done + * by either providing a bit-vector with + * all-set entries, or an empty + * bit-vector. All the other parameters + * are as in the general case used for 2d + * and higher. + * + * The estimator supports multithreading + * and splits the cells to + * multithread_info.n_default_threads + * (default) threads. The number of + * threads to be used in multithreaded + * mode can be set with the last + * parameter of the error estimator. + * Multithreading is not presently + * implemented for 1d, but we retain the + * respective parameter for compatibility + * with the function signature in the + * general case. + */ + template - static void estimate (const Mapping<1,spacedim> &mapping, ++ static void estimate (const Mapping<1,spacedim> &mapping, + const DH &dof, + const Quadrature<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const InputVector &solution, + Vector &error, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + /** + * Calls the @p estimate + * function, see above, with + * mapping=MappingQ1<1>(). 
+ */ + template + static void estimate (const DH &dof, + const Quadrature<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const InputVector &solution, + Vector &error, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + /** + * Same function as above, but + * accepts more than one solution + * vectors and returns one error + * vector for each solution + * vector. For the reason of + * existence of this function, + * see the general documentation + * of this class. + * + * Since we do not want to force + * the user of this function to + * copy around their solution + * vectors, the vector of + * solution vectors takes + * pointers to the solutions, + * rather than being a vector of + * vectors. This makes it simpler + * to have the solution vectors + * somewhere in memory, rather + * than to have them collected + * somewhere special. (Note that + * it is not possible to + * construct of vector of + * references, so we had to use a + * vector of pointers.) + */ + template + static void estimate (const Mapping<1,spacedim> &mapping, + const DH &dof, + const Quadrature<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const std::vector &solutions, + std::vector*> &errors, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + /** + * Calls the @p estimate + * function, see above, with + * mapping=MappingQ1<1>(). + */ + template + static void estimate (const DH &dof, + const Quadrature<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const std::vector &solutions, + std::vector*> &errors, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + + /** + * Equivalent to the set of functions + * above, except that this one takes a + * quadrature collection for hp finite + * element dof handlers. + */ + template + static void estimate (const Mapping<1,spacedim> &mapping, + const DH &dof, + const hp::QCollection<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const InputVector &solution, + Vector &error, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + + /** + * Equivalent to the set of functions + * above, except that this one takes a + * quadrature collection for hp finite + * element dof handlers. 
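Not part of the patch: a sketch of the pointer-based convention described in the comment above, with hypothetical solution vectors u1, u2 and error vectors e1, e2; the types follow the general-dimension interface of this class.

    std::vector<const Vector<double> *> solutions;
    solutions.push_back (&u1);
    solutions.push_back (&u2);

    std::vector<Vector<float> *> errors;
    errors.push_back (&e1);              // one error vector per solution vector
    errors.push_back (&e2);

    KellyErrorEstimator<dim>::estimate (dof_handler,
                                        QGauss<dim-1>(3),
                                        typename FunctionMap<dim>::type(),
                                        solutions,
                                        errors);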
+ */ + template + static void estimate (const DH &dof, + const hp::QCollection<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const InputVector &solution, + Vector &error, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + + /** + * Equivalent to the set of functions + * above, except that this one takes a + * quadrature collection for hp finite + * element dof handlers. + */ + template + static void estimate (const Mapping<1,spacedim> &mapping, + const DH &dof, + const hp::QCollection<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const std::vector &solutions, + std::vector*> &errors, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + + /** + * Equivalent to the set of functions + * above, except that this one takes a + * quadrature collection for hp finite + * element dof handlers. + */ + template + static void estimate (const DH &dof, + const hp::QCollection<0> &quadrature, + const typename FunctionMap::type &neumann_bc, + const std::vector &solutions, + std::vector*> &errors, + const ComponentMask &component_mask = ComponentMask(), + const Function *coefficients = 0, + const unsigned int n_threads = multithread_info.n_default_threads, + const types::subdomain_id subdomain_id = types::invalid_subdomain_id, + const types::material_id material_id = numbers::invalid_material_id); + + /** + * Exception + */ + DeclException0 (ExcInvalidBoundaryIndicator); + /** + * Exception + */ + DeclException0 (ExcInvalidComponentMask); + /** + * Exception + */ + DeclException0 (ExcInvalidCoefficient); + /** + * Exception + */ + DeclException0 (ExcInvalidBoundaryFunction); + /** + * Exception + */ + DeclException2 (ExcIncompatibleNumberOfElements, + int, int, + << "The number of elements " << arg1 << " and " << arg2 + << " of the vectors do not match!"); + /** + * Exception + */ + DeclException0 (ExcInvalidSolutionVector); + /** + * Exception + */ + DeclException0 (ExcNoSolutions); }; diff --cc deal.II/include/deal.II/numerics/matrix_tools.h index 882c422ad1,a34eec00b1..980235f1e8 --- a/deal.II/include/deal.II/numerics/matrix_tools.h +++ b/deal.II/include/deal.II/numerics/matrix_tools.h @@@ -829,99 -829,99 +829,99 @@@ namespace MatrixTool const bool eliminate_columns = true); #ifdef DEAL_II_USE_PETSC - /** - * Apply dirichlet boundary conditions to - * the system matrix and vectors as - * described in the general - * documentation. This function works on - * the classes that are used to wrap - * PETSc objects. - * - * Note that this function is not very - * efficient: it needs to alternatingly - * read and write into the matrix, a - * situation that PETSc does not handle - * too well. In addition, we only get rid - * of rows corresponding to boundary - * nodes, but the corresponding case of - * deleting the respective columns - * (i.e. if @p eliminate_columns is @p - * true) is not presently implemented, - * and probably will never because it is - * too expensive without direct access to - * the PETSc data structures. 
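Not part of the patch: the usage pattern the documentation above refers to (cf. step-17), sketched with placeholder names; the boundary-values map is typically produced by VectorTools::interpolate_boundary_values.

    std::map<unsigned int,double> boundary_values;
    VectorTools::interpolate_boundary_values (dof_handler,
                                              0,                      // boundary indicator
                                              ZeroFunction<dim>(),
                                              boundary_values);
    MatrixTools::apply_boundary_values (boundary_values,
                                        system_matrix,                // PETScWrappers::SparseMatrix
                                        solution,                     // PETScWrappers::Vector
                                        system_rhs,                   // PETScWrappers::Vector
                                        false);                       // column elimination is not implemented here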
(This leads - * to the situation where the action - * indicates by the default value of the - * last argument is actually not - * implemented; that argument has - * true as its default value - * to stay consistent with the other - * functions of same name in this class.) - * A third reason against this function - * is that it doesn't handle the case - * where the matrix is distributed across - * an MPI system. - * - * This function is used in - * step-17 and - * step-18. - */ + /** + * Apply dirichlet boundary conditions to + * the system matrix and vectors as + * described in the general + * documentation. This function works on + * the classes that are used to wrap + * PETSc objects. + * + * Note that this function is not very + * efficient: it needs to alternatingly + * read and write into the matrix, a + * situation that PETSc does not handle + * too well. In addition, we only get rid + * of rows corresponding to boundary + * nodes, but the corresponding case of + * deleting the respective columns + * (i.e. if @p eliminate_columns is @p + * true) is not presently implemented, + * and probably will never because it is + * too expensive without direct access to + * the PETSc data structures. (This leads + * to the situation where the action + * indicates by the default value of the + * last argument is actually not + * implemented; that argument has + * true as its default value + * to stay consistent with the other + * functions of same name in this class.) + * A third reason against this function + * is that it doesn't handle the case + * where the matrix is distributed across + * an MPI system. + * + * This function is used in + * step-17 and + * step-18. + */ void apply_boundary_values (const std::map &boundary_values, - PETScWrappers::SparseMatrix &matrix, - PETScWrappers::Vector &solution, - PETScWrappers::Vector &right_hand_side, + PETScWrappers::SparseMatrix &matrix, + PETScWrappers::Vector &solution, + PETScWrappers::Vector &right_hand_side, const bool eliminate_columns = true); - /** - * Same function, but for parallel PETSc - * matrices. - */ + /** + * Same function, but for parallel PETSc + * matrices. + */ void apply_boundary_values (const std::map &boundary_values, - PETScWrappers::MPI::SparseMatrix &matrix, - PETScWrappers::MPI::Vector &solution, - PETScWrappers::MPI::Vector &right_hand_side, + PETScWrappers::MPI::SparseMatrix &matrix, + PETScWrappers::MPI::Vector &solution, + PETScWrappers::MPI::Vector &right_hand_side, const bool eliminate_columns = true); - /** - * Same function, but for - * parallel PETSc matrices. Note - * that this function only - * operates on the local range of - * the parallel matrix, i.e. it - * only eliminates rows - * corresponding to degrees of - * freedom for which the row is - * stored on the present - * processor. All other boundary - * nodes are ignored, and it - * doesn't matter whether they - * are present in the first - * argument to this function or - * not. A consequence of this, - * however, is that this function - * has to be called from all - * processors that participate in - * sharing the contents of the - * given matrices and vectors. It - * is also implied that the local - * range for all objects passed - * to this function is the same. - */ + /** + * Same function, but for + * parallel PETSc matrices. Note + * that this function only + * operates on the local range of + * the parallel matrix, i.e. 
it + * only eliminates rows + * corresponding to degrees of + * freedom for which the row is + * stored on the present + * processor. All other boundary + * nodes are ignored, and it + * doesn't matter whether they + * are present in the first + * argument to this function or + * not. A consequence of this, + * however, is that this function + * has to be called from all + * processors that participate in + * sharing the contents of the + * given matrices and vectors. It + * is also implied that the local + * range for all objects passed + * to this function is the same. + */ void apply_boundary_values (const std::map &boundary_values, - PETScWrappers::MPI::SparseMatrix &matrix, + PETScWrappers::MPI::SparseMatrix &matrix, PETScWrappers::Vector &solution, - PETScWrappers::MPI::Vector &right_hand_side, + PETScWrappers::MPI::Vector &right_hand_side, const bool eliminate_columns = true); - /** - * Same as above but for BlockSparseMatrix. - */ + /** + * Same as above but for BlockSparseMatrix. + */ void - apply_boundary_values (const std::map &boundary_values, - PETScWrappers::MPI::BlockSparseMatrix &matrix, + apply_boundary_values (const std::map &boundary_values, + PETScWrappers::MPI::BlockSparseMatrix &matrix, PETScWrappers::MPI::BlockVector &solution, PETScWrappers::MPI::BlockVector &right_hand_side, const bool eliminate_columns = true); @@@ -929,114 -929,114 +929,114 @@@ #endif #ifdef DEAL_II_USE_TRILINOS - /** - * Apply dirichlet boundary - * conditions to the system matrix - * and vectors as described in the - * general documentation. This - * function works on the classes - * that are used to wrap Trilinos - * objects. - * - * Note that this function is not - * very efficient: it needs to - * alternatingly read and write - * into the matrix, a situation - * that Trilinos does not handle - * too well. In addition, we only - * get rid of rows corresponding to - * boundary nodes, but the - * corresponding case of deleting - * the respective columns (i.e. if - * @p eliminate_columns is @p true) - * is not presently implemented, - * and probably will never because - * it is too expensive without - * direct access to the Trilinos - * data structures. (This leads to - * the situation where the action - * indicates by the default value - * of the last argument is actually - * not implemented; that argument - * has true as its - * default value to stay consistent - * with the other functions of same - * name in this class.) A third - * reason against this function is - * that it doesn't handle the case - * where the matrix is distributed - * across an MPI system. - */ + /** + * Apply dirichlet boundary + * conditions to the system matrix + * and vectors as described in the + * general documentation. This + * function works on the classes + * that are used to wrap Trilinos + * objects. + * + * Note that this function is not + * very efficient: it needs to + * alternatingly read and write + * into the matrix, a situation + * that Trilinos does not handle + * too well. In addition, we only + * get rid of rows corresponding to + * boundary nodes, but the + * corresponding case of deleting + * the respective columns (i.e. if + * @p eliminate_columns is @p true) + * is not presently implemented, + * and probably will never because + * it is too expensive without + * direct access to the Trilinos + * data structures. 
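Not part of the patch: a sketch of the collective call implied by the local-range PETSc variant declared above; boundary_values, system_matrix, solution and system_rhs are placeholders.

    // This call has to be made on every MPI rank that shares the matrix; each
    // rank eliminates only the boundary rows it stores locally, and entries of
    // boundary_values that belong to other ranks are ignored.
    MatrixTools::apply_boundary_values (boundary_values,
                                        system_matrix,   // PETScWrappers::MPI::SparseMatrix
                                        solution,        // PETScWrappers::Vector
                                        system_rhs,      // PETScWrappers::MPI::Vector
                                        false);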
(This leads to + * the situation where the action + * indicates by the default value + * of the last argument is actually + * not implemented; that argument + * has true as its + * default value to stay consistent + * with the other functions of same + * name in this class.) A third + * reason against this function is + * that it doesn't handle the case + * where the matrix is distributed + * across an MPI system. + */ void apply_boundary_values (const std::map &boundary_values, - TrilinosWrappers::SparseMatrix &matrix, + TrilinosWrappers::SparseMatrix &matrix, TrilinosWrappers::Vector &solution, TrilinosWrappers::Vector &right_hand_side, const bool eliminate_columns = true); - /** - * This function does the same as - * the one above, except now - * working on block structures. - */ + /** + * This function does the same as + * the one above, except now + * working on block structures. + */ void apply_boundary_values (const std::map &boundary_values, - TrilinosWrappers::BlockSparseMatrix &matrix, + TrilinosWrappers::BlockSparseMatrix &matrix, TrilinosWrappers::BlockVector &solution, TrilinosWrappers::BlockVector &right_hand_side, const bool eliminate_columns = true); - /** - * Apply dirichlet boundary - * conditions to the system matrix - * and vectors as described in the - * general documentation. This - * function works on the classes - * that are used to wrap Trilinos - * objects. - * - * Note that this function is not - * very efficient: it needs to - * alternatingly read and write - * into the matrix, a situation - * that Trilinos does not handle - * too well. In addition, we only - * get rid of rows corresponding to - * boundary nodes, but the - * corresponding case of deleting - * the respective columns (i.e. if - * @p eliminate_columns is @p true) - * is not presently implemented, - * and probably will never because - * it is too expensive without - * direct access to the Trilinos - * data structures. (This leads to - * the situation where the action - * indicates by the default value - * of the last argument is actually - * not implemented; that argument - * has true as its - * default value to stay consistent - * with the other functions of same - * name in this class.) This - * function does work on MPI vector - * types. - */ + /** + * Apply dirichlet boundary + * conditions to the system matrix + * and vectors as described in the + * general documentation. This + * function works on the classes + * that are used to wrap Trilinos + * objects. + * + * Note that this function is not + * very efficient: it needs to + * alternatingly read and write + * into the matrix, a situation + * that Trilinos does not handle + * too well. In addition, we only + * get rid of rows corresponding to + * boundary nodes, but the + * corresponding case of deleting + * the respective columns (i.e. if + * @p eliminate_columns is @p true) + * is not presently implemented, + * and probably will never because + * it is too expensive without + * direct access to the Trilinos + * data structures. (This leads to + * the situation where the action + * indicates by the default value + * of the last argument is actually + * not implemented; that argument + * has true as its + * default value to stay consistent + * with the other functions of same + * name in this class.) This + * function does work on MPI vector + * types. 
+ */ void apply_boundary_values (const std::map &boundary_values, - TrilinosWrappers::SparseMatrix &matrix, + TrilinosWrappers::SparseMatrix &matrix, TrilinosWrappers::MPI::Vector &solution, TrilinosWrappers::MPI::Vector &right_hand_side, const bool eliminate_columns = true); - /** - * This function does the same as - * the one above, except now working - * on block structures. - */ + /** + * This function does the same as + * the one above, except now working + * on block structures. + */ void apply_boundary_values (const std::map &boundary_values, - TrilinosWrappers::BlockSparseMatrix &matrix, + TrilinosWrappers::BlockSparseMatrix &matrix, TrilinosWrappers::MPI::BlockVector &solution, TrilinosWrappers::MPI::BlockVector &right_hand_side, const bool eliminate_columns = true); diff --cc deal.II/include/deal.II/numerics/vector_tools.templates.h index ae40ad9d46,8546036d53..c907d85d53 --- a/deal.II/include/deal.II/numerics/vector_tools.templates.h +++ b/deal.II/include/deal.II/numerics/vector_tools.templates.h @@@ -2081,10 -2082,10 +2082,10 @@@ namespace VectorTool template void project_boundary_values (const Mapping &mapping, - const DoFHandler&dof, + const DoFHandler &dof, const typename FunctionMap::type &boundary_functions, const Quadrature &q, - std::map &boundary_values, + std::map &boundary_values, std::vector component_mapping) { //TODO:[?] In project_boundary_values, no condensation of sparsity diff --cc deal.II/source/base/parameter_handler.cc index a166766a8c,1975331b27..8227166e21 --- a/deal.II/source/base/parameter_handler.cc +++ b/deal.II/source/base/parameter_handler.cc @@@ -431,18 -431,18 +431,18 @@@ namespace Pattern const unsigned int List::max_int_value - = std::numeric_limits::max(); + = std::numeric_limits::max(); - const char* List::description_init = "[List"; + const char *List::description_init = "[List"; - List::List (const PatternBase &p, + List::List (const PatternBase &p, const unsigned int min_elements, const unsigned int max_elements) - : - pattern (p.clone()), - min_elements (min_elements), - max_elements (max_elements) + : + pattern (p.clone()), + min_elements (min_elements), + max_elements (max_elements) { Assert (min_elements <= max_elements, ExcInvalidRange (min_elements, max_elements)); @@@ -566,20 -566,20 +566,20 @@@ const unsigned int Map::max_int_value - = std::numeric_limits::max(); + = std::numeric_limits::max(); - const char* Map::description_init = "[Map"; + const char *Map::description_init = "[Map"; - Map::Map (const PatternBase &p_key, - const PatternBase &p_value, + Map::Map (const PatternBase &p_key, + const PatternBase &p_value, const unsigned int min_elements, const unsigned int max_elements) - : - key_pattern (p_key.clone()), - value_pattern (p_value.clone()), - min_elements (min_elements), - max_elements (max_elements) + : + key_pattern (p_key.clone()), + value_pattern (p_value.clone()), + min_elements (min_elements), + max_elements (max_elements) { Assert (min_elements <= max_elements, ExcInvalidRange (min_elements, max_elements)); diff --cc deal.II/source/base/quadrature.cc index 928d6b3b68,df037e405f..08f8a518de --- a/deal.II/source/base/quadrature.cc +++ b/deal.II/source/base/quadrature.cc @@@ -1193,94 -1201,106 +1201,106 @@@ subface (const unsigned int face_no Assert (subface_no < GeometryInfo::max_children_per_face, ExcInternalError()); - // As the quadrature points created by - // QProjector are on subfaces in their - // "standard location" we have to use a - // permutation of the equivalent subface - // number in order to respect 
face - // orientation, flip and rotation. The - // information we need here is exactly the - // same as the - // GeometryInfo<3>::child_cell_on_face info - // for the bottom face (face 4) of a hex, as - // on this the RefineCase of the cell matches - // that of the face and the subfaces are - // numbered in the same way as the child - // cells. - - // in 3d, we have to account for faces that - // have non-standard face orientation, flip - // and rotation. thus, we have to store - // _eight_ data sets per face or subface - // already for the isotropic - // case. Additionally, we have three - // different refinement cases, resulting in - // 4 + 2 + 2 = 8 different subfaces - // for each face. + // As the quadrature points created by + // QProjector are on subfaces in their + // "standard location" we have to use a + // permutation of the equivalent subface + // number in order to respect face + // orientation, flip and rotation. The + // information we need here is exactly the + // same as the + // GeometryInfo<3>::child_cell_on_face info + // for the bottom face (face 4) of a hex, as + // on this the RefineCase of the cell matches + // that of the face and the subfaces are + // numbered in the same way as the child + // cells. + + // in 3d, we have to account for faces that + // have non-standard face orientation, flip + // and rotation. thus, we have to store + // _eight_ data sets per face or subface + // already for the isotropic + // case. Additionally, we have three + // different refinement cases, resulting in + // 4 + 2 + 2 = 8 different subfaces + // for each face. const unsigned int total_subfaces_per_face=8; - // set up a table with the according offsets - // for non-standard orientation, first index: - // face_orientation (standard true=1), second - // index: face_flip (standard false=0), third - // index: face_rotation (standard false=0) - // - // note, that normally we should use the - // obvious offsets 0,1,2,3,4,5,6,7. However, - // prior to the changes enabling flipped and - // rotated faces, in many places of the - // library the convention was used, that the - // first dataset with offset 0 corresponds to - // a face in standard orientation. therefore - // we use the offsets 4,5,6,7,0,1,2,3 here to - // stick to that (implicit) convention + // set up a table with the according offsets + // for non-standard orientation, first index: + // face_orientation (standard true=1), second + // index: face_flip (standard false=0), third + // index: face_rotation (standard false=0) + // + // note, that normally we should use the + // obvious offsets 0,1,2,3,4,5,6,7. However, + // prior to the changes enabling flipped and + // rotated faces, in many places of the + // library the convention was used, that the + // first dataset with offset 0 corresponds to + // a face in standard orientation. 
therefore + // we use the offsets 4,5,6,7,0,1,2,3 here to + // stick to that (implicit) convention static const unsigned int orientation_offset[2][2][2]= - {{ - // face_orientation=false; face_flip=false; face_rotation=false and true - {4*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 5*GeometryInfo::faces_per_cell*total_subfaces_per_face}, - // face_orientation=false; face_flip=true; face_rotation=false and true - {6*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 7*GeometryInfo::faces_per_cell*total_subfaces_per_face}}, - { - // face_orientation=true; face_flip=false; face_rotation=false and true - {0*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 1*GeometryInfo::faces_per_cell*total_subfaces_per_face}, - // face_orientation=true; face_flip=true; face_rotation=false and true - {2*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 3*GeometryInfo::faces_per_cell*total_subfaces_per_face}}}; - - // set up a table with the offsets for a - // given refinement case respecting the - // corresponding number of subfaces. the - // index corresponds to (RefineCase::Type - 1) - - // note, that normally we should use the - // obvious offsets 0,2,6. However, prior to - // the implementation of anisotropic - // refinement, in many places of the library - // the convention was used, that the first - // dataset with offset 0 corresponds to a - // standard (isotropic) face - // refinement. therefore we use the offsets - // 6,4,0 here to stick to that (implicit) - // convention - static const unsigned int ref_case_offset[3]= + { { - 6, //cut_x - 4, //cut_y - 0 //cut_xy - }; + // face_orientation=false; face_flip=false; face_rotation=false and true + { + 4*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 5*GeometryInfo::faces_per_cell *total_subfaces_per_face ++ 5*GeometryInfo::faces_per_cell*total_subfaces_per_face + }, + // face_orientation=false; face_flip=true; face_rotation=false and true + { + 6*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 7*GeometryInfo::faces_per_cell *total_subfaces_per_face ++ 7*GeometryInfo::faces_per_cell*total_subfaces_per_face + } + }, + { + // face_orientation=true; face_flip=false; face_rotation=false and true + { + 0*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 1*GeometryInfo::faces_per_cell *total_subfaces_per_face ++ 1*GeometryInfo::faces_per_cell*total_subfaces_per_face + }, + // face_orientation=true; face_flip=true; face_rotation=false and true + { + 2*GeometryInfo::faces_per_cell*total_subfaces_per_face, - 3*GeometryInfo::faces_per_cell *total_subfaces_per_face ++ 3*GeometryInfo::faces_per_cell*total_subfaces_per_face + } + } + }; + + // set up a table with the offsets for a + // given refinement case respecting the + // corresponding number of subfaces. the + // index corresponds to (RefineCase::Type - 1) + + // note, that normally we should use the + // obvious offsets 0,2,6. However, prior to + // the implementation of anisotropic + // refinement, in many places of the library + // the convention was used, that the first + // dataset with offset 0 corresponds to a + // standard (isotropic) face + // refinement. therefore we use the offsets + // 6,4,0 here to stick to that (implicit) + // convention + static const unsigned int ref_case_offset[3]= + { + 6, //cut_x + 4, //cut_y + 0 //cut_xy + }; - // for each subface of a given FaceRefineCase - // there is a corresponding equivalent - // subface number of one of the "standard" - // RefineCases (cut_x, cut_y, cut_xy). 
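Not part of the patch, and only a rough illustration: the two tables above are presumably combined with the face number and the permuted subface number into a single index into the stored quadrature data, roughly along the following lines; the actual computation follows further down in this function and may differ in detail, and equivalent_subface_no is a placeholder for the permuted subface number.

    const unsigned int data_set_index
      = face_no * total_subfaces_per_face          // block belonging to this face
        + ref_case_offset[ref_case - 1]            // sub-block of the refinement case
        + equivalent_subface_no                    // subface after the permutation below
        + orientation_offset[face_orientation][face_flip][face_rotation];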
Map - // the given values to those equivalent - // ones. + // for each subface of a given FaceRefineCase + // there is a corresponding equivalent + // subface number of one of the "standard" + // RefineCases (cut_x, cut_y, cut_xy). Map + // the given values to those equivalent + // ones. - // first, define an invalid number + // first, define an invalid number static const unsigned int e = deal_II_numbers::invalid_unsigned_int; static const RefinementCase diff --cc deal.II/source/distributed/tria.cc index 6796e1c8d7,27fb2b1026..cc3d29cfb1 --- a/deal.II/source/distributed/tria.cc +++ b/deal.II/source/distributed/tria.cc @@@ -1181,18 -1181,18 +1181,18 @@@ namespac } else if (!p4est_has_children && !dealii_cell->has_children()) { - //this active cell didn't change + //this active cell didn't change typename internal::p4est::types::quadrant *q; - q = static_cast::quadrant*> ( - sc_array_index (const_cast(&tree.quadrants), idx) - ); - *static_cast::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation::CELL_PERSIST; - - for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin(); - it != attached_data_pack_callbacks.end(); - ++it) + q = static_cast::quadrant *> ( + sc_array_index (const_cast(&tree.quadrants), idx) + ); + *static_cast::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation::CELL_PERSIST; + + for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin(); + it != attached_data_pack_callbacks.end(); + ++it) { - void * ptr = static_cast(q->p.user_data) + (*it).first; //add offset - void *ptr = static_cast(q->p.user_data) + (*it).first; //add offset ++ void *ptr = static_cast(q->p.user_data) + (*it).first; //add offset ((*it).second)(dealii_cell, parallel::distributed::Triangulation::CELL_PERSIST, ptr); @@@ -1228,16 -1228,16 +1228,16 @@@ Assert(child0_idx != -1, ExcMessage("the first child should exist as an active quadrant!")); typename internal::p4est::types::quadrant *q; - q = static_cast::quadrant*> ( - sc_array_index (const_cast(&tree.quadrants), child0_idx) - ); - *static_cast::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation::CELL_REFINE; - - for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin(); - it != attached_data_pack_callbacks.end(); - ++it) + q = static_cast::quadrant *> ( + sc_array_index (const_cast(&tree.quadrants), child0_idx) + ); + *static_cast::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation::CELL_REFINE; + + for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin(); + it != attached_data_pack_callbacks.end(); + ++it) { - void * ptr = static_cast(q->p.user_data) + (*it).first; //add offset - void *ptr = static_cast(q->p.user_data) + (*it).first; //add offset ++ void *ptr = static_cast(q->p.user_data) + (*it).first; //add offset ((*it).second)(dealii_cell, parallel::distributed::Triangulation::CELL_REFINE, @@@ -1260,19 -1260,19 +1260,19 @@@ } else { - //it's children got coarsened into - //this cell + //it's children got coarsened into + //this cell typename internal::p4est::types::quadrant *q; - q = static_cast::quadrant*> ( - sc_array_index (const_cast(&tree.quadrants), idx) - ); - *static_cast::CellStatus*>(q->p.user_data) = parallel::distributed::Triangulation::CELL_COARSEN; - - for(typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin(); - it != attached_data_pack_callbacks.end(); - ++it) + q = static_cast::quadrant *> ( + sc_array_index 
(const_cast(&tree.quadrants), idx) + ); + *static_cast::CellStatus *>(q->p.user_data) = parallel::distributed::Triangulation::CELL_COARSEN; + + for (typename callback_list_t::const_iterator it = attached_data_pack_callbacks.begin(); + it != attached_data_pack_callbacks.end(); + ++it) { - void * ptr = static_cast(q->p.user_data) + (*it).first; //add offset - void *ptr = static_cast(q->p.user_data) + (*it).first; //add offset ++ void *ptr = static_cast(q->p.user_data) + (*it).first; //add offset ((*it).second)(dealii_cell, parallel::distributed::Triangulation::CELL_COARSEN, ptr); diff --cc deal.II/source/dofs/block_info.cc index 1ae1207819,445b6e4cef..fc695be921 --- a/deal.II/source/dofs/block_info.cc +++ b/deal.II/source/dofs/block_info.cc @@@ -25,27 -25,12 +25,29 @@@ DEAL_II_NAMESPACE_OPE template void - BlockInfo::initialize(const DoFHandler& dof, bool levels_only, bool multigrid) -BlockInfo::initialize(const DoFHandler &dof) ++BlockInfo::initialize(const DoFHandler &dof, bool levels_only, bool multigrid) { - if (!levels_only) { - const FiniteElement& fe = dof.get_fe(); - std::vector sizes(fe.n_blocks()); - DoFTools::count_dofs_per_block(dof, sizes); - bi_global.reinit(sizes); - } - - if (multigrid) { - std::vector > sizes (dof.get_tria ().n_levels ()); - - for (unsigned int i = 0; i < sizes.size (); ++i) - sizes[i].resize (dof.get_fe ().n_blocks ()); - - MGTools::count_dofs_per_block (dof, sizes); - levels.resize (sizes.size ()); - - for (unsigned int i = 0; i < sizes.size (); ++i) - levels[i].reinit (sizes[i]); - } - const FiniteElement &fe = dof.get_fe(); - std::vector sizes(fe.n_blocks()); - DoFTools::count_dofs_per_block(dof, sizes); - bi_global.reinit(sizes); ++ if (!levels_only) ++ { ++ const FiniteElement &fe = dof.get_fe(); ++ std::vector sizes(fe.n_blocks()); ++ DoFTools::count_dofs_per_block(dof, sizes); ++ bi_global.reinit(sizes); ++ } ++ ++ if (multigrid) ++ { ++ std::vector > sizes (dof.get_tria ().n_levels ()); ++ ++ for (unsigned int i = 0; i < sizes.size (); ++i) ++ sizes[i].resize (dof.get_fe ().n_blocks ()); ++ ++ MGTools::count_dofs_per_block (dof, sizes); ++ levels.resize (sizes.size ()); ++ ++ for (unsigned int i = 0; i < sizes.size (); ++i) ++ levels[i].reinit (sizes[i]); ++ } } @@@ -71,10 -56,10 +73,10 @@@ BlockInfo::initialize_local(const DoFHa template void - BlockInfo::initialize(const MGDoFHandler& dof, bool levels_only) + BlockInfo::initialize(const MGDoFHandler &dof, bool levels_only) { if (!levels_only) - initialize(static_cast&>(dof)); + initialize(static_cast&> (dof)); std::vector > sizes (dof.get_tria().n_levels()); for (unsigned int i=0; i - static - unsigned int - max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler) - { - return std::min(3*dof_handler.selected_fe->dofs_per_vertex + - 2*dof_handler.selected_fe->dofs_per_line, - dof_handler.n_dofs()); - } - - - - template - static - unsigned int - max_couplings_between_dofs (const DoFHandler<2,spacedim> &dof_handler) - { - - // get these numbers by drawing pictures - // and counting... - // example: - // | | | - // --x-----x--x--X-- - // | | | | - // | x--x--x - // | | | | - // --x--x--*--x--x-- - // | | | | - // x--x--x | - // | | | | - // --X--x--x-----x-- - // | | | - // x = vertices connected with center vertex *; - // = total of 19 - // (the X vertices are connected with * if - // the vertices adjacent to X are hanging - // nodes) - // count lines -> 28 (don't forget to count - // mother and children separately!) 
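Not part of the patch: a sketch of how the merged BlockInfo::initialize signature above might be used for a DoFHandler that also carries level (multigrid) degrees of freedom; dof_handler is a placeholder.

    BlockInfo block_info;
    block_info.initialize (dof_handler,
                           /*levels_only=*/false,   // also build the global block sizes
                           /*multigrid=*/true);     // additionally build one BlockIndices object per level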
- unsigned int max_couplings; - switch (dof_handler.tria->max_adjacent_cells()) - { - case 4: - max_couplings=19*dof_handler.selected_fe->dofs_per_vertex + - 28*dof_handler.selected_fe->dofs_per_line + - 8*dof_handler.selected_fe->dofs_per_quad; - break; - case 5: - max_couplings=21*dof_handler.selected_fe->dofs_per_vertex + - 31*dof_handler.selected_fe->dofs_per_line + - 9*dof_handler.selected_fe->dofs_per_quad; - break; - case 6: - max_couplings=28*dof_handler.selected_fe->dofs_per_vertex + - 42*dof_handler.selected_fe->dofs_per_line + - 12*dof_handler.selected_fe->dofs_per_quad; - break; - case 7: - max_couplings=30*dof_handler.selected_fe->dofs_per_vertex + - 45*dof_handler.selected_fe->dofs_per_line + - 13*dof_handler.selected_fe->dofs_per_quad; - break; - case 8: - max_couplings=37*dof_handler.selected_fe->dofs_per_vertex + - 56*dof_handler.selected_fe->dofs_per_line + - 16*dof_handler.selected_fe->dofs_per_quad; - break; - - // the following - // numbers are not - // based on actual - // counting but by - // extrapolating the - // number sequences - // from the previous - // ones (for example, - // for dofs_per_vertex, - // the sequence above - // is 19, 21, 28, 30, - // 37, and is continued - // as follows): - case 9: - max_couplings=39*dof_handler.selected_fe->dofs_per_vertex + - 59*dof_handler.selected_fe->dofs_per_line + - 17*dof_handler.selected_fe->dofs_per_quad; - break; - case 10: - max_couplings=46*dof_handler.selected_fe->dofs_per_vertex + - 70*dof_handler.selected_fe->dofs_per_line + - 20*dof_handler.selected_fe->dofs_per_quad; - break; - case 11: - max_couplings=48*dof_handler.selected_fe->dofs_per_vertex + - 73*dof_handler.selected_fe->dofs_per_line + - 21*dof_handler.selected_fe->dofs_per_quad; - break; - case 12: - max_couplings=55*dof_handler.selected_fe->dofs_per_vertex + - 84*dof_handler.selected_fe->dofs_per_line + - 24*dof_handler.selected_fe->dofs_per_quad; - break; - case 13: - max_couplings=57*dof_handler.selected_fe->dofs_per_vertex + - 87*dof_handler.selected_fe->dofs_per_line + - 25*dof_handler.selected_fe->dofs_per_quad; - break; - case 14: - max_couplings=63*dof_handler.selected_fe->dofs_per_vertex + - 98*dof_handler.selected_fe->dofs_per_line + - 28*dof_handler.selected_fe->dofs_per_quad; - break; - case 15: - max_couplings=65*dof_handler.selected_fe->dofs_per_vertex + - 103*dof_handler.selected_fe->dofs_per_line + - 29*dof_handler.selected_fe->dofs_per_quad; - break; - case 16: - max_couplings=72*dof_handler.selected_fe->dofs_per_vertex + - 114*dof_handler.selected_fe->dofs_per_line + - 32*dof_handler.selected_fe->dofs_per_quad; - break; - - default: - Assert (false, ExcNotImplemented()); - max_couplings=0; - } - return std::min(max_couplings,dof_handler.n_dofs()); - } - - - template - static - unsigned int - max_couplings_between_dofs (const DoFHandler<3,spacedim> &dof_handler) - { + /** + * Implement the function of same name in + * the mother class. + */ + template + static + unsigned int + max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler) + { + return std::min(3*dof_handler.selected_fe->dofs_per_vertex + + 2*dof_handler.selected_fe->dofs_per_line, + dof_handler.n_dofs()); + } + + + + template + static + unsigned int + max_couplings_between_dofs (const DoFHandler<2,spacedim> &dof_handler) + { + + // get these numbers by drawing pictures + // and counting... 
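A worked instance of the 1d estimate above: for FE_Q<1>(2) one has dofs_per_vertex = 1 and dofs_per_line = 1, so the bound is min(3*1 + 2*1, n_dofs) = min(5, n_dofs); a vertex degree of freedom couples with at most the three vertex DoFs of its cell and the two neighboring cells plus the two interior line DoFs of those neighbors.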
+ // example: + // | | | + // --x-----x--x--X-- + // | | | | + // | x--x--x + // | | | | + // --x--x--*--x--x-- + // | | | | + // x--x--x | + // | | | | + // --X--x--x-----x-- + // | | | + // x = vertices connected with center vertex *; + // = total of 19 + // (the X vertices are connected with * if + // the vertices adjacent to X are hanging + // nodes) + // count lines -> 28 (don't forget to count + // mother and children separately!) + unsigned int max_couplings; + switch (dof_handler.tria->max_adjacent_cells()) + { + case 4: + max_couplings=19*dof_handler.selected_fe->dofs_per_vertex + + 28*dof_handler.selected_fe->dofs_per_line + + 8*dof_handler.selected_fe->dofs_per_quad; + break; + case 5: + max_couplings=21*dof_handler.selected_fe->dofs_per_vertex + + 31*dof_handler.selected_fe->dofs_per_line + + 9*dof_handler.selected_fe->dofs_per_quad; + break; + case 6: + max_couplings=28*dof_handler.selected_fe->dofs_per_vertex + + 42*dof_handler.selected_fe->dofs_per_line + + 12*dof_handler.selected_fe->dofs_per_quad; + break; + case 7: + max_couplings=30*dof_handler.selected_fe->dofs_per_vertex + + 45*dof_handler.selected_fe->dofs_per_line + + 13*dof_handler.selected_fe->dofs_per_quad; + break; + case 8: + max_couplings=37*dof_handler.selected_fe->dofs_per_vertex + + 56*dof_handler.selected_fe->dofs_per_line + + 16*dof_handler.selected_fe->dofs_per_quad; + break; + + // the following + // numbers are not + // based on actual + // counting but by + // extrapolating the + // number sequences + // from the previous + // ones (for example, + // for dofs_per_vertex, + // the sequence above + // is 19, 21, 28, 30, + // 37, and is continued + // as follows): + case 9: + max_couplings=39*dof_handler.selected_fe->dofs_per_vertex + + 59*dof_handler.selected_fe->dofs_per_line + + 17*dof_handler.selected_fe->dofs_per_quad; + break; + case 10: + max_couplings=46*dof_handler.selected_fe->dofs_per_vertex + + 70*dof_handler.selected_fe->dofs_per_line + + 20*dof_handler.selected_fe->dofs_per_quad; + break; + case 11: + max_couplings=48*dof_handler.selected_fe->dofs_per_vertex + + 73*dof_handler.selected_fe->dofs_per_line + + 21*dof_handler.selected_fe->dofs_per_quad; + break; + case 12: + max_couplings=55*dof_handler.selected_fe->dofs_per_vertex + + 84*dof_handler.selected_fe->dofs_per_line + + 24*dof_handler.selected_fe->dofs_per_quad; + break; + case 13: + max_couplings=57*dof_handler.selected_fe->dofs_per_vertex + + 87*dof_handler.selected_fe->dofs_per_line + + 25*dof_handler.selected_fe->dofs_per_quad; + break; + case 14: + max_couplings=63*dof_handler.selected_fe->dofs_per_vertex + + 98*dof_handler.selected_fe->dofs_per_line + + 28*dof_handler.selected_fe->dofs_per_quad; + break; + case 15: + max_couplings=65*dof_handler.selected_fe->dofs_per_vertex + + 103*dof_handler.selected_fe->dofs_per_line + + 29*dof_handler.selected_fe->dofs_per_quad; + break; + case 16: + max_couplings=72*dof_handler.selected_fe->dofs_per_vertex + + 114*dof_handler.selected_fe->dofs_per_line + + 32*dof_handler.selected_fe->dofs_per_quad; + break; + + default: + Assert (false, ExcNotImplemented()); + max_couplings=0; + } + return std::min(max_couplings,dof_handler.n_dofs()); + } + + + template + static + unsigned int + max_couplings_between_dofs (const DoFHandler<3,spacedim> &dof_handler) + { //TODO:[?] 
Invent significantly better estimates than the ones in this function - // doing the same thing here is a - // rather complicated thing, compared - // to the 2d case, since it is hard - // to draw pictures with several - // refined hexahedra :-) so I - // presently only give a coarse - // estimate for the case that at most - // 8 hexes meet at each vertex - // - // can anyone give better estimate - // here? - const unsigned int max_adjacent_cells - = dof_handler.tria->max_adjacent_cells(); - - unsigned int max_couplings; - if (max_adjacent_cells <= 8) - max_couplings=7*7*7*dof_handler.selected_fe->dofs_per_vertex + - 7*6*7*3*dof_handler.selected_fe->dofs_per_line + - 9*4*7*3*dof_handler.selected_fe->dofs_per_quad + - 27*dof_handler.selected_fe->dofs_per_hex; - else - { - Assert (false, ExcNotImplemented()); - max_couplings=0; - } - - return std::min(max_couplings,dof_handler.n_dofs()); - } - - - /** - * Reserve enough space in the - * levels[] objects to store the - * numbers of the degrees of freedom - * needed for the given element. The - * given element is that one which - * was selected when calling - * @p distribute_dofs the last time. - */ - template - static - void reserve_space (DoFHandler<1,spacedim> &dof_handler) - { - dof_handler.vertex_dofs - .resize(dof_handler.tria->n_vertices() * - dof_handler.selected_fe->dofs_per_vertex, - DoFHandler<1,spacedim>::invalid_dof_index); - - for (unsigned int i=0; in_levels(); ++i) - { - dof_handler.levels - .push_back (new internal::DoFHandler::DoFLevel<1>); - - dof_handler.levels.back()->dof_object.dofs - .resize (dof_handler.tria->n_raw_cells(i) * - dof_handler.selected_fe->dofs_per_line, - DoFHandler<1,spacedim>::invalid_dof_index); - - dof_handler.levels.back()->cell_dof_indices_cache - .resize (dof_handler.tria->n_raw_cells(i) * - dof_handler.selected_fe->dofs_per_cell, - DoFHandler<1,spacedim>::invalid_dof_index); - } - } - - - template - static - void reserve_space (DoFHandler<2,spacedim> &dof_handler) - { - dof_handler.vertex_dofs - .resize(dof_handler.tria->n_vertices() * - dof_handler.selected_fe->dofs_per_vertex, - DoFHandler<2,spacedim>::invalid_dof_index); - - for (unsigned int i=0; in_levels(); ++i) - { - dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<2>); - - dof_handler.levels.back()->dof_object.dofs - .resize (dof_handler.tria->n_raw_cells(i) * - dof_handler.selected_fe->dofs_per_quad, - DoFHandler<2,spacedim>::invalid_dof_index); - - dof_handler.levels.back()->cell_dof_indices_cache - .resize (dof_handler.tria->n_raw_cells(i) * - dof_handler.selected_fe->dofs_per_cell, - DoFHandler<2,spacedim>::invalid_dof_index); + // doing the same thing here is a + // rather complicated thing, compared + // to the 2d case, since it is hard + // to draw pictures with several + // refined hexahedra :-) so I + // presently only give a coarse + // estimate for the case that at most + // 8 hexes meet at each vertex + // + // can anyone give better estimate + // here? 
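A worked instance of the coarse 3d bound above: for a trilinear FE_Q<3>(1) element only dofs_per_vertex = 1 is nonzero, so the estimate reduces to 7*7*7*1 = 343 couplings per row, i.e. roughly the vertex DoFs of a 7x7x7 patch of vertices around the current one, under the stated assumption of at most eight hexes meeting at a vertex.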
+ const unsigned int max_adjacent_cells + = dof_handler.tria->max_adjacent_cells(); + + unsigned int max_couplings; + if (max_adjacent_cells <= 8) + max_couplings=7*7*7*dof_handler.selected_fe->dofs_per_vertex + + 7*6*7*3*dof_handler.selected_fe->dofs_per_line + + 9*4*7*3*dof_handler.selected_fe->dofs_per_quad + + 27*dof_handler.selected_fe->dofs_per_hex; + else + { + Assert (false, ExcNotImplemented()); + max_couplings=0; + } + + return std::min(max_couplings,dof_handler.n_dofs()); + } + + + /** + * Reserve enough space in the + * levels[] objects to store the + * numbers of the degrees of freedom + * needed for the given element. The + * given element is that one which + * was selected when calling + * @p distribute_dofs the last time. + */ + template + static + void reserve_space (DoFHandler<1,spacedim> &dof_handler) + { + dof_handler.vertex_dofs + .resize(dof_handler.tria->n_vertices() * + dof_handler.selected_fe->dofs_per_vertex, + DoFHandler<1,spacedim>::invalid_dof_index); + + for (unsigned int i=0; in_levels(); ++i) + { + dof_handler.levels + .push_back (new internal::DoFHandler::DoFLevel<1>); + + dof_handler.levels.back()->dof_object.dofs + .resize (dof_handler.tria->n_raw_cells(i) * + dof_handler.selected_fe->dofs_per_line, + DoFHandler<1,spacedim>::invalid_dof_index); + + dof_handler.levels.back()->cell_dof_indices_cache + .resize (dof_handler.tria->n_raw_cells(i) * + dof_handler.selected_fe->dofs_per_cell, + DoFHandler<1,spacedim>::invalid_dof_index); + } + } + + + template + static + void reserve_space (DoFHandler<2,spacedim> &dof_handler) + { + dof_handler.vertex_dofs + .resize(dof_handler.tria->n_vertices() * + dof_handler.selected_fe->dofs_per_vertex, + DoFHandler<2,spacedim>::invalid_dof_index); + + for (unsigned int i=0; in_levels(); ++i) + { + dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<2>); + + dof_handler.levels.back()->dof_object.dofs + .resize (dof_handler.tria->n_raw_cells(i) * + dof_handler.selected_fe->dofs_per_quad, + DoFHandler<2,spacedim>::invalid_dof_index); + + dof_handler.levels.back()->cell_dof_indices_cache + .resize (dof_handler.tria->n_raw_cells(i) * + dof_handler.selected_fe->dofs_per_cell, + DoFHandler<2,spacedim>::invalid_dof_index); + } + + dof_handler.faces = new internal::DoFHandler::DoFFaces<2>; + dof_handler.faces->lines.dofs + .resize (dof_handler.tria->n_raw_lines() * + dof_handler.selected_fe->dofs_per_line, + DoFHandler<2,spacedim>::invalid_dof_index); + } + + + template + static + void reserve_space (DoFHandler<3,spacedim> &dof_handler) + { + dof_handler.vertex_dofs + .resize(dof_handler.tria->n_vertices() * + dof_handler.selected_fe->dofs_per_vertex, + DoFHandler<3,spacedim>::invalid_dof_index); + + for (unsigned int i=0; in_levels(); ++i) + { + dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<3>); + + dof_handler.levels.back()->dof_object.dofs + .resize (dof_handler.tria->n_raw_cells(i) * + dof_handler.selected_fe->dofs_per_hex, + DoFHandler<3,spacedim>::invalid_dof_index); + + dof_handler.levels.back()->cell_dof_indices_cache + .resize (dof_handler.tria->n_raw_cells(i) * + dof_handler.selected_fe->dofs_per_cell, + DoFHandler<3,spacedim>::invalid_dof_index); + } + dof_handler.faces = new internal::DoFHandler::DoFFaces<3>; + + dof_handler.faces->lines.dofs + .resize (dof_handler.tria->n_raw_lines() * + dof_handler.selected_fe->dofs_per_line, + DoFHandler<3,spacedim>::invalid_dof_index); + dof_handler.faces->quads.dofs + .resize (dof_handler.tria->n_raw_quads() * + dof_handler.selected_fe->dofs_per_quad, 
+ DoFHandler<3,spacedim>::invalid_dof_index); + } ++ ++ template ++ static ++ void reserve_space_mg (DoFHandler<1, spacedim> &dof_handler) ++ { ++ Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation")); ++ dof_handler.clear_mg_space (); ++ ++ const dealii::Triangulation<1, spacedim> &tria = dof_handler.get_tria (); ++ const unsigned int &dofs_per_line = dof_handler.get_fe ().dofs_per_line; ++ const unsigned int &n_levels = tria.n_levels (); ++ ++ for (unsigned int i = 0; i < n_levels; ++i) ++ { ++ dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<1>); ++ dof_handler.mg_levels.back ()->dof_object.dofs = std::vector (tria.n_raw_lines (i) * dofs_per_line, DoFHandler<1>::invalid_dof_index); ++ } ++ ++ const unsigned int &n_vertices = tria.n_vertices (); ++ ++ dof_handler.mg_vertex_dofs.resize (n_vertices); ++ ++ std::vector max_level (n_vertices, 0); ++ std::vector min_level (n_vertices, n_levels); ++ ++ for (typename dealii::Triangulation<1, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) ++ { ++ const unsigned int level = cell->level (); ++ ++ for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex) ++ { ++ const unsigned int vertex_index = cell->vertex_index (vertex); ++ ++ if (min_level[vertex_index] > level) ++ min_level[vertex_index] = level; ++ ++ if (max_level[vertex_index] < level) ++ max_level[vertex_index] = level; + } - - dof_handler.faces = new internal::DoFHandler::DoFFaces<2>; - dof_handler.faces->lines.dofs - .resize (dof_handler.tria->n_raw_lines() * - dof_handler.selected_fe->dofs_per_line, - DoFHandler<2,spacedim>::invalid_dof_index); - } - - - template - static - void reserve_space (DoFHandler<3,spacedim> &dof_handler) - { - dof_handler.vertex_dofs - .resize(dof_handler.tria->n_vertices() * - dof_handler.selected_fe->dofs_per_vertex, - DoFHandler<3,spacedim>::invalid_dof_index); - - for (unsigned int i=0; in_levels(); ++i) - { - dof_handler.levels.push_back (new internal::DoFHandler::DoFLevel<3>); - - dof_handler.levels.back()->dof_object.dofs - .resize (dof_handler.tria->n_raw_cells(i) * - dof_handler.selected_fe->dofs_per_hex, - DoFHandler<3,spacedim>::invalid_dof_index); - - dof_handler.levels.back()->cell_dof_indices_cache - .resize (dof_handler.tria->n_raw_cells(i) * - dof_handler.selected_fe->dofs_per_cell, - DoFHandler<3,spacedim>::invalid_dof_index); ++ } ++ ++ for (unsigned int vertex = 0; vertex < n_vertices; ++vertex) ++ if (tria.vertex_used (vertex)) ++ { ++ Assert (min_level[vertex] < n_levels, ExcInternalError ()); ++ Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ()); ++ dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], dof_handler.get_fe ().dofs_per_vertex); ++ } ++ ++ else ++ { ++ Assert (min_level[vertex] == n_levels, ExcInternalError ()); ++ Assert (max_level[vertex] == 0, ExcInternalError ()); ++ dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0); ++ } ++ } ++ ++ template ++ static ++ void reserve_space_mg (DoFHandler<2, spacedim> &dof_handler) ++ { ++ Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation")); ++ dof_handler.clear_mg_space (); ++ ++ const dealii::FiniteElement<2, spacedim> &fe = dof_handler.get_fe (); ++ const dealii::Triangulation<2, spacedim> &tria = dof_handler.get_tria (); ++ const unsigned int &n_levels = tria.n_levels (); ++ ++ for (unsigned int i = 0; i < n_levels; ++i) ++ { ++ dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<2>); ++ 
dof_handler.mg_levels.back ()->dof_object.dofs = std::vector (tria.n_raw_quads (i) * fe.dofs_per_quad, DoFHandler<2>::invalid_dof_index); ++ } ++ ++ dof_handler.mg_faces = new internal::DoFHandler::DoFFaces<2>; ++ dof_handler.mg_faces->lines.dofs = std::vector (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<2>::invalid_dof_index); ++ ++ const unsigned int &n_vertices = tria.n_vertices (); ++ ++ dof_handler.mg_vertex_dofs.resize (n_vertices); ++ ++ std::vector max_level (n_vertices, 0); ++ std::vector min_level (n_vertices, n_levels); ++ ++ for (typename dealii::Triangulation<2, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) ++ { ++ const unsigned int level = cell->level (); ++ ++ for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex) ++ { ++ const unsigned int vertex_index = cell->vertex_index (vertex); ++ ++ if (min_level[vertex_index] > level) ++ min_level[vertex_index] = level; ++ ++ if (max_level[vertex_index] < level) ++ max_level[vertex_index] = level; + } - dof_handler.faces = new internal::DoFHandler::DoFFaces<3>; - - dof_handler.faces->lines.dofs - .resize (dof_handler.tria->n_raw_lines() * - dof_handler.selected_fe->dofs_per_line, - DoFHandler<3,spacedim>::invalid_dof_index); - dof_handler.faces->quads.dofs - .resize (dof_handler.tria->n_raw_quads() * - dof_handler.selected_fe->dofs_per_quad, - DoFHandler<3,spacedim>::invalid_dof_index); - } - - template - static - void reserve_space_mg (DoFHandler<1, spacedim>& dof_handler) { - Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation")); - dof_handler.clear_mg_space (); - - const dealii::Triangulation<1, spacedim>& tria = dof_handler.get_tria (); - const unsigned int& dofs_per_line = dof_handler.get_fe ().dofs_per_line; - const unsigned int& n_levels = tria.n_levels (); - - for (unsigned int i = 0; i < n_levels; ++i) { - dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<1>); - dof_handler.mg_levels.back ()->dof_object.dofs = std::vector (tria.n_raw_lines (i) * dofs_per_line, DoFHandler<1>::invalid_dof_index); - } - - const unsigned int& n_vertices = tria.n_vertices (); - - dof_handler.mg_vertex_dofs.resize (n_vertices); - - std::vector max_level (n_vertices, 0); - std::vector min_level (n_vertices, n_levels); - - for (typename dealii::Triangulation<1, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) { - const unsigned int level = cell->level (); - - for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex) { - const unsigned int vertex_index = cell->vertex_index (vertex); - - if (min_level[vertex_index] > level) - min_level[vertex_index] = level; - - if (max_level[vertex_index] < level) - max_level[vertex_index] = level; - } - } - - for (unsigned int vertex = 0; vertex < n_vertices; ++vertex) - if (tria.vertex_used (vertex)) { - Assert (min_level[vertex] < n_levels, ExcInternalError ()); - Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ()); - dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], dof_handler.get_fe ().dofs_per_vertex); - } - - else { - Assert (min_level[vertex] == n_levels, ExcInternalError ()); - Assert (max_level[vertex] == 0, ExcInternalError ()); - dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0); - } - } - - template - static - void reserve_space_mg (DoFHandler<2, spacedim>& dof_handler) { - Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation")); - dof_handler.clear_mg_space 
(); - - const dealii::FiniteElement<2, spacedim>& fe = dof_handler.get_fe (); - const dealii::Triangulation<2, spacedim>& tria = dof_handler.get_tria (); - const unsigned int& n_levels = tria.n_levels (); - - for (unsigned int i = 0; i < n_levels; ++i) { - dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<2>); - dof_handler.mg_levels.back ()->dof_object.dofs = std::vector (tria.n_raw_quads (i) * fe.dofs_per_quad, DoFHandler<2>::invalid_dof_index); - } - - dof_handler.mg_faces = new internal::DoFHandler::DoFFaces<2>; - dof_handler.mg_faces->lines.dofs = std::vector (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<2>::invalid_dof_index); - - const unsigned int& n_vertices = tria.n_vertices (); - - dof_handler.mg_vertex_dofs.resize (n_vertices); - - std::vector max_level (n_vertices, 0); - std::vector min_level (n_vertices, n_levels); - - for (typename dealii::Triangulation<2, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) { - const unsigned int level = cell->level (); - - for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex) { - const unsigned int vertex_index = cell->vertex_index (vertex); - - if (min_level[vertex_index] > level) - min_level[vertex_index] = level; - - if (max_level[vertex_index] < level) - max_level[vertex_index] = level; - } - } - - for (unsigned int vertex = 0; vertex < n_vertices; ++vertex) - if (tria.vertex_used (vertex)) { - Assert (min_level[vertex] < n_levels, ExcInternalError ()); - Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ()); - dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex); - } - - else { - Assert (min_level[vertex] == n_levels, ExcInternalError ()); - Assert (max_level[vertex] == 0, ExcInternalError ()); - dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0); - } - } - - template - static - void reserve_space_mg (DoFHandler<3, spacedim>& dof_handler) { - Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation")); - dof_handler.clear_mg_space (); - - const dealii::FiniteElement<3, spacedim>& fe = dof_handler.get_fe (); - const dealii::Triangulation<3, spacedim>& tria = dof_handler.get_tria (); - const unsigned int& n_levels = tria.n_levels (); - - for (unsigned int i = 0; i < n_levels; ++i) { - dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<3>); - dof_handler.mg_levels.back ()->dof_object.dofs = std::vector (tria.n_raw_hexs (i) * fe.dofs_per_hex, DoFHandler<3>::invalid_dof_index); - } - - dof_handler.mg_faces = new internal::DoFHandler::DoFFaces<3>; - dof_handler.mg_faces->lines.dofs = std::vector (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<3>::invalid_dof_index); - dof_handler.mg_faces->quads.dofs = std::vector (tria.n_raw_quads () * fe.dofs_per_quad, DoFHandler<3>::invalid_dof_index); - - const unsigned int& n_vertices = tria.n_vertices (); - - dof_handler.mg_vertex_dofs.resize (n_vertices); - - std::vector max_level (n_vertices, 0); - std::vector min_level (n_vertices, n_levels); - - for (typename dealii::Triangulation<3, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) { - const unsigned int level = cell->level (); - - for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex) { - const unsigned int vertex_index = cell->vertex_index (vertex); - - if (min_level[vertex_index] > level) - min_level[vertex_index] = level; - - if (max_level[vertex_index] < level) - max_level[vertex_index] = level; - } - } 
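Not part of the patch: the reserve_space_mg functions above all use the same bookkeeping to decide on which level range each vertex needs multigrid DoF storage; a standalone sketch of that pattern for an arbitrary Triangulation<dim> named tria.

    const unsigned int n_levels   = tria.n_levels ();
    const unsigned int n_vertices = tria.n_vertices ();

    std::vector<unsigned int> min_level (n_vertices, n_levels);   // sentinel: no cell seen yet
    std::vector<unsigned int> max_level (n_vertices, 0);

    for (typename Triangulation<dim>::cell_iterator cell = tria.begin ();
         cell != tria.end (); ++cell)
      for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
        {
          const unsigned int vertex_index = cell->vertex_index (v);
          const unsigned int level        = cell->level ();

          if (min_level[vertex_index] > level)
            min_level[vertex_index] = level;
          if (max_level[vertex_index] < level)
            max_level[vertex_index] = level;
        }

    // afterwards, vertex i needs level DoFs on levels min_level[i]..max_level[i];
    // vertices not used by any cell keep the sentinel pair (n_levels, 0).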
- - for (unsigned int vertex = 0; vertex < n_vertices; ++vertex) - if (tria.vertex_used (vertex)) { - Assert (min_level[vertex] < n_levels, ExcInternalError ()); - Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ()); - dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex); - } - - else { - Assert (min_level[vertex] == n_levels, ExcInternalError ()); - Assert (max_level[vertex] == 0, ExcInternalError ()); - dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0); - } - } - - template - static - unsigned int distribute_dofs_on_cell (typename DoFHandler<1, spacedim>::cell_iterator& cell, unsigned int next_free_dof) { - const FiniteElement<1, spacedim>& fe = cell->get_fe (); - - if (fe.dofs_per_vertex > 0) - for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex) { - typename DoFHandler<1, spacedim>::cell_iterator neighbor = cell->neighbor (vertex); - - if (neighbor.state () == IteratorState::valid) - if (neighbor->user_flag_set () && (neighbor->level () == cell->level ())) { - if (vertex == 0) - for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) - cell->set_mg_vertex_dof_index (cell->level (), 0, dof, neighbor->mg_vertex_dof_index (cell->level (), 1, dof)); - - else - for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) - cell->set_mg_vertex_dof_index (cell->level (), 1, dof, neighbor->mg_vertex_dof_index (cell->level (), 0, dof)); - - continue; - } - - for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) - cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++); - } - - if (fe.dofs_per_line > 0) - for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) - cell->set_mg_dof_index (cell->level (), dof, next_free_dof++); - - cell->set_user_flag (); - return next_free_dof; - } - - template - static - unsigned int distribute_dofs_on_cell (typename DoFHandler<2, spacedim>::cell_iterator& cell, unsigned int next_free_dof) { - const FiniteElement<2, spacedim>& fe = cell->get_fe (); - - if (fe.dofs_per_vertex > 0) - for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex) - if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<2>::invalid_dof_index) - for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) - cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++); - - if (fe.dofs_per_line > 0) - for (unsigned int face = 0; face < GeometryInfo<2>::faces_per_cell; ++face) { - typename DoFHandler<2, spacedim>::line_iterator line = cell->line (face); - - if (line->mg_dof_index (cell->level (), 0) == DoFHandler<2>::invalid_dof_index) - for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) - line->set_mg_dof_index (cell->level (), dof, next_free_dof++); - } - - if (fe.dofs_per_quad > 0) - for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof) - cell->set_mg_dof_index (cell->level (), dof, next_free_dof++); - - cell->set_user_flag (); - return next_free_dof; - } - - template - static - unsigned int distribute_dofs_on_cell (typename DoFHandler<3, spacedim>::cell_iterator& cell, unsigned int next_free_dof) { - const FiniteElement<3, spacedim>& fe = cell->get_fe (); - - if (fe.dofs_per_vertex > 0) - for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex) - if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<3>::invalid_dof_index) - for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) - cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, 
next_free_dof++); - - if (fe.dofs_per_line > 0) - for (unsigned int line = 0; line < GeometryInfo<3>::lines_per_cell; ++line) { - typename DoFHandler<3, spacedim>::line_iterator line_it = cell->line (line); - - if (line_it->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index) - for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) - line_it->set_mg_dof_index (cell->level (), dof, next_free_dof++); - } - - if (fe.dofs_per_quad > 0) - for (unsigned int face = 0; face < GeometryInfo<3>::quads_per_cell; ++face) { - typename DoFHandler<3, spacedim>::quad_iterator quad = cell->quad (face); - - if (quad->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index) - for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof) - quad->set_mg_dof_index (cell->level (), dof, next_free_dof++); - } - - if (fe.dofs_per_hex > 0) - for (unsigned int dof = 0; dof < fe.dofs_per_hex; ++dof) - cell->set_mg_dof_index (cell->level (), dof, next_free_dof++); - - cell->set_user_flag (); - return next_free_dof; - } - - template - static - unsigned int get_dof_index (const DoFHandler<1, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<1>& mg_level, internal::DoFHandler::DoFFaces<1>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) { - return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index); - } - - template - static - unsigned int get_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>&, internal::DoFHandler::DoFFaces<2>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) { - return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index); - } - - template - static - unsigned int get_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>& mg_level, internal::DoFHandler::DoFFaces<2>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>) { - return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index); - } - - template - static - unsigned int get_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) { - return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index); - } - - template - static - unsigned int get_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>) { - return mg_faces.quads.get_dof_index (dof_handler, obj_index, fe_index, local_index); - } - - template - static - unsigned int get_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>& mg_level, internal::DoFHandler::DoFFaces<3>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<3>) { - return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index); - } - - template - static - void set_dof_index (const DoFHandler<1, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<1>& mg_level, internal::DoFHandler::DoFFaces<1>&, const unsigned int obj_index, const unsigned int 
fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<1>) { - mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); - } - - template - static - void set_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>&, internal::DoFHandler::DoFFaces<2>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<1>) { - mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); - } - - template - static - void set_dof_index (const DoFHandler<2, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<2>& mg_level, internal::DoFHandler::DoFFaces<2>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<2>) { - mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); - } - - template - static - void set_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<1>) { - mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); - } - - template - static - void set_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>&, internal::DoFHandler::DoFFaces<3>& mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<2>) { - mg_faces.quads.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); - } - - template - static - void set_dof_index (const DoFHandler<3, spacedim>& dof_handler, internal::DoFHandler::DoFLevel<3>& mg_level, internal::DoFHandler::DoFFaces<3>&, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<3>) { - mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); - } ++ } ++ ++ for (unsigned int vertex = 0; vertex < n_vertices; ++vertex) ++ if (tria.vertex_used (vertex)) ++ { ++ Assert (min_level[vertex] < n_levels, ExcInternalError ()); ++ Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ()); ++ dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex); ++ } ++ ++ else ++ { ++ Assert (min_level[vertex] == n_levels, ExcInternalError ()); ++ Assert (max_level[vertex] == 0, ExcInternalError ()); ++ dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0); ++ } ++ } ++ ++ template ++ static ++ void reserve_space_mg (DoFHandler<3, spacedim> &dof_handler) ++ { ++ Assert (dof_handler.get_tria ().n_levels () > 0, ExcMessage ("Invalid triangulation")); ++ dof_handler.clear_mg_space (); ++ ++ const dealii::FiniteElement<3, spacedim> &fe = dof_handler.get_fe (); ++ const dealii::Triangulation<3, spacedim> &tria = dof_handler.get_tria (); ++ const unsigned int &n_levels = tria.n_levels (); ++ ++ for (unsigned int i = 0; i < n_levels; ++i) ++ { ++ dof_handler.mg_levels.push_back (new internal::DoFHandler::DoFLevel<3>); ++ dof_handler.mg_levels.back ()->dof_object.dofs = std::vector (tria.n_raw_hexs (i) * fe.dofs_per_hex, DoFHandler<3>::invalid_dof_index); ++ } ++ ++ dof_handler.mg_faces = 
new internal::DoFHandler::DoFFaces<3>; ++ dof_handler.mg_faces->lines.dofs = std::vector (tria.n_raw_lines () * fe.dofs_per_line, DoFHandler<3>::invalid_dof_index); ++ dof_handler.mg_faces->quads.dofs = std::vector (tria.n_raw_quads () * fe.dofs_per_quad, DoFHandler<3>::invalid_dof_index); ++ ++ const unsigned int &n_vertices = tria.n_vertices (); ++ ++ dof_handler.mg_vertex_dofs.resize (n_vertices); ++ ++ std::vector max_level (n_vertices, 0); ++ std::vector min_level (n_vertices, n_levels); ++ ++ for (typename dealii::Triangulation<3, spacedim>::cell_iterator cell = tria.begin (); cell != tria.end (); ++cell) ++ { ++ const unsigned int level = cell->level (); ++ ++ for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex) ++ { ++ const unsigned int vertex_index = cell->vertex_index (vertex); ++ ++ if (min_level[vertex_index] > level) ++ min_level[vertex_index] = level; ++ ++ if (max_level[vertex_index] < level) ++ max_level[vertex_index] = level; ++ } ++ } ++ ++ for (unsigned int vertex = 0; vertex < n_vertices; ++vertex) ++ if (tria.vertex_used (vertex)) ++ { ++ Assert (min_level[vertex] < n_levels, ExcInternalError ()); ++ Assert (max_level[vertex] >= min_level[vertex], ExcInternalError ()); ++ dof_handler.mg_vertex_dofs[vertex].init (min_level[vertex], max_level[vertex], fe.dofs_per_vertex); ++ } ++ ++ else ++ { ++ Assert (min_level[vertex] == n_levels, ExcInternalError ()); ++ Assert (max_level[vertex] == 0, ExcInternalError ()); ++ dof_handler.mg_vertex_dofs[vertex].init (1, 0, 0); ++ } ++ } ++ ++ template ++ static ++ unsigned int distribute_dofs_on_cell (typename DoFHandler<1, spacedim>::cell_iterator &cell, unsigned int next_free_dof) ++ { ++ const FiniteElement<1, spacedim> &fe = cell->get_fe (); ++ ++ if (fe.dofs_per_vertex > 0) ++ for (unsigned int vertex = 0; vertex < GeometryInfo<1>::vertices_per_cell; ++vertex) ++ { ++ typename DoFHandler<1, spacedim>::cell_iterator neighbor = cell->neighbor (vertex); ++ ++ if (neighbor.state () == IteratorState::valid) ++ if (neighbor->user_flag_set () && (neighbor->level () == cell->level ())) ++ { ++ if (vertex == 0) ++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) ++ cell->set_mg_vertex_dof_index (cell->level (), 0, dof, neighbor->mg_vertex_dof_index (cell->level (), 1, dof)); ++ ++ else ++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) ++ cell->set_mg_vertex_dof_index (cell->level (), 1, dof, neighbor->mg_vertex_dof_index (cell->level (), 0, dof)); ++ ++ continue; ++ } ++ ++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) ++ cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++); ++ } ++ ++ if (fe.dofs_per_line > 0) ++ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) ++ cell->set_mg_dof_index (cell->level (), dof, next_free_dof++); ++ ++ cell->set_user_flag (); ++ return next_free_dof; ++ } ++ ++ template ++ static ++ unsigned int distribute_dofs_on_cell (typename DoFHandler<2, spacedim>::cell_iterator &cell, unsigned int next_free_dof) ++ { ++ const FiniteElement<2, spacedim> &fe = cell->get_fe (); ++ ++ if (fe.dofs_per_vertex > 0) ++ for (unsigned int vertex = 0; vertex < GeometryInfo<2>::vertices_per_cell; ++vertex) ++ if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<2>::invalid_dof_index) ++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) ++ cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++); ++ ++ if (fe.dofs_per_line > 0) ++ for (unsigned int face = 0; face < 
GeometryInfo<2>::faces_per_cell; ++face) ++ { ++ typename DoFHandler<2, spacedim>::line_iterator line = cell->line (face); ++ ++ if (line->mg_dof_index (cell->level (), 0) == DoFHandler<2>::invalid_dof_index) ++ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) ++ line->set_mg_dof_index (cell->level (), dof, next_free_dof++); ++ } ++ ++ if (fe.dofs_per_quad > 0) ++ for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof) ++ cell->set_mg_dof_index (cell->level (), dof, next_free_dof++); ++ ++ cell->set_user_flag (); ++ return next_free_dof; ++ } ++ ++ template ++ static ++ unsigned int distribute_dofs_on_cell (typename DoFHandler<3, spacedim>::cell_iterator &cell, unsigned int next_free_dof) ++ { ++ const FiniteElement<3, spacedim> &fe = cell->get_fe (); ++ ++ if (fe.dofs_per_vertex > 0) ++ for (unsigned int vertex = 0; vertex < GeometryInfo<3>::vertices_per_cell; ++vertex) ++ if (cell->mg_vertex_dof_index (cell->level (), vertex, 0) == DoFHandler<3>::invalid_dof_index) ++ for (unsigned int dof = 0; dof < fe.dofs_per_vertex; ++dof) ++ cell->set_mg_vertex_dof_index (cell->level (), vertex, dof, next_free_dof++); ++ ++ if (fe.dofs_per_line > 0) ++ for (unsigned int line = 0; line < GeometryInfo<3>::lines_per_cell; ++line) ++ { ++ typename DoFHandler<3, spacedim>::line_iterator line_it = cell->line (line); ++ ++ if (line_it->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index) ++ for (unsigned int dof = 0; dof < fe.dofs_per_line; ++dof) ++ line_it->set_mg_dof_index (cell->level (), dof, next_free_dof++); ++ } ++ ++ if (fe.dofs_per_quad > 0) ++ for (unsigned int face = 0; face < GeometryInfo<3>::quads_per_cell; ++face) ++ { ++ typename DoFHandler<3, spacedim>::quad_iterator quad = cell->quad (face); ++ ++ if (quad->mg_dof_index (cell->level (), 0) == DoFHandler<3>::invalid_dof_index) ++ for (unsigned int dof = 0; dof < fe.dofs_per_quad; ++dof) ++ quad->set_mg_dof_index (cell->level (), dof, next_free_dof++); ++ } ++ ++ if (fe.dofs_per_hex > 0) ++ for (unsigned int dof = 0; dof < fe.dofs_per_hex; ++dof) ++ cell->set_mg_dof_index (cell->level (), dof, next_free_dof++); ++ ++ cell->set_user_flag (); ++ return next_free_dof; ++ } ++ ++ template ++ static ++ unsigned int get_dof_index (const DoFHandler<1, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<1> &mg_level, internal::DoFHandler::DoFFaces<1> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) ++ { ++ return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index); ++ } ++ ++ template ++ static ++ unsigned int get_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &, internal::DoFHandler::DoFFaces<2> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) ++ { ++ return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index); ++ } ++ ++ template ++ static ++ unsigned int get_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &mg_level, internal::DoFHandler::DoFFaces<2> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>) ++ { ++ return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index); ++ } ++ ++ template ++ static ++ unsigned int get_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const 
unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<1>) ++ { ++ return mg_faces.lines.get_dof_index (dof_handler, obj_index, fe_index, local_index); ++ } ++ ++ template ++ static ++ unsigned int get_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<2>) ++ { ++ return mg_faces.quads.get_dof_index (dof_handler, obj_index, fe_index, local_index); ++ } ++ ++ template ++ static ++ unsigned int get_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &mg_level, internal::DoFHandler::DoFFaces<3> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const int2type<3>) ++ { ++ return mg_level.dof_object.get_dof_index (dof_handler, obj_index, fe_index, local_index); ++ } ++ ++ template ++ static ++ void set_dof_index (const DoFHandler<1, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<1> &mg_level, internal::DoFHandler::DoFFaces<1> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<1>) ++ { ++ mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); ++ } ++ ++ template ++ static ++ void set_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &, internal::DoFHandler::DoFFaces<2> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<1>) ++ { ++ mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); ++ } ++ ++ template ++ static ++ void set_dof_index (const DoFHandler<2, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<2> &mg_level, internal::DoFHandler::DoFFaces<2> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<2>) ++ { ++ mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); ++ } ++ ++ template ++ static ++ void set_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<1>) ++ { ++ mg_faces.lines.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); ++ } ++ ++ template ++ static ++ void set_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &, internal::DoFHandler::DoFFaces<3> &mg_faces, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<2>) ++ { ++ mg_faces.quads.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); ++ } ++ ++ template ++ static ++ void set_dof_index (const DoFHandler<3, spacedim> &dof_handler, internal::DoFHandler::DoFLevel<3> &mg_level, internal::DoFHandler::DoFFaces<3> &, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index, const int2type<3>) ++ { ++ mg_level.dof_object.set_dof_index (dof_handler, obj_index, fe_index, local_index, global_index); ++ } }; } } @@@ -686,15 
-358,14 +724,15 @@@ template DoFHandler::DoFHandler (const Triangulation &tria) - : - tria(&tria, typeid(*this).name()), - selected_fe(0, typeid(*this).name()), - faces(NULL), - mg_faces (NULL) + : + tria(&tria, typeid(*this).name()), + selected_fe(0, typeid(*this).name()), - faces(NULL) ++ faces(NULL), ++ mg_faces (NULL) { - // decide whether we need a - // sequential or a parallel - // distributed policy + // decide whether we need a + // sequential or a parallel + // distributed policy if (dynamic_cast*> (&tria) == 0) @@@ -706,11 -377,10 +744,11 @@@ template DoFHandler::DoFHandler () - : - tria(0, typeid(*this).name()), - selected_fe(0, typeid(*this).name()), - faces(NULL), - mg_faces (NULL) + : + tria(0, typeid(*this).name()), + selected_fe(0, typeid(*this).name()), - faces(NULL) ++ faces(NULL), ++ mg_faces (NULL) {} @@@ -1062,53 -723,6 +1100,57 @@@ distribute_dofs (const FiniteElement - void DoFHandler::distribute_mg_dofs (const FiniteElement& fe) { ++void DoFHandler::distribute_mg_dofs (const FiniteElement &fe) ++{ + Assert ((dynamic_cast*> (&*tria) == 0), ExcMessage ("Invalid triangulation")); + distribute_dofs (fe); + reserve_space (); + - const unsigned int& n_levels = (*tria).n_levels (); ++ const unsigned int &n_levels = (*tria).n_levels (); + + mg_used_dofs.resize (n_levels, 0); + + std::vector user_flags; + + (*tria).save_user_flags (user_flags); + const_cast&>(*tria).clear_user_flags (); + - for (unsigned int level = 0; level < n_levels; ++level) { - unsigned int next_free_dof = 0; ++ for (unsigned int level = 0; level < n_levels; ++level) ++ { ++ unsigned int next_free_dof = 0; + - for (cell_iterator cell = begin (level); cell != end (level); ++cell) - next_free_dof = internal::DoFHandler::Implementation::distribute_dofs_on_cell (cell, next_free_dof); ++ for (cell_iterator cell = begin (level); cell != end (level); ++cell) ++ next_free_dof = internal::DoFHandler::Implementation::distribute_dofs_on_cell (cell, next_free_dof); + - mg_used_dofs[level] = next_free_dof; - } ++ mg_used_dofs[level] = next_free_dof; ++ } + + const_cast&>(*tria).load_user_flags (user_flags); + block_info_object.initialize (*this, true, true); +} + +template - void DoFHandler::reserve_space () { ++void DoFHandler::reserve_space () ++{ + internal::DoFHandler::Implementation::reserve_space_mg (*this); +} + +template - void DoFHandler::clear_mg_space () { ++void DoFHandler::clear_mg_space () ++{ + for (unsigned int i = 0; i < mg_levels.size (); ++i) + delete mg_levels[i]; + + mg_levels.clear (); + delete mg_faces; + mg_faces = NULL; + + std::vector tmp; + + std::swap (mg_vertex_dofs, tmp); +} + template void DoFHandler::initialize_local_block_info () @@@ -1121,12 -735,11 +1163,12 @@@ template void DoFHandler::clear () { - // release lock to old fe + // release lock to old fe selected_fe = 0; - // release memory + // release memory clear_space (); + clear_mg_space (); } @@@ -1184,34 -797,34 +1226,34 @@@ DoFHandler::max_couplings { switch (dim) { - case 1: - return get_fe().dofs_per_vertex; - case 2: - return (3*get_fe().dofs_per_vertex + - 2*get_fe().dofs_per_line); - case 3: - // we need to take refinement of - // one boundary face into - // consideration here; in fact, - // this function returns what - // #max_coupling_between_dofs<2> - // returns - // - // we assume here, that only four - // faces meet at the boundary; - // this assumption is not - // justified and needs to be - // fixed some time. 
fortunately, - // ommitting it for now does no - // harm since the matrix will cry - // foul if its requirements are - // not satisfied - return (19*get_fe().dofs_per_vertex + - 28*get_fe().dofs_per_line + - 8*get_fe().dofs_per_quad); - default: - Assert (false, ExcNotImplemented()); - return numbers::invalid_unsigned_int; + case 1: + return get_fe().dofs_per_vertex; + case 2: + return (3*get_fe().dofs_per_vertex + + 2*get_fe().dofs_per_line); + case 3: + // we need to take refinement of + // one boundary face into + // consideration here; in fact, + // this function returns what + // #max_coupling_between_dofs<2> + // returns + // + // we assume here, that only four + // faces meet at the boundary; + // this assumption is not + // justified and needs to be + // fixed some time. fortunately, - // omitting it for now does no ++ // ommitting it for now does no + // harm since the matrix will cry + // foul if its requirements are + // not satisfied + return (19*get_fe().dofs_per_vertex + + 28*get_fe().dofs_per_line + + 8*get_fe().dofs_per_quad); + default: + Assert (false, ExcNotImplemented()); + return numbers::invalid_unsigned_int; } } @@@ -1233,72 -846,6 +1275,81 @@@ void DoFHandler::clear_sp number_cache.clear (); } +template +template - unsigned int DoFHandler::get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const { ++unsigned int DoFHandler::get_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index) const ++{ + return internal::DoFHandler::Implementation::get_dof_index (*this, *this->mg_levels[obj_level], *this->mg_faces, obj_index, fe_index, local_index, internal::int2type ()); +} + +template +template - void DoFHandler::set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index) const { ++void DoFHandler::set_dof_index (const unsigned int obj_level, const unsigned int obj_index, const unsigned int fe_index, const unsigned int local_index, const unsigned int global_index) const ++{ + internal::DoFHandler::Implementation::set_dof_index (*this, *this->mg_levels[obj_level], *this->mg_faces, obj_index, fe_index, local_index, global_index, internal::int2type ()); +} + +template - DoFHandler::MGVertexDoFs::MGVertexDoFs (): coarsest_level (numbers::invalid_unsigned_int), finest_level (0), indices (0), indices_offset (0) { ++DoFHandler::MGVertexDoFs::MGVertexDoFs (): coarsest_level (numbers::invalid_unsigned_int), finest_level (0), indices (0), indices_offset (0) ++{ +} + +template - DoFHandler::MGVertexDoFs::~MGVertexDoFs () { ++DoFHandler::MGVertexDoFs::~MGVertexDoFs () ++{ + delete[] indices; + delete[] indices_offset; +} + +template - void DoFHandler::MGVertexDoFs::init (const unsigned int cl, const unsigned int fl, const unsigned int dofs_per_vertex) { - if (indices != 0) { - delete[] indices; - indices = 0; - } ++void DoFHandler::MGVertexDoFs::init (const unsigned int cl, const unsigned int fl, const unsigned int dofs_per_vertex) ++{ ++ if (indices != 0) ++ { ++ delete[] indices; ++ indices = 0; ++ } + - if (indices_offset != 0) { - delete[] indices_offset; - indices_offset = 0; - } ++ if (indices_offset != 0) ++ { ++ delete[] indices_offset; ++ indices_offset = 0; ++ } + + coarsest_level = cl; + finest_level = fl; + + if (cl > fl) + return; + + const unsigned int n_levels = finest_level - coarsest_level + 1; + const 
unsigned int n_indices = n_levels * dofs_per_vertex; + + indices = new unsigned int[n_indices]; + Assert (indices != 0, ExcNoMemory ()); + + for (unsigned int i = 0; i < n_indices; ++i) + indices[i] = DoFHandler::invalid_dof_index; + + indices_offset = new unsigned int[n_levels]; + Assert (indices != 0, ExcNoMemory ()); + + for (unsigned int i = 0; i < n_levels; ++i) + indices_offset[i] = i * dofs_per_vertex; +} + +template - unsigned int DoFHandler::MGVertexDoFs::get_coarsest_level () const { ++unsigned int DoFHandler::MGVertexDoFs::get_coarsest_level () const ++{ + return coarsest_level; +} + +template - unsigned int DoFHandler::MGVertexDoFs::get_finest_level () const { ++unsigned int DoFHandler::MGVertexDoFs::get_finest_level () const ++{ + return finest_level; +} + /*-------------- Explicit Instantiations -------------------------------*/ #include "dof_handler.inst" diff --cc deal.II/source/dofs/dof_renumbering.cc index 7ec87f74a3,dbba13d7be..4b26224d44 --- a/deal.II/source/dofs/dof_renumbering.cc +++ b/deal.II/source/dofs/dof_renumbering.cc @@@ -1871,9 -1871,9 +1871,9 @@@ namespace DoFRenumberin template - void downstream_dg (MGDoFHandler& dof, + void downstream_dg (MGDoFHandler &dof, const unsigned int level, - const Point& direction) - const Point &direction) ++ const Point &direction) { std::vector renumbering(dof.n_dofs(level)); std::vector reverse(dof.n_dofs(level)); @@@ -1885,9 -1885,9 +1885,9 @@@ template - void downstream (MGDoFHandler& dof, + void downstream (MGDoFHandler &dof, const unsigned int level, - const Point& direction, - const Point &direction, ++ const Point &direction, const bool dof_wise_renumbering) { std::vector renumbering(dof.n_dofs(level)); diff --cc deal.II/source/dofs/dof_tools.cc index 12d327599e,98e49222b9..ccab0b4190 --- a/deal.II/source/dofs/dof_tools.cc +++ b/deal.II/source/dofs/dof_tools.cc @@@ -1939,27 -1939,27 +1939,27 @@@ namespace DoFTool void - make_hp_hanging_node_constraints (const dealii::hp::DoFHandler<1> & /*dof_handler*/, - ConstraintMatrix & /*constraints*/) + make_hp_hanging_node_constraints (const dealii::hp::DoFHandler<1> &/*dof_handler*/, + ConstraintMatrix &/*constraints*/) { - // we may have to compute - // constraints for - // vertices. gotta think about - // that a bit more + // we may have to compute + // constraints for + // vertices. gotta think about + // that a bit more //TODO[WB]: think about what to do here... } void - make_oldstyle_hanging_node_constraints (const dealii::hp::DoFHandler<1> & /*dof_handler*/, - ConstraintMatrix & /*constraints*/, + make_oldstyle_hanging_node_constraints (const dealii::hp::DoFHandler<1> &/*dof_handler*/, + ConstraintMatrix &/*constraints*/, dealii::internal::int2type<1>) { - // we may have to compute - // constraints for - // vertices. gotta think about - // that a bit more + // we may have to compute + // constraints for + // vertices. gotta think about + // that a bit more //TODO[WB]: think about what to do here... 
} @@@ -3875,7 -3878,7 +3878,7 @@@ const ComponentMask &component_mask, std::vector &selected_dofs) { - const FiniteElement& fe = dof.get_fe(); - const FiniteElement &fe = dof.get_fe(); ++ const FiniteElement &fe = dof.get_fe(); Assert(component_mask.represents_n_components(n_components(dof)), ExcMessage ("The given component mask is not sized correctly to represent the " @@@ -3911,11 -3914,11 +3914,11 @@@ for (unsigned int i=0; i indices(fe.dofs_per_cell); - typename MGDoFHandler::cell_iterator c; + typename DH::cell_iterator c; - for (c = dof.begin(level) ; c != dof.end(level) ; ++ c) + for (c = dof.begin(level) ; c != dof.end(level) ; ++ c) { c->get_mg_dof_indices(indices); for (unsigned int i=0; i void - resolve_components (const FiniteElement&fe, + resolve_components (const FiniteElement &fe, const std::vector &dofs_by_component, - const std::vector &target_component, + const std::vector &target_component, const bool only_once, std::vector &dofs_per_component, unsigned int &component) @@@ -4762,9 -4765,9 +4765,9 @@@ template void - resolve_components (const hp::FECollection&fe_collection, + resolve_components (const hp::FECollection &fe_collection, const std::vector &dofs_by_component, - const std::vector &target_component, + const std::vector &target_component, const bool only_once, std::vector &dofs_per_component, unsigned int &component) @@@ -5933,11 -5937,11 +5937,11 @@@ { namespace { - template + template void - map_dofs_to_support_points(const hp::MappingCollection & mapping, + map_dofs_to_support_points(const hp::MappingCollection &mapping, - const DH &dof_handler, - std::map > &support_points) + const DH &dof_handler, + std::map > &support_points) { const unsigned int dim = DH::dimension; const unsigned int spacedim = DH::space_dimension; @@@ -5995,11 -5999,11 +5999,11 @@@ template void - map_dofs_to_support_points(const hp::MappingCollection & mapping, + map_dofs_to_support_points(const hp::MappingCollection &mapping, - const DH &dof_handler, - std::vector > &support_points) + const DH &dof_handler, + std::vector > &support_points) { - // get the data in the form of the map as above + // get the data in the form of the map as above std::map > x_support_points; map_dofs_to_support_points(mapping, dof_handler, x_support_points); diff --cc deal.II/source/fe/fe_dgp_nonparametric.cc index a82a329742,7f891fd4dc..fccbec69f8 --- a/deal.II/source/fe/fe_dgp_nonparametric.cc +++ b/deal.II/source/fe/fe_dgp_nonparametric.cc @@@ -286,18 -286,18 +286,18 @@@ FE_DGPNonparametric::get_ template void FE_DGPNonparametric::fill_fe_values ( - const Mapping&, - const typename Triangulation::cell_iterator&, - const Quadrature&, - typename Mapping::InternalDataBase&, - typename Mapping::InternalDataBase& fedata, - FEValuesData&data, + const Mapping &, + const typename Triangulation::cell_iterator &, + const Quadrature &, + typename Mapping::InternalDataBase &, + typename Mapping::InternalDataBase &fedata, + FEValuesData &data, - CellSimilarity::Similarity & /*cell_similarity*/) const + CellSimilarity::Similarity &/*cell_similarity*/) const { - // convert data object to internal - // data for this class. fails with - // an exception if that is not - // possible + // convert data object to internal + // data for this class. 
fails with + // an exception if that is not + // possible Assert (dynamic_cast (&fedata) != 0, ExcInternalError()); InternalData &fe_data = static_cast (fedata); diff --cc deal.II/source/fe/fe_nothing.cc index f0f435f297,527bc4f4cf..71b32f25d7 --- a/deal.II/source/fe/fe_nothing.cc +++ b/deal.II/source/fe/fe_nothing.cc @@@ -168,39 -168,39 +168,39 @@@ compare_for_face_domination (const Fini template std::vector > FE_Nothing :: -hp_vertex_dof_identities (const FiniteElement & /*fe_other*/) const +hp_vertex_dof_identities (const FiniteElement &/*fe_other*/) const { - // the FE_Nothing has no - // degrees of freedom, so there - // are no equivalencies to be - // recorded - return std::vector > (); + // the FE_Nothing has no + // degrees of freedom, so there + // are no equivalencies to be + // recorded + return std::vector > (); } template std::vector > FE_Nothing :: -hp_line_dof_identities (const FiniteElement & /*fe_other*/) const +hp_line_dof_identities (const FiniteElement &/*fe_other*/) const { - // the FE_Nothing has no - // degrees of freedom, so there - // are no equivalencies to be - // recorded - return std::vector > (); + // the FE_Nothing has no + // degrees of freedom, so there + // are no equivalencies to be + // recorded + return std::vector > (); } template std::vector > FE_Nothing :: -hp_quad_dof_identities (const FiniteElement & /*fe_other*/) const +hp_quad_dof_identities (const FiniteElement &/*fe_other*/) const { - // the FE_Nothing has no - // degrees of freedom, so there - // are no equivalencies to be - // recorded - return std::vector > (); + // the FE_Nothing has no + // degrees of freedom, so there + // are no equivalencies to be + // recorded + return std::vector > (); } @@@ -216,11 -216,11 +216,11 @@@ hp_constraints_are_implemented () cons template void FE_Nothing:: -get_face_interpolation_matrix (const FiniteElement & /*source_fe*/, +get_face_interpolation_matrix (const FiniteElement &/*source_fe*/, FullMatrix &interpolation_matrix) const { - // since this element has no face dofs, the - // interpolation matrix is necessarily empty + // since this element has no face dofs, the + // interpolation matrix is necessarily empty Assert (interpolation_matrix.m() == 0, ExcDimensionMismatch (interpolation_matrix.m(), @@@ -236,10 -236,10 +236,10 @@@ voi FE_Nothing:: get_subface_interpolation_matrix (const FiniteElement & /*source_fe*/, const unsigned int /*index*/, - FullMatrix &interpolation_matrix) const + FullMatrix &interpolation_matrix) const { - // since this element has no face dofs, the - // interpolation matrix is necessarily empty + // since this element has no face dofs, the + // interpolation matrix is necessarily empty Assert (interpolation_matrix.m() == 0, ExcDimensionMismatch (interpolation_matrix.m(), diff --cc deal.II/source/fe/fe_q.cc index a646da1fca,658c503146..546338d687 --- a/deal.II/source/fe/fe_q.cc +++ b/deal.II/source/fe/fe_q.cc @@@ -1334,9 -1335,9 +1335,9 @@@ void FE_Q<1>::initialize_unit_face_supp } template <> -void FE_Q<1>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/) +void FE_Q<1>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/) { - // no faces in 1d, so nothing to do + // no faces in 1d, so nothing to do } template <> @@@ -1346,9 -1347,9 +1347,9 @@@ void FE_Q<1,2>::initialize_unit_face_su } template <> -void FE_Q<1,2>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/) +void FE_Q<1,2>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/) { - // no faces in 1d, 
so nothing to do + // no faces in 1d, so nothing to do } template <> @@@ -1358,9 -1359,9 +1359,9 @@@ void FE_Q<1,3>::initialize_unit_face_su } template <> -void FE_Q<1,3>::initialize_unit_face_support_points (const Quadrature<1> & /*points*/) +void FE_Q<1,3>::initialize_unit_face_support_points (const Quadrature<1> &/*points*/) { - // no faces in 1d, so nothing to do + // no faces in 1d, so nothing to do } template diff --cc deal.II/source/fe/fe_system.cc index 33d6b8cd9c,d643c27b3c..f4dbfc3213 --- a/deal.II/source/fe/fe_system.cc +++ b/deal.II/source/fe/fe_system.cc @@@ -307,13 -310,13 +310,13 @@@ FESystem::FESystem (cons template FESystem::FESystem ( - const std::vector*> &fes, + const std::vector*> &fes, const std::vector &multiplicities) - : - FiniteElement (multiply_dof_numbers(fes, multiplicities), - compute_restriction_is_additive_flags (fes, multiplicities), - compute_nonzero_components(fes, multiplicities)), - base_elements(count_nonzeros(multiplicities)) + : + FiniteElement (multiply_dof_numbers(fes, multiplicities), + compute_restriction_is_additive_flags (fes, multiplicities), + compute_nonzero_components(fes, multiplicities)), + base_elements(count_nonzeros(multiplicities)) { initialize(fes, multiplicities); } @@@ -450,8 -453,8 +453,8 @@@ FESystem::shape_grad (con template Tensor<1,dim> FESystem::shape_grad_component (const unsigned int i, - const Point &p, - const unsigned int component) const - const Point &p, ++ const Point &p, + const unsigned int component) const { Assert (idofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell)); Assert (component < this->n_components(), @@@ -502,8 -505,8 +505,8 @@@ FESystem::shape_grad_gra template Tensor<2,dim> FESystem::shape_grad_grad_component (const unsigned int i, - const Point &p, - const unsigned int component) const - const Point &p, ++ const Point &p, + const unsigned int component) const { Assert (idofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell)); Assert (component < this->n_components(), diff --cc deal.II/source/fe/mapping_cartesian.cc index 5e3a1122f7,5529fc527b..48ec6cfaea --- a/deal.II/source/fe/mapping_cartesian.cc +++ b/deal.II/source/fe/mapping_cartesian.cc @@@ -464,16 -467,16 +467,16 @@@ MappingCartesian::fill_f typename Mapping::InternalDataBase &mapping_data, std::vector > &quadrature_points, std::vector &JxW_values, - std::vector > &boundary_forms, + std::vector > &boundary_forms, std::vector > &normal_vectors) const { - // convert data object to internal - // data for this class. fails with - // an exception if that is not - // possible - Assert (dynamic_cast (&mapping_data) != 0, + // convert data object to internal + // data for this class. fails with + // an exception if that is not + // possible + Assert (dynamic_cast (&mapping_data) != 0, ExcInternalError()); - InternalData &data = static_cast (mapping_data); + InternalData &data = static_cast (mapping_data); compute_fill (cell, face_no, invalid_face_number, CellSimilarity::none, @@@ -519,15 -522,15 +522,15 @@@ MappingCartesian::fill_f typename Mapping::InternalDataBase &mapping_data, std::vector > &quadrature_points, std::vector &JxW_values, - std::vector > &boundary_forms, + std::vector > &boundary_forms, std::vector > &normal_vectors) const { - // convert data object to internal - // data for this class. fails with - // an exception if that is not - // possible - Assert (dynamic_cast (&mapping_data) != 0, ExcInternalError()); - InternalData &data = static_cast (mapping_data); + // convert data object to internal + // data for this class. 
fails with + // an exception if that is not + // possible + Assert (dynamic_cast (&mapping_data) != 0, ExcInternalError()); + InternalData &data = static_cast (mapping_data); compute_fill (cell, face_no, sub_no, CellSimilarity::none, data, diff --cc deal.II/source/fe/mapping_q.cc index 53a95a9790,665748712b..213905d2a7 --- a/deal.II/source/fe/mapping_q.cc +++ b/deal.II/source/fe/mapping_q.cc @@@ -367,31 -367,31 +367,31 @@@ MappingQ::fill_fe_face_va typename Mapping::InternalDataBase &mapping_data, std::vector > &quadrature_points, std::vector &JxW_values, - std::vector > &exterior_forms, + std::vector > &exterior_forms, std::vector > &normal_vectors) const { - // convert data object to internal - // data for this class. fails with - // an exception if that is not - // possible - Assert (dynamic_cast (&mapping_data) != 0, + // convert data object to internal + // data for this class. fails with + // an exception if that is not + // possible + Assert (dynamic_cast (&mapping_data) != 0, ExcInternalError()); - InternalData &data = static_cast (mapping_data); - - // check whether this cell needs - // the full mapping or can be - // treated by a reduced Q1 mapping, - // e.g. if the cell is entirely in - // the interior of the domain. note - // that it is not sufficient to ask - // whether the present _face_ is in - // the interior, as the mapping on - // the face depends on the mapping - // of the cell, which in turn - // depends on the fact whether - // _any_ of the faces of this cell - // is at the boundary, not only the - // present face + InternalData &data = static_cast (mapping_data); + + // check whether this cell needs + // the full mapping or can be + // treated by a reduced Q1 mapping, + // e.g. if the cell is entirely in + // the interior of the domain. note + // that it is not sufficient to ask + // whether the present _face_ is in + // the interior, as the mapping on + // the face depends on the mapping + // of the cell, which in turn + // depends on the fact whether + // _any_ of the faces of this cell + // is at the boundary, not only the + // present face data.use_mapping_q1_on_current_cell=!(use_mapping_q_on_all_cells || cell->has_boundary_lines()); @@@ -423,37 -423,37 +423,37 @@@ template void MappingQ::fill_fe_subface_values (const typename Triangulation::cell_iterator &cell, - const unsigned int face_no, - const unsigned int sub_no, - const Quadrature &q, - typename Mapping::InternalDataBase &mapping_data, - std::vector > &quadrature_points, - std::vector &JxW_values, - std::vector > &exterior_forms, - std::vector > &normal_vectors) const + const unsigned int face_no, + const unsigned int sub_no, + const Quadrature &q, + typename Mapping::InternalDataBase &mapping_data, + std::vector > &quadrature_points, + std::vector &JxW_values, - std::vector > &exterior_forms, ++ std::vector > &exterior_forms, + std::vector > &normal_vectors) const { - // convert data object to internal - // data for this class. fails with - // an exception if that is not - // possible - Assert (dynamic_cast (&mapping_data) != 0, + // convert data object to internal + // data for this class. fails with + // an exception if that is not + // possible + Assert (dynamic_cast (&mapping_data) != 0, ExcInternalError()); - InternalData &data = static_cast (mapping_data); - - // check whether this cell needs - // the full mapping or can be - // treated by a reduced Q1 mapping, - // e.g. if the cell is entirely in - // the interior of the domain. 
note - // that it is not sufficient to ask - // whether the present _face_ is in - // the interior, as the mapping on - // the face depends on the mapping - // of the cell, which in turn - // depends on the fact whether - // _any_ of the faces of this cell - // is at the boundary, not only the - // present face + InternalData &data = static_cast (mapping_data); + + // check whether this cell needs + // the full mapping or can be + // treated by a reduced Q1 mapping, + // e.g. if the cell is entirely in + // the interior of the domain. note + // that it is not sufficient to ask + // whether the present _face_ is in + // the interior, as the mapping on + // the face depends on the mapping + // of the cell, which in turn + // depends on the fact whether + // _any_ of the faces of this cell + // is at the boundary, not only the + // present face data.use_mapping_q1_on_current_cell=!(use_mapping_q_on_all_cells || cell->has_boundary_lines()); diff --cc deal.II/source/fe/mapping_q1.cc index d40401de26,211ad2c8b9..953aea7fb3 --- a/deal.II/source/fe/mapping_q1.cc +++ b/deal.II/source/fe/mapping_q1.cc @@@ -1139,14 -1139,14 +1139,14 @@@ fill_fe_subface_values (const typename typename Mapping::InternalDataBase &mapping_data, std::vector > &quadrature_points, std::vector &JxW_values, - std::vector > &boundary_forms, + std::vector > &boundary_forms, std::vector > &normal_vectors) const { - // ensure that the following cast - // is really correct: + // ensure that the following cast + // is really correct: Assert (dynamic_cast(&mapping_data) != 0, ExcInternalError()); - InternalData &data = static_cast(mapping_data); + InternalData &data = static_cast(mapping_data); const unsigned int n_q_points = q.size(); diff --cc deal.II/source/fe/mapping_q1_eulerian.cc index 939b71706c,680f76d98b..479a58edca --- a/deal.II/source/fe/mapping_q1_eulerian.cc +++ b/deal.II/source/fe/mapping_q1_eulerian.cc @@@ -25,11 -25,11 +25,11 @@@ DEAL_II_NAMESPACE_OPE template MappingQ1Eulerian:: -MappingQ1Eulerian (const EulerVectorType &euler_transform_vectors, +MappingQ1Eulerian (const EulerVectorType &euler_transform_vectors, const DoFHandler &shiftmap_dof_handler) - : - euler_transform_vectors(&euler_transform_vectors), - shiftmap_dof_handler(&shiftmap_dof_handler) + : + euler_transform_vectors(&euler_transform_vectors), + shiftmap_dof_handler(&shiftmap_dof_handler) {} diff --cc deal.II/source/grid/grid_generator.cc index cfdbe313cd,a2b6d02c63..8e0966cb7c --- a/deal.II/source/grid/grid_generator.cc +++ b/deal.II/source/grid/grid_generator.cc @@@ -180,7 -180,7 +180,7 @@@ void GridGenerator::hyper_cube (Triangu void GridGenerator::moebius ( - Triangulation<3>& tria, - Triangulation<3> &tria, ++ Triangulation<3> &tria, const unsigned int n_cells, const unsigned int n_rotations, const double R, @@@ -233,7 -233,7 +233,7 @@@ void - GridGenerator::torus (Triangulation<2,3>& tria, -GridGenerator::torus (Triangulation<2,3> &tria, ++GridGenerator::torus (Triangulation<2,3> &tria, const double R, const double r) { @@@ -372,8 -372,8 +372,8 @@@ template<> void GridGenerator::parallelogram ( - Triangulation<2>& tria, - const Tensor<2,2>& corners, - Triangulation<2> &tria, ++ Triangulation<2> &tria, + const Tensor<2,2> &corners, const bool colorize) { std::vector > vertices (GeometryInfo<2>::vertices_per_cell); diff --cc deal.II/source/grid/grid_in.cc index 6fcd927fd9,022757a363..e61a7551ae --- a/deal.II/source/grid/grid_in.cc +++ b/deal.II/source/grid/grid_in.cc @@@ -2153,9 -2149,9 +2149,9 @@@ void GridIn::skip_commen template -void 
GridIn::debug_output_grid (const std::vector > & /*cells*/, - const std::vector > & /*vertices*/, - std::ostream & /*out*/) +void GridIn::debug_output_grid (const std::vector > &/*cells*/, - const std::vector > &/*vertices*/, - std::ostream &/*out*/) ++ const std::vector > &/*vertices*/, ++ std::ostream &/*out*/) { Assert (false, ExcNotImplemented()); } diff --cc deal.II/source/grid/grid_reordering.cc index 3355f06ded,f6b79371d0..dd8aeee74d --- a/deal.II/source/grid/grid_reordering.cc +++ b/deal.II/source/grid/grid_reordering.cc @@@ -224,9 -225,9 +225,9 @@@ namespace interna const unsigned int s1, const unsigned int s2, const unsigned int s3, - const CellData<2> &cd) + const CellData<2> &cd) - : - original_cell_data (cd) + : + original_cell_data (cd) { v[0] = v0; v[1] = v1; diff --cc deal.II/source/grid/tria.cc index eb5dc1836f,6b0834b82b..5f9f470826 --- a/deal.II/source/grid/tria.cc +++ b/deal.II/source/grid/tria.cc @@@ -1020,874 -1023,874 +1023,874 @@@ namespace interna << arg1 << " and " << arg2 << " is multiply set."); - /** - * A class into which we put many of the functions that implement - * functionality of the Triangulation class. The main reason for this - * class is as follows: the majority of the functions in Triangulation - * need to be implemented differently for dim==1, dim==2, and - * dim==3. However, their implementation is largly independent of the - * spacedim template parameter. So we would like to write things like - * - * template - * void Triangulation<1,spacedim>::create_triangulation (...) {...} - * - * Unfortunately, C++ doesn't allow this: member functions of class - * templates have to be either not specialized at all, or fully - * specialized. No partial specialization is allowed. One possible - * solution would be to just duplicate the bodies of the functions and - * have equally implemented functions - * - * template <> - * void Triangulation<1,1>::create_triangulation (...) {...} - * - * template <> - * void Triangulation<1,2>::create_triangulation (...) {...} - * - * but that is clearly an unsatisfactory solution. Rather, what we do - * is introduce the current Implementation class in which we can write - * these functions as member templates over spacedim, i.e. we can have - * - * template - * template - * void Triangulation::Implementation:: - * create_triangulation (..., - * Triangulation<1,spacedim> &tria ) {...} - * - * The outer template parameters are here unused, only the inner - * ones are of real interest. - * - * One may ask why we put these functions into an class rather - * than an anonymous namespace, for example? - * - * First, these implementation functions need to be friends of the - * Triangulation class. It is simpler to make the entire class a friend - * rather than listing all members of an implementation namespace as - * friends of the Triangulation class (there is no such thing as a "friend - * namespace XXX" directive). - * - * Ideally, we would make this class a member class of the - * Triangulation class, since then our implementation functions - * have immediate access to the typedefs and static functions of the - * surrounding Triangulation class. I.e., we do not have to write "typename - * Triangulation::active_cell_iterator" but can write - * "active_cell_iterator" right away. 
This is, in fact, the way it was - * implemented first, but we ran into a bug in gcc4.0: - * @code - * class Triangulation { - * struct Implementation; - * friend class TriaAccessor; - * }; - * - * class TriaAccessor { - * struct Implementation; - * friend class Triangulation; - * }; - * @endcode - * - * Here, friendship (per C++ standard) is supposed to extend to all members of - * the befriended class, including its 'Implementation' member class. But gcc4.0 - * gets this wrong: the members of Triangulation::Implementation are not friends - * of TriaAccessor and the other way around. Ideally, one would fix this by - * saying - * @code - * class Triangulation { - * struct Implementation; - * friend class TriaAccessor; - * friend class TriaAccessor::Implementation; // ** - * }; - * - * class TriaAccessor { - * struct Implementation; - * friend class Triangulation; - * friend class Triangulation::Implementation; - * }; - * @endcode - * but that's not legal because in ** we don't know yet that TriaAccessor has - * a member class Implementation and so we can't make it a friend. The only - * way forward at this point was to make Implementation a class in the - * internal namespace so that we can forward declare it and make it a friend - * of the respective other outer class -- not quite what we wanted but the - * only way I could see to make it work... - */ + /** + * A class into which we put many of the functions that implement + * functionality of the Triangulation class. The main reason for this + * class is as follows: the majority of the functions in Triangulation + * need to be implemented differently for dim==1, dim==2, and + * dim==3. However, their implementation is largly independent of the + * spacedim template parameter. So we would like to write things like + * + * template + * void Triangulation<1,spacedim>::create_triangulation (...) {...} + * + * Unfortunately, C++ doesn't allow this: member functions of class + * templates have to be either not specialized at all, or fully + * specialized. No partial specialization is allowed. One possible + * solution would be to just duplicate the bodies of the functions and + * have equally implemented functions + * + * template <> + * void Triangulation<1,1>::create_triangulation (...) {...} + * + * template <> + * void Triangulation<1,2>::create_triangulation (...) {...} + * + * but that is clearly an unsatisfactory solution. Rather, what we do + * is introduce the current Implementation class in which we can write + * these functions as member templates over spacedim, i.e. we can have + * + * template + * template + * void Triangulation::Implementation:: + * create_triangulation (..., + * Triangulation<1,spacedim> &tria ) {...} + * + * The outer template parameters are here unused, only the inner + * ones are of real interest. + * + * One may ask why we put these functions into an class rather + * than an anonymous namespace, for example? + * + * First, these implementation functions need to be friends of the + * Triangulation class. It is simpler to make the entire class a friend + * rather than listing all members of an implementation namespace as + * friends of the Triangulation class (there is no such thing as a "friend + * namespace XXX" directive). + * + * Ideally, we would make this class a member class of the + * Triangulation class, since then our implementation functions + * have immediate access to the typedefs and static functions of the + * surrounding Triangulation class. 
I.e., we do not have to write "typename + * Triangulation::active_cell_iterator" but can write + * "active_cell_iterator" right away. This is, in fact, the way it was + * implemented first, but we ran into a bug in gcc4.0: + * @code + * class Triangulation { + * struct Implementation; + * friend class TriaAccessor; + * }; + * + * class TriaAccessor { + * struct Implementation; + * friend class Triangulation; + * }; + * @endcode + * + * Here, friendship (per C++ standard) is supposed to extend to all members of + * the befriended class, including its 'Implementation' member class. But gcc4.0 + * gets this wrong: the members of Triangulation::Implementation are not friends + * of TriaAccessor and the other way around. Ideally, one would fix this by + * saying + * @code + * class Triangulation { + * struct Implementation; + * friend class TriaAccessor; + * friend class TriaAccessor::Implementation; // ** + * }; + * + * class TriaAccessor { + * struct Implementation; + * friend class Triangulation; + * friend class Triangulation::Implementation; + * }; + * @endcode + * but that's not legal because in ** we don't know yet that TriaAccessor has + * a member class Implementation and so we can't make it a friend. The only + * way forward at this point was to make Implementation a class in the + * internal namespace so that we can forward declare it and make it a friend + * of the respective other outer class -- not quite what we wanted but the + * only way I could see to make it work... + */ struct Implementation { - /** - * For a given Triangulation, update the - * number cache for lines. For 1d, we have - * to deal with the fact that lines have - * levels, whereas for higher dimensions - * they do not. - * - * The second argument indicates - * for how many levels the - * Triangulation has objects, - * though the highest levels need - * not contain active cells if they - * have previously all been - * coarsened away. 
- */ - template - static - void compute_number_cache (const Triangulation &triangulation, - const unsigned int level_objects, - internal::Triangulation::NumberCache<1> &number_cache) - { - typedef - typename Triangulation::line_iterator line_iterator; - typedef - typename Triangulation::active_line_iterator active_line_iterator; - - number_cache.n_levels = 0; - if (level_objects > 0) - // find the last level - // on which there are - // used cells - for (unsigned int level=0; level 0, ExcInternalError()); - - /////////////////////////////////// - // update the number of lines - // on the different levels in - // the cache - number_cache.n_lines_level.resize (number_cache.n_levels); - number_cache.n_lines = 0; - - number_cache.n_active_lines_level.resize (number_cache.n_levels); - number_cache.n_active_lines = 0; - - // for 1d, lines have levels so take - // count the objects per level and - // globally - if (dim == 1) - { - for (unsigned int level=0; levellevel() == static_cast(level)); ++line) - ++number_cache.n_active_lines_level[level]; - - // update total number of lines - number_cache.n_active_lines += number_cache.n_active_lines_level[level]; - } - } - else - { - // for dim>1, there are no - // levels for lines - { - line_iterator line = triangulation.begin_line (), - endc = triangulation.end_line(); - for (; line!=endc; ++line) - ++number_cache.n_lines; - } - - { - active_line_iterator line = triangulation.begin_active_line (), - endc = triangulation.end_line(); - for (; line!=endc; ++line) - ++number_cache.n_active_lines; - } - } - } - - /** - * For a given Triangulation, update the - * number cache for quads. For 2d, we have - * to deal with the fact that quads have - * levels, whereas for higher dimensions - * they do not. - * - * The second argument indicates - * for how many levels the - * Triangulation has objects, - * though the highest levels need - * not contain active cells if they - * have previously all been - * coarsened away. - * - * At the beginning of the function, we call the - * respective function to update the number - * cache for lines. 
- */ - template - static - void compute_number_cache (const Triangulation &triangulation, - const unsigned int level_objects, - internal::Triangulation::NumberCache<2> &number_cache) - { - // update lines and n_levels - compute_number_cache (triangulation, - level_objects, - static_cast&> - (number_cache)); - - typedef - typename Triangulation::quad_iterator quad_iterator; - typedef - typename Triangulation::active_quad_iterator active_quad_iterator; - - /////////////////////////////////// - // update the number of quads - // on the different levels in - // the cache - number_cache.n_quads_level.resize (number_cache.n_levels); - number_cache.n_quads = 0; - - number_cache.n_active_quads_level.resize (number_cache.n_levels); - number_cache.n_active_quads = 0; - - // for 2d, quads have levels so take - // count the objects per level and - // globally - if (dim == 2) - { - for (unsigned int level=0; levellevel() == static_cast(level)); ++quad) - ++number_cache.n_active_quads_level[level]; - - // update total number of quads - number_cache.n_active_quads += number_cache.n_active_quads_level[level]; - } - } - else - { - // for dim>2, there are no - // levels for quads - { - quad_iterator quad = triangulation.begin_quad (), - endc = triangulation.end_quad(); - for (; quad!=endc; ++quad) - ++number_cache.n_quads; - } - - { - active_quad_iterator quad = triangulation.begin_active_quad (), - endc = triangulation.end_quad(); - for (; quad!=endc; ++quad) - ++number_cache.n_active_quads; - } - } - } - - /** - * For a given Triangulation, update the - * number cache for hexes. For 3d, we have - * to deal with the fact that hexes have - * levels, whereas for higher dimensions - * they do not. - * - * The second argument indicates - * for how many levels the - * Triangulation has objects, - * though the highest levels need - * not contain active cells if they - * have previously all been - * coarsened away. - * - * At the end of the function, we call the - * respective function to update the number - * cache for quads, which will in turn call - * the respective function for lines. 
- */ - template - static - void compute_number_cache (const Triangulation &triangulation, - const unsigned int level_objects, - internal::Triangulation::NumberCache<3> &number_cache) - { - // update quads, lines and n_levels - compute_number_cache (triangulation, - level_objects, - static_cast&> - (number_cache)); - - typedef - typename Triangulation::hex_iterator hex_iterator; - typedef - typename Triangulation::active_hex_iterator active_hex_iterator; - - /////////////////////////////////// - // update the number of hexes - // on the different levels in - // the cache - number_cache.n_hexes_level.resize (number_cache.n_levels); - number_cache.n_hexes = 0; - - number_cache.n_active_hexes_level.resize (number_cache.n_levels); - number_cache.n_active_hexes = 0; - - // for 3d, hexes have levels so take - // count the objects per level and - // globally - if (dim == 3) - { - for (unsigned int level=0; levellevel() == static_cast(level)); ++hex) - ++number_cache.n_active_hexes_level[level]; - - // update total number of hexes - number_cache.n_active_hexes += number_cache.n_active_hexes_level[level]; - } - } - else - { - // for dim>3, there are no - // levels for hexs - { - hex_iterator hex = triangulation.begin_hex (), - endc = triangulation.end_hex(); - for (; hex!=endc; ++hex) - ++number_cache.n_hexes; - } - - { - active_hex_iterator hex = triangulation.begin_active_hex (), - endc = triangulation.end_hex(); - for (; hex!=endc; ++hex) - ++number_cache.n_active_hexes; - } - } - } - - - /** - * Create a triangulation from - * given data. This function does - * this work for 1-dimensional - * triangulations independently - * of the actual space dimension. - */ - template - static - void - create_triangulation (const std::vector > &v, - const std::vector > &cells, - const SubCellData &/*subcelldata*/, - Triangulation<1,spacedim> &triangulation) + /** + * For a given Triangulation, update the + * number cache for lines. For 1d, we have + * to deal with the fact that lines have + * levels, whereas for higher dimensions + * they do not. + * + * The second argument indicates + * for how many levels the + * Triangulation has objects, + * though the highest levels need + * not contain active cells if they + * have previously all been + * coarsened away. + */ + template + static + void compute_number_cache (const Triangulation &triangulation, + const unsigned int level_objects, + internal::Triangulation::NumberCache<1> &number_cache) + { + typedef + typename Triangulation::line_iterator line_iterator; + typedef + typename Triangulation::active_line_iterator active_line_iterator; + + number_cache.n_levels = 0; + if (level_objects > 0) + // find the last level + // on which there are + // used cells + for (unsigned int level=0; level 0, ExcInternalError()); + + /////////////////////////////////// + // update the number of lines + // on the different levels in + // the cache + number_cache.n_lines_level.resize (number_cache.n_levels); + number_cache.n_lines = 0; + + number_cache.n_active_lines_level.resize (number_cache.n_levels); + number_cache.n_active_lines = 0; + + // for 1d, lines have levels so take + // count the objects per level and + // globally + if (dim == 1) { - AssertThrow (v.size() > 0, ExcMessage ("No vertices given")); - AssertThrow (cells.size() > 0, ExcMessage ("No cells given")); - - // note: since no boundary - // information can be given in one - // dimension, the @p{subcelldata} - // field is ignored. 
(only used for - // error checking, which is a good - // idea in any case) - const unsigned int dim=1; - - // copy vertices - triangulation.vertices = v; - triangulation.vertices_used = std::vector (v.size(), true); - - // store the indices of the lines - // which are adjacent to a given - // vertex - std::vector > lines_at_vertex (v.size()); - - // reserve enough space - triangulation.levels.push_back (new internal::Triangulation::TriaLevel); - triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim); - triangulation.levels[0]->cells.reserve_space (0,cells.size()); - - // make up cells - typename Triangulation::raw_line_iterator - next_free_line = triangulation.begin_raw_line (); - for (unsigned int cell=0; cellused()) - ++next_free_line; - - next_free_line->set (internal::Triangulation - ::TriaObject<1> (cells[cell].vertices[0], - cells[cell].vertices[1])); - next_free_line->set_used_flag (); - next_free_line->set_material_id (cells[cell].material_id); - next_free_line->clear_user_data (); - next_free_line->set_subdomain_id (0); - - // note that this cell is - // adjacent to these vertices - lines_at_vertex[cells[cell].vertices[0]].push_back (cell); - lines_at_vertex[cells[cell].vertices[1]].push_back (cell); + // count lines on this level + number_cache.n_lines_level[level] = 0; + + line_iterator line = triangulation.begin_line (level), + endc = (level == number_cache.n_levels-1 ? + line_iterator(triangulation.end_line()) : + triangulation.begin_line (level+1)); + for (; line!=endc; ++line) + ++number_cache.n_lines_level[level]; + + // update total number of lines + number_cache.n_lines += number_cache.n_lines_level[level]; } + // do the update for the number of + // active lines as well + for (unsigned int level=0; levellevel() == static_cast(level)); ++line) + ++number_cache.n_active_lines_level[level]; - // some security tests + // update total number of lines + number_cache.n_active_lines += number_cache.n_active_lines_level[level]; + } + } + else + { + // for dim>1, there are no + // levels for lines { - unsigned int boundary_nodes = 0; - for (unsigned int i=0; i 1), - ExcMessage("The Triangulation has too many end points")); + { + active_line_iterator line = triangulation.begin_active_line (), + endc = triangulation.end_line(); + for (; line!=endc; ++line) + ++number_cache.n_active_lines; } + } + } + /** + * For a given Triangulation, update the + * number cache for quads. For 2d, we have + * to deal with the fact that quads have + * levels, whereas for higher dimensions + * they do not. + * + * The second argument indicates + * for how many levels the + * Triangulation has objects, + * though the highest levels need + * not contain active cells if they + * have previously all been + * coarsened away. + * + * At the beginning of the function, we call the + * respective function to update the number + * cache for lines. 
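
One detail of the scheme sketched in this comment is worth isolating: the quad update delegates to the line update by casting the derived cache object to a reference to its base class, so that overload resolution picks the base-class version instead of recursing. A minimal hypothetical analogue (CacheLines/CacheQuads are invented names, and the values filled in are placeholders):

struct CacheLines               // roughly what a NumberCache<1> corresponds to
{
  unsigned int n_levels;
  unsigned int n_lines;
};

struct CacheQuads : CacheLines  // a NumberCache<2> extends the line cache
{
  unsigned int n_quads;
};

void update (CacheLines &c)     // fill the line-related part
{
  c.n_levels = 3;               // placeholder values for the sketch
  c.n_lines  = 42;
}

void update (CacheQuads &c)
{
  // first update the base-class part...
  update (static_cast<CacheLines &> (c));

  // ...then the quad-specific part
  c.n_quads = 17;
}

Without the cast, the call update(c) inside the second overload would call itself; the static_cast is what selects the base-class overload, exactly as the static_cast to NumberCache<1>& does in the function that follows.
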
+ */ + template + static + void compute_number_cache (const Triangulation &triangulation, + const unsigned int level_objects, + internal::Triangulation::NumberCache<2> &number_cache) + { + // update lines and n_levels + compute_number_cache (triangulation, + level_objects, + static_cast&> + (number_cache)); + + typedef + typename Triangulation::quad_iterator quad_iterator; + typedef + typename Triangulation::active_quad_iterator active_quad_iterator; + + /////////////////////////////////// + // update the number of quads + // on the different levels in + // the cache + number_cache.n_quads_level.resize (number_cache.n_levels); + number_cache.n_quads = 0; + + number_cache.n_active_quads_level.resize (number_cache.n_levels); + number_cache.n_active_quads = 0; + + // for 2d, quads have levels so take + // count the objects per level and + // globally + if (dim == 2) + { + for (unsigned int level=0; level::active_line_iterator - line = triangulation.begin_active_line (); - // for all lines - for (; line!=triangulation.end(); ++line) - // for each of the two vertices - for (unsigned int vertex=0; vertex::vertices_per_cell; ++vertex) - // if first cell adjacent to - // this vertex is the present - // one, then the neighbor is - // the second adjacent cell and - // vice versa - if (lines_at_vertex[line->vertex_index(vertex)][0] == line->index()) - if (lines_at_vertex[line->vertex_index(vertex)].size() == 2) - { - const typename Triangulation::cell_iterator - neighbor (&triangulation, - 0, // level - lines_at_vertex[line->vertex_index(vertex)][1]); - line->set_neighbor (vertex, neighbor); - } - else - // no second adjacent cell - // entered -> cell at - // boundary - line->set_neighbor (vertex, triangulation.end()); - else - // present line is not first - // adjacent one -> first - // adjacent one is neighbor - { - const typename Triangulation::cell_iterator - neighbor (&triangulation, - 0, // level - lines_at_vertex[line->vertex_index(vertex)][0]); - line->set_neighbor (vertex, neighbor); - } + active_quad_iterator quad = triangulation.begin_active_quad (level), + endc = triangulation.end_quad (); + for (; (quad!=endc) && (quad->level() == static_cast(level)); ++quad) + ++number_cache.n_active_quads_level[level]; - // finally set the - // vertex_to_boundary_id_map_1d - // map - triangulation.vertex_to_boundary_id_map_1d->clear(); - for (typename Triangulation::active_cell_iterator - cell = triangulation.begin_active(); - cell != triangulation.end(); ++cell) - for (unsigned int f=0; f::faces_per_cell; ++f) - if (cell->at_boundary(f)) - (*triangulation - .vertex_to_boundary_id_map_1d)[cell->face(f)->vertex_index()] - = f; + // update total number of quads + number_cache.n_active_quads += number_cache.n_active_quads_level[level]; + } } + else + { + // for dim>2, there are no + // levels for quads + { + quad_iterator quad = triangulation.begin_quad (), + endc = triangulation.end_quad(); + for (; quad!=endc; ++quad) + ++number_cache.n_quads; + } + { + active_quad_iterator quad = triangulation.begin_active_quad (), + endc = triangulation.end_quad(); + for (; quad!=endc; ++quad) + ++number_cache.n_active_quads; + } + } + } - /** - * Create a triangulation from - * given data. This function does - * this work for 2-dimensional - * triangulations independently - * of the actual space dimension. 
- */ - template - static - void - create_triangulation (const std::vector > &v, - const std::vector > &cells, - const SubCellData &subcelldata, - Triangulation<2,spacedim> &triangulation) + /** + * For a given Triangulation, update the + * number cache for hexes. For 3d, we have + * to deal with the fact that hexes have + * levels, whereas for higher dimensions + * they do not. + * + * The second argument indicates + * for how many levels the + * Triangulation has objects, + * though the highest levels need + * not contain active cells if they + * have previously all been + * coarsened away. + * + * At the end of the function, we call the + * respective function to update the number + * cache for quads, which will in turn call + * the respective function for lines. + */ + template + static + void compute_number_cache (const Triangulation &triangulation, + const unsigned int level_objects, + internal::Triangulation::NumberCache<3> &number_cache) + { + // update quads, lines and n_levels + compute_number_cache (triangulation, + level_objects, + static_cast&> + (number_cache)); + + typedef + typename Triangulation::hex_iterator hex_iterator; + typedef + typename Triangulation::active_hex_iterator active_hex_iterator; + + /////////////////////////////////// + // update the number of hexes + // on the different levels in + // the cache + number_cache.n_hexes_level.resize (number_cache.n_levels); + number_cache.n_hexes = 0; + + number_cache.n_active_hexes_level.resize (number_cache.n_levels); + number_cache.n_active_hexes = 0; + + // for 3d, hexes have levels so take + // count the objects per level and + // globally + if (dim == 3) { - AssertThrow (v.size() > 0, ExcMessage ("No vertices given")); - AssertThrow (cells.size() > 0, ExcMessage ("No cells given")); - - const unsigned int dim=2; - - // copy vertices - triangulation.vertices = v; - triangulation.vertices_used = std::vector (v.size(), true); - - // make up a list of the needed - // lines each line is a pair of - // vertices. The list is kept - // sorted and it is guaranteed that - // each line is inserted only once. - // While the key of such an entry - // is the pair of vertices, the - // thing it points to is an - // iterator pointing to the line - // object itself. In the first run, - // these iterators are all invalid - // ones, but they are filled - // afterwards - std::map, - typename Triangulation::line_iterator> needed_lines; - for (unsigned int cell=0; cell::faces_per_cell; ++line) - { - // given a line vertex number - // (0,1) on a specific line we - // get the cell vertex number - // (0-4) through the - // line_to_cell_vertices - // function - std::pair line_vertices( - cells[cell].vertices[GeometryInfo::line_to_cell_vertices(line, 0)], - cells[cell].vertices[GeometryInfo::line_to_cell_vertices(line, 1)]); - - // assert that the line was - // not already inserted in - // reverse order. This - // happens in spite of the - // vertex rotation above, - // if the sense of the cell - // was incorrect. - // - // Here is what usually - // happened when this - // exception is thrown: - // consider these two cells - // and the vertices - // 3---4---5 - // | | | - // 0---1---2 - // If in the input vector - // the two cells are given - // with vertices <0 1 4 3> - // and <4 1 2 5>, in the - // first cell the middle - // line would have - // direction 1->4, while in - // the second it would be - // 4->1. This will cause - // the exception. 
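
The orientation check motivated by this comment boils down to a map lookup with the vertex pair reversed. Below is a free-standing sketch, with a made-up cell type and edge table (not deal.II's actual GeometryInfo<2> numbering), of the test carried out by the AssertThrow that follows:

#include <map>
#include <utility>
#include <vector>

// Hypothetical minimal cell type: the four vertex indices of a quad.
struct Cell2d { unsigned int vertices[4]; };

// Stand-in for a line_to_cell_vertices table: which two cell vertices
// bound each of the four lines of a quad (illustrative numbering only).
static const unsigned int line_vertices_table[4][2] =
  { {0,1}, {1,2}, {3,2}, {0,3} };

// Returns the index of the first badly oriented cell, or -1 if none.
int first_inverted_cell (const std::vector<Cell2d> &cells)
{
  typedef std::pair<unsigned int, unsigned int> VertexPair;
  std::map<VertexPair, unsigned int> needed_lines;  // directed edge -> a cell using it

  for (unsigned int c = 0; c < cells.size (); ++c)
    for (unsigned int line = 0; line < 4; ++line)
      {
        const VertexPair lv (cells[c].vertices[line_vertices_table[line][0]],
                             cells[c].vertices[line_vertices_table[line][1]]);

        // If the reversed pair is already known, the two adjacent cells
        // disagree about the direction of this line: bad cell.
        if (needed_lines.find (VertexPair (lv.second, lv.first))
            != needed_lines.end ())
          return static_cast<int> (c);

        needed_lines[lv] = c;
      }
  return -1;
}

A shared edge of two consistently oriented cells is generated with the same direction by both of them; finding the reversed pair therefore signals a cell whose vertex ordering has the wrong sense.
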
- AssertThrow (needed_lines.find(std::make_pair(line_vertices.second, - line_vertices.first)) - == - needed_lines.end(), - ExcGridHasInvalidCell(cell)); - - // insert line, with - // invalid iterator if line - // already exists, then - // nothing bad happens here - needed_lines[line_vertices] = triangulation.end_line(); - } + // count hexes on this level + number_cache.n_hexes_level[level] = 0; + + hex_iterator hex = triangulation.begin_hex (level), + endc = (level == number_cache.n_levels-1 ? + hex_iterator(triangulation.end_hex()) : + triangulation.begin_hex (level+1)); + for (; hex!=endc; ++hex) + ++number_cache.n_hexes_level[level]; + + // update total number of hexes + number_cache.n_hexes += number_cache.n_hexes_level[level]; } + // do the update for the number of + // active hexes as well + for (unsigned int level=0; levellevel() == static_cast(level)); ++hex) + ++number_cache.n_active_hexes_level[level]; - // check that every vertex has at - // least two adjacent lines + // update total number of hexes + number_cache.n_active_hexes += number_cache.n_active_hexes_level[level]; + } + } + else + { + // for dim>3, there are no + // levels for hexs { - std::vector vertex_touch_count (v.size(), 0); - typename std::map, - typename Triangulation::line_iterator>::iterator i; - for (i=needed_lines.begin(); i!=needed_lines.end(); i++) - { - // touch the vertices of - // this line - ++vertex_touch_count[i->first.first]; - ++vertex_touch_count[i->first.second]; - } + hex_iterator hex = triangulation.begin_hex (), + endc = triangulation.end_hex(); + for (; hex!=endc; ++hex) + ++number_cache.n_hexes; + } - // assert minimum touch count - // is at least two. if not so, - // then clean triangulation and - // exit with an exception - AssertThrow (* (std::min_element(vertex_touch_count.begin(), - vertex_touch_count.end())) >= 2, - ExcGridHasInvalidVertices()); + { + active_hex_iterator hex = triangulation.begin_active_hex (), + endc = triangulation.end_hex(); + for (; hex!=endc; ++hex) + ++number_cache.n_active_hexes; } + } + } - // reserve enough space - triangulation.levels.push_back (new internal::Triangulation::TriaLevel); - triangulation.faces = new internal::Triangulation::TriaFaces; - triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim); - triangulation.faces->lines.reserve_space (0,needed_lines.size()); - triangulation.levels[0]->cells.reserve_space (0,cells.size()); - // make up lines - { - typename Triangulation::raw_line_iterator - line = triangulation.begin_raw_line(); - typename std::map, - typename Triangulation::line_iterator>::iterator i; - for (i = needed_lines.begin(); - line!=triangulation.end_line(); ++line, ++i) + /** + * Create a triangulation from + * given data. This function does + * this work for 1-dimensional + * triangulations independently + * of the actual space dimension. + */ + template + static + void + create_triangulation (const std::vector > &v, + const std::vector > &cells, - const SubCellData & /*subcelldata*/, ++ const SubCellData &/*subcelldata*/, + Triangulation<1,spacedim> &triangulation) + { + AssertThrow (v.size() > 0, ExcMessage ("No vertices given")); + AssertThrow (cells.size() > 0, ExcMessage ("No cells given")); + + // note: since no boundary + // information can be given in one + // dimension, the @p{subcelldata} + // field is ignored. 
(only used for + // error checking, which is a good + // idea in any case) + const unsigned int dim=1; + + // copy vertices + triangulation.vertices = v; + triangulation.vertices_used = std::vector (v.size(), true); + + // store the indices of the lines + // which are adjacent to a given + // vertex + std::vector > lines_at_vertex (v.size()); + + // reserve enough space + triangulation.levels.push_back (new internal::Triangulation::TriaLevel); + triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim); + triangulation.levels[0]->cells.reserve_space (0,cells.size()); + + // make up cells + typename Triangulation::raw_line_iterator + next_free_line = triangulation.begin_raw_line (); + for (unsigned int cell=0; cellused()) + ++next_free_line; + + next_free_line->set (internal::Triangulation + ::TriaObject<1> (cells[cell].vertices[0], + cells[cell].vertices[1])); + next_free_line->set_used_flag (); + next_free_line->set_material_id (cells[cell].material_id); + next_free_line->clear_user_data (); + next_free_line->set_subdomain_id (0); + + // note that this cell is + // adjacent to these vertices + lines_at_vertex[cells[cell].vertices[0]].push_back (cell); + lines_at_vertex[cells[cell].vertices[1]].push_back (cell); + } + + + // some security tests + { + unsigned int boundary_nodes = 0; + for (unsigned int i=0; i 1), + ExcMessage("The Triangulation has too many end points")); + } + + + + // update neighborship info + typename Triangulation::active_line_iterator + line = triangulation.begin_active_line (); + // for all lines + for (; line!=triangulation.end(); ++line) + // for each of the two vertices + for (unsigned int vertex=0; vertex::vertices_per_cell; ++vertex) + // if first cell adjacent to + // this vertex is the present + // one, then the neighbor is + // the second adjacent cell and + // vice versa + if (lines_at_vertex[line->vertex_index(vertex)][0] == line->index()) + if (lines_at_vertex[line->vertex_index(vertex)].size() == 2) { - line->set (internal::Triangulation::TriaObject<1>(i->first.first, - i->first.second)); - line->set_used_flag (); - line->clear_user_flag (); - line->clear_user_data (); - i->second = line; + const typename Triangulation::cell_iterator + neighbor (&triangulation, + 0, // level + lines_at_vertex[line->vertex_index(vertex)][1]); + line->set_neighbor (vertex, neighbor); } + else + // no second adjacent cell + // entered -> cell at + // boundary + line->set_neighbor (vertex, triangulation.end()); + else + // present line is not first + // adjacent one -> first + // adjacent one is neighbor + { + const typename Triangulation::cell_iterator + neighbor (&triangulation, + 0, // level + lines_at_vertex[line->vertex_index(vertex)][0]); + line->set_neighbor (vertex, neighbor); + } + + // finally set the + // vertex_to_boundary_id_map_1d + // map + triangulation.vertex_to_boundary_id_map_1d->clear(); + for (typename Triangulation::active_cell_iterator + cell = triangulation.begin_active(); + cell != triangulation.end(); ++cell) + for (unsigned int f=0; f::faces_per_cell; ++f) + if (cell->at_boundary(f)) + (*triangulation + .vertex_to_boundary_id_map_1d)[cell->face(f)->vertex_index()] + = f; + } + + + /** + * Create a triangulation from + * given data. This function does + * this work for 2-dimensional + * triangulations independently + * of the actual space dimension. 
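
Before the two-dimensional version that follows, the 1d neighbour bookkeeping used above is simple enough to restate as a free-standing sketch (all types are hypothetical stand-ins; a 1d cell is reduced to its two vertex indices): recording which cells touch each vertex immediately yields the neighbour of every cell behind each of its two vertices, and vertices touched by only one cell are the boundary end points.

#include <vector>

struct Cell1d { unsigned int v[2]; };                 // two vertex indices

// For each cell, neighbors[c][0|1] is the neighbouring cell behind
// vertex 0 resp. 1, or -1 if that vertex lies on the boundary.
std::vector<std::vector<int> >
find_neighbors_1d (const std::vector<Cell1d> &cells,
                   const unsigned int n_vertices)
{
  // which cells are adjacent to a given vertex (at most two in 1d)
  std::vector<std::vector<unsigned int> > cells_at_vertex (n_vertices);
  for (unsigned int c = 0; c < cells.size (); ++c)
    {
      cells_at_vertex[cells[c].v[0]].push_back (c);
      cells_at_vertex[cells[c].v[1]].push_back (c);
    }

  std::vector<std::vector<int> > neighbors (cells.size (),
                                            std::vector<int> (2, -1));
  for (unsigned int c = 0; c < cells.size (); ++c)
    for (unsigned int vertex = 0; vertex < 2; ++vertex)
      {
        const std::vector<unsigned int> &adj = cells_at_vertex[cells[c].v[vertex]];
        // the neighbour is "the other" cell at this vertex, if there is one
        if (adj.size () == 2)
          neighbors[c][vertex] = static_cast<int> (adj[0] == c ? adj[1] : adj[0]);
      }
  return neighbors;
}
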
+ */ + template + static + void + create_triangulation (const std::vector > &v, + const std::vector > &cells, + const SubCellData &subcelldata, + Triangulation<2,spacedim> &triangulation) + { + AssertThrow (v.size() > 0, ExcMessage ("No vertices given")); + AssertThrow (cells.size() > 0, ExcMessage ("No cells given")); + + const unsigned int dim=2; + + // copy vertices + triangulation.vertices = v; + triangulation.vertices_used = std::vector (v.size(), true); + + // make up a list of the needed + // lines each line is a pair of + // vertices. The list is kept + // sorted and it is guaranteed that + // each line is inserted only once. + // While the key of such an entry + // is the pair of vertices, the + // thing it points to is an + // iterator pointing to the line + // object itself. In the first run, + // these iterators are all invalid + // ones, but they are filled + // afterwards + std::map, + typename Triangulation::line_iterator> needed_lines; + for (unsigned int cell=0; cell::faces_per_cell; ++line) + { + // given a line vertex number + // (0,1) on a specific line we + // get the cell vertex number + // (0-4) through the + // line_to_cell_vertices + // function + std::pair line_vertices( + cells[cell].vertices[GeometryInfo::line_to_cell_vertices(line, 0)], + cells[cell].vertices[GeometryInfo::line_to_cell_vertices(line, 1)]); + + // assert that the line was + // not already inserted in + // reverse order. This + // happens in spite of the + // vertex rotation above, + // if the sense of the cell + // was incorrect. + // + // Here is what usually + // happened when this + // exception is thrown: + // consider these two cells + // and the vertices + // 3---4---5 + // | | | + // 0---1---2 + // If in the input vector + // the two cells are given + // with vertices <0 1 4 3> + // and <4 1 2 5>, in the + // first cell the middle + // line would have + // direction 1->4, while in + // the second it would be + // 4->1. This will cause + // the exception. + AssertThrow (needed_lines.find(std::make_pair(line_vertices.second, + line_vertices.first)) + == + needed_lines.end(), + ExcGridHasInvalidCell(cell)); + + // insert line, with + // invalid iterator if line + // already exists, then + // nothing bad happens here + needed_lines[line_vertices] = triangulation.end_line(); + } + } + + + // check that every vertex has at + // least two adjacent lines + { + std::vector vertex_touch_count (v.size(), 0); + typename std::map, + typename Triangulation::line_iterator>::iterator i; + for (i=needed_lines.begin(); i!=needed_lines.end(); i++) + { + // touch the vertices of + // this line + ++vertex_touch_count[i->first.first]; + ++vertex_touch_count[i->first.second]; } + // assert minimum touch count + // is at least two. 
if not so, + // then clean triangulation and + // exit with an exception + AssertThrow (* (std::min_element(vertex_touch_count.begin(), + vertex_touch_count.end())) >= 2, + ExcGridHasInvalidVertices()); + } - // store for each line index - // the adjacent cells - std::map::cell_iterator> > - adjacent_cells; + // reserve enough space + triangulation.levels.push_back (new internal::Triangulation::TriaLevel); + triangulation.faces = new internal::Triangulation::TriaFaces; + triangulation.levels[0]->reserve_space (cells.size(), dim, spacedim); + triangulation.faces->lines.reserve_space (0,needed_lines.size()); + triangulation.levels[0]->cells.reserve_space (0,cells.size()); - // finally make up cells + // make up lines + { + typename Triangulation::raw_line_iterator + line = triangulation.begin_raw_line(); + typename std::map, + typename Triangulation::line_iterator>::iterator i; + for (i = needed_lines.begin(); + line!=triangulation.end_line(); ++line, ++i) { - typename Triangulation::raw_cell_iterator - cell = triangulation.begin_raw_quad(); - for (unsigned int c=0; c::line_iterator - lines[GeometryInfo::lines_per_cell]; - for (unsigned int line=0; line::lines_per_cell; ++line) - lines[line]=needed_lines[std::make_pair( - cells[c].vertices[GeometryInfo::line_to_cell_vertices(line, 0)], - cells[c].vertices[GeometryInfo::line_to_cell_vertices(line, 1)])]; - - cell->set (internal::Triangulation::TriaObject<2> (lines[0]->index(), - lines[1]->index(), - lines[2]->index(), - lines[3]->index())); - - cell->set_used_flag (); - cell->set_material_id (cells[c].material_id); - cell->clear_user_data (); - cell->set_subdomain_id (0); - - // note that this cell is - // adjacent to the four - // lines - for (unsigned int line=0; line::lines_per_cell; ++line) - adjacent_cells[lines[line]->index()].push_back (cell); - } + line->set (internal::Triangulation::TriaObject<1>(i->first.first, + i->first.second)); + line->set_used_flag (); + line->clear_user_flag (); + line->clear_user_data (); + i->second = line; } + } - for (typename Triangulation::line_iterator - line=triangulation.begin_line(); - line!=triangulation.end_line(); ++line) - { - const unsigned int n_adj_cells = adjacent_cells[line->index()].size(); - // assert that every line has - // one or two adjacent cells - AssertThrow ((n_adj_cells >= 1) && - (n_adj_cells <= 2), - ExcInternalError()); + // store for each line index + // the adjacent cells + std::map::cell_iterator> > + adjacent_cells; - // if only one cell: line is at - // boundary -> give it the - // boundary indicator zero by - // default - if (n_adj_cells == 1) - line->set_boundary_indicator (0); - else - // interior line -> numbers::internal_face_boundary_id - line->set_boundary_indicator (numbers::internal_face_boundary_id); - } + // finally make up cells + { + typename Triangulation::raw_cell_iterator + cell = triangulation.begin_raw_quad(); + for (unsigned int c=0; c::line_iterator + lines[GeometryInfo::lines_per_cell]; + for (unsigned int line=0; line::lines_per_cell; ++line) + lines[line]=needed_lines[std::make_pair( + cells[c].vertices[GeometryInfo::line_to_cell_vertices(line, 0)], + cells[c].vertices[GeometryInfo::line_to_cell_vertices(line, 1)])]; + + cell->set (internal::Triangulation::TriaObject<2> (lines[0]->index(), + lines[1]->index(), + lines[2]->index(), + lines[3]->index())); + + cell->set_used_flag (); + cell->set_material_id (cells[c].material_id); + cell->clear_user_data (); + cell->set_subdomain_id (0); + + // note that this cell is + // adjacent to the four + // lines 
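
The adjacency list filled in the loop after this comment also drives the boundary classification a few lines further down: a line with a single adjacent cell lies on the boundary, a line with two is interior. In isolation, and with hypothetical types, that logic looks like this:

#include <map>
#include <vector>

// Condensed version of the adjacency bookkeeping: given, for every cell,
// the indices of its four bounding lines, classify each line as boundary
// (one adjacent cell) or interior (two adjacent cells).
std::map<unsigned int, bool>           // line index -> "is at boundary"
classify_lines (const std::vector<std::vector<unsigned int> > &cell_lines)
{
  std::map<unsigned int, unsigned int> n_adjacent_cells;
  for (unsigned int c = 0; c < cell_lines.size (); ++c)
    for (unsigned int l = 0; l < cell_lines[c].size (); ++l)
      ++n_adjacent_cells[cell_lines[c][l]];

  std::map<unsigned int, bool> at_boundary;
  for (std::map<unsigned int, unsigned int>::const_iterator
         it = n_adjacent_cells.begin (); it != n_adjacent_cells.end (); ++it)
    {
      // every line of a valid mesh has one or two adjacent cells
      // (the real code asserts this with ExcInternalError)
      at_boundary[it->first] = (it->second == 1);
    }
  return at_boundary;
}
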
+ for (unsigned int line=0; line::lines_per_cell; ++line) + adjacent_cells[lines[line]->index()].push_back (cell); + } + } + + + for (typename Triangulation::line_iterator + line=triangulation.begin_line(); + line!=triangulation.end_line(); ++line) + { + const unsigned int n_adj_cells = adjacent_cells[line->index()].size(); + // assert that every line has + // one or two adjacent cells + AssertThrow ((n_adj_cells >= 1) && + (n_adj_cells <= 2), + ExcInternalError()); + + // if only one cell: line is at + // boundary -> give it the + // boundary indicator zero by + // default + if (n_adj_cells == 1) + line->set_boundary_indicator (0); + else + // interior line -> numbers::internal_face_boundary_id + line->set_boundary_indicator (numbers::internal_face_boundary_id); + } - // set boundary indicators where - // given - std::vector >::const_iterator boundary_line - = subcelldata.boundary_lines.begin(); - std::vector >::const_iterator end_boundary_line - = subcelldata.boundary_lines.end(); - for (; boundary_line!=end_boundary_line; ++boundary_line) + // set boundary indicators where + // given + std::vector >::const_iterator boundary_line + = subcelldata.boundary_lines.begin(); + std::vector >::const_iterator end_boundary_line + = subcelldata.boundary_lines.end(); + for (; boundary_line!=end_boundary_line; ++boundary_line) + { + typename Triangulation::line_iterator line; + std::pair line_vertices(std::make_pair(boundary_line->vertices[0], + boundary_line->vertices[1])); + if (needed_lines.find(line_vertices) != needed_lines.end()) + // line found in this + // direction + line = needed_lines[line_vertices]; + else { - typename Triangulation::line_iterator line; - std::pair line_vertices(std::make_pair(boundary_line->vertices[0], - boundary_line->vertices[1])); + // look whether it exists + // in reverse direction + std::swap (line_vertices.first, line_vertices.second); if (needed_lines.find(line_vertices) != needed_lines.end()) - // line found in this - // direction line = needed_lines[line_vertices]; else - { - // look whether it exists - // in reverse direction - std::swap (line_vertices.first, line_vertices.second); - if (needed_lines.find(line_vertices) != needed_lines.end()) - line = needed_lines[line_vertices]; - else - // line does not exist - AssertThrow (false, ExcLineInexistant(line_vertices.first, - line_vertices.second)); - } + // line does not exist + AssertThrow (false, ExcLineInexistant(line_vertices.first, + line_vertices.second)); + } - // assert that we only set - // boundary info once - AssertThrow (! (line->boundary_indicator() != 0 && - line->boundary_indicator() != numbers::internal_face_boundary_id), - ExcMultiplySetLineInfoOfLine(line_vertices.first, - line_vertices.second)); + // assert that we only set + // boundary info once + AssertThrow (! (line->boundary_indicator() != 0 && + line->boundary_indicator() != numbers::internal_face_boundary_id), + ExcMultiplySetLineInfoOfLine(line_vertices.first, + line_vertices.second)); - // Assert that only exterior lines - // are given a boundary indicator - AssertThrow (! (line->boundary_indicator() == numbers::internal_face_boundary_id), - ExcInteriorLineCantBeBoundary()); + // Assert that only exterior lines + // are given a boundary indicator + AssertThrow (! 
(line->boundary_indicator() == numbers::internal_face_boundary_id), + ExcInteriorLineCantBeBoundary()); - line->set_boundary_indicator (boundary_line->boundary_id); - } + line->set_boundary_indicator (boundary_line->boundary_id); + } - // finally update neighborship info - for (typename Triangulation::cell_iterator - cell=triangulation.begin(); cell!=triangulation.end(); ++cell) - for (unsigned int side=0; side<4; ++side) - if (adjacent_cells[cell->line(side)->index()][0] == cell) - // first adjacent cell is - // this one - { - if (adjacent_cells[cell->line(side)->index()].size() == 2) - // there is another - // adjacent cell - cell->set_neighbor (side, - adjacent_cells[cell->line(side)->index()][1]); - } - // first adjacent cell is not this - // one, -> it must be the neighbor - // we are looking for - else + // finally update neighborship info + for (typename Triangulation::cell_iterator + cell=triangulation.begin(); cell!=triangulation.end(); ++cell) + for (unsigned int side=0; side<4; ++side) + if (adjacent_cells[cell->line(side)->index()][0] == cell) + // first adjacent cell is + // this one + { + if (adjacent_cells[cell->line(side)->index()].size() == 2) + // there is another + // adjacent cell cell->set_neighbor (side, - adjacent_cells[cell->line(side)->index()][0]); - } + adjacent_cells[cell->line(side)->index()][1]); + } + // first adjacent cell is not this + // one, -> it must be the neighbor + // we are looking for + else + cell->set_neighbor (side, + adjacent_cells[cell->line(side)->index()][0]); + } - /** - * Invent an object which compares two internal::Triangulation::TriaObject<2> - * against each other. This comparison is needed in order to establish a map - * of TriaObject<2> to iterators in the Triangulation<3,3>::create_triangulation - * function. - * - * Since this comparison is not canonical, we do not include it into the - * general internal::Triangulation::TriaObject<2> class. - */ - struct QuadComparator + /** + * Invent an object which compares two internal::Triangulation::TriaObject<2> + * against each other. This comparison is needed in order to establish a map + * of TriaObject<2> to iterators in the Triangulation<3,3>::create_triangulation + * function. + * + * Since this comparison is not canonical, we do not include it into the + * general internal::Triangulation::TriaObject<2> class. 
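
A remark on the comparator defined next: the chain of comparisons it spells out by hand (the code predates C++11) is simply a lexicographic order on the four line indices, which a newer compiler would let one write more compactly, for instance as follows (Quad is a made-up stand-in for TriaObject<2>):

#include <tuple>

// Hypothetical stand-in for internal::Triangulation::TriaObject<2>:
// a quad described by the indices of its four bounding lines.
struct Quad { int face[4]; };

// Same ordering as the hand-written chain of comparisons:
// lexicographic comparison of (face 0, face 1, face 2, face 3).
struct QuadLess
{
  bool operator() (const Quad &a, const Quad &b) const
  {
    return std::tie (a.face[0], a.face[1], a.face[2], a.face[3])
         < std::tie (b.face[0], b.face[1], b.face[2], b.face[3]);
  }
};

Either formulation defines the strict weak ordering needed to use the type as a key in a std::map.
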
+ */ + struct QuadComparator + { + inline bool operator () (const internal::Triangulation::TriaObject<2> &q1, + const internal::Triangulation::TriaObject<2> &q2) const { - inline bool operator () (const internal::Triangulation::TriaObject<2> &q1, - const internal::Triangulation::TriaObject<2> &q2) const - { - // here is room to - // optimize the repeated - // equality test of the - // previous lines; the - // compiler will probably - // take care of most of - // it anyway - if ((q1.face(0) < q2.face(0)) || - ((q1.face(0) == q2.face(0)) && - (q1.face(1) < q2.face(1))) || - ((q1.face(0) == q2.face(0)) && - (q1.face(1) == q2.face(1)) && - (q1.face(2) < q2.face(2))) || - ((q1.face(0) == q2.face(0)) && - (q1.face(1) == q2.face(1)) && - (q1.face(2) == q2.face(2)) && - (q1.face(3) < q2.face(3)))) - return true; - else - return false; - } - }; + // here is room to + // optimize the repeated + // equality test of the + // previous lines; the + // compiler will probably + // take care of most of + // it anyway + if ((q1.face(0) < q2.face(0)) || + ((q1.face(0) == q2.face(0)) && + (q1.face(1) < q2.face(1))) || + ((q1.face(0) == q2.face(0)) && + (q1.face(1) == q2.face(1)) && + (q1.face(2) < q2.face(2))) || + ((q1.face(0) == q2.face(0)) && + (q1.face(1) == q2.face(1)) && + (q1.face(2) == q2.face(2)) && + (q1.face(3) < q2.face(3)))) + return true; + else + return false; + } + }; diff --cc deal.II/source/hp/dof_handler.cc index 55af7806f8,c78764fceb..4d4a5dba88 --- a/deal.II/source/hp/dof_handler.cc +++ b/deal.II/source/hp/dof_handler.cc @@@ -1825,8 -1825,7 +1825,8 @@@ namespace h } + - template <> + template <> unsigned int DoFHandler<1,3>::n_boundary_dofs () const { Assert(false,ExcNotImplemented()); @@@ -3194,8 -3193,7 +3194,8 @@@ } + - template <> + template <> void DoFHandler<1,3>::pre_refinement_action () { create_active_fe_table (); diff --cc deal.II/source/hp/fe_values.cc index 66c1d73e25,04278575f9..a8e7ece1d4 --- a/deal.II/source/hp/fe_values.cc +++ b/deal.II/source/hp/fe_values.cc @@@ -311,25 -311,25 +311,25 @@@ namespace h template FEFaceValues::FEFaceValues (const hp::MappingCollection &mapping, - const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags) - : - internal::hp::FEValuesBase > (mapping, - fe_collection, - q_collection, - update_flags) - const hp::FECollection &fe_collection, ++ const hp::FECollection &fe_collection, + const hp::QCollection &q_collection, + const UpdateFlags update_flags) + : + internal::hp::FEValuesBase > (mapping, + fe_collection, + q_collection, + update_flags) {} template - FEFaceValues::FEFaceValues (const hp::FECollection &fe_collection, + FEFaceValues::FEFaceValues (const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags) - : - internal::hp::FEValuesBase > (fe_collection, - q_collection, - update_flags) + const hp::QCollection &q_collection, + const UpdateFlags update_flags) + : + internal::hp::FEValuesBase > (fe_collection, + q_collection, + update_flags) {} @@@ -511,25 -511,25 +511,25 @@@ template FESubfaceValues::FESubfaceValues (const hp::MappingCollection &mapping, - const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags) - : - internal::hp::FEValuesBase > (mapping, - fe_collection, - q_collection, - update_flags) - const hp::FECollection &fe_collection, ++ const hp::FECollection &fe_collection, + const hp::QCollection &q_collection, + const UpdateFlags update_flags) + : + 
internal::hp::FEValuesBase > (mapping, + fe_collection, + q_collection, + update_flags) {} template - FESubfaceValues::FESubfaceValues (const hp::FECollection &fe_collection, + FESubfaceValues::FESubfaceValues (const hp::FECollection &fe_collection, - const hp::QCollection &q_collection, - const UpdateFlags update_flags) - : - internal::hp::FEValuesBase > (fe_collection, - q_collection, - update_flags) + const hp::QCollection &q_collection, + const UpdateFlags update_flags) + : + internal::hp::FEValuesBase > (fe_collection, + q_collection, + update_flags) {} diff --cc deal.II/source/lac/petsc_parallel_vector.cc index 1773cf1a6c,225c1ebda1..fb25d77f61 --- a/deal.II/source/lac/petsc_parallel_vector.cc +++ b/deal.II/source/lac/petsc_parallel_vector.cc @@@ -53,10 -53,10 +53,10 @@@ namespace PETScWrapper Vector::Vector (const MPI_Comm &communicator, - const VectorBase &v, + const VectorBase &v, const unsigned int local_size) - : - communicator (communicator) + : + communicator (communicator) { Vector::create_vector (v.size(), local_size); diff --cc deal.II/source/lac/petsc_solver.cc index b0713c88b6,60a7ecc4e4..baa55261a2 --- a/deal.II/source/lac/petsc_solver.cc +++ b/deal.II/source/lac/petsc_solver.cc @@@ -42,11 -42,11 +42,11 @@@ namespace PETScWrapper - SolverBase::SolverBase (SolverControl &cn, + SolverBase::SolverBase (SolverControl &cn, const MPI_Comm &mpi_communicator) - : - solver_control (cn), - mpi_communicator (mpi_communicator) + : + solver_control (cn), + mpi_communicator (mpi_communicator) {} diff --cc deal.II/source/lac/sparse_direct.cc index 47139fc874,200a2d09de..5bdb4eed7a --- a/deal.II/source/lac/sparse_direct.cc +++ b/deal.II/source/lac/sparse_direct.cc @@@ -2103,10 -2103,10 +2103,10 @@@ void SparseDirectMA27::factorize (cons template void SparseDirectMA27::solve (const SparseMatrix &matrix, - Vector &rhs_and_solution); + Vector &rhs_and_solution); template -void SparseDirectMA27::solve (const SparseMatrix &matrix, +void SparseDirectMA27::solve (const SparseMatrix &matrix, Vector &rhs_and_solution); diff --cc deal.II/source/lac/trilinos_solver.cc index f87bd8e421,061a203401..3f1e06cf19 --- a/deal.II/source/lac/trilinos_solver.cc +++ b/deal.II/source/lac/trilinos_solver.cc @@@ -37,10 -37,10 +37,10 @@@ namespace TrilinosWrapper - SolverBase::SolverBase (SolverControl &cn) + SolverBase::SolverBase (SolverControl &cn) - : - solver_name (gmres), - solver_control (cn) + : + solver_name (gmres), + solver_control (cn) {} @@@ -384,11 -384,11 +384,11 @@@ - SolverDirect::SolverDirect (SolverControl &cn, + SolverDirect::SolverDirect (SolverControl &cn, const AdditionalData &data) - : - solver_control (cn), - additional_data (data.output_solver_details) + : + solver_control (cn), + additional_data (data.output_solver_details) {} diff --cc deal.II/source/lac/trilinos_sparse_matrix.cc index 449562d108,9f077e789d..6d3bae3004 --- a/deal.II/source/lac/trilinos_sparse_matrix.cc +++ b/deal.II/source/lac/trilinos_sparse_matrix.cc @@@ -112,14 -112,14 +112,14 @@@ namespace TrilinosWrapper - SparseMatrix::SparseMatrix (const Epetra_Map &input_map, + SparseMatrix::SparseMatrix (const Epetra_Map &input_map, const unsigned int n_max_entries_per_row) - : - column_space_map (new Epetra_Map (input_map)), - matrix (new Epetra_FECrsMatrix(Copy, *column_space_map, - int(n_max_entries_per_row), false)), - last_action (Zero), - compressed (false) + : + column_space_map (new Epetra_Map (input_map)), + matrix (new Epetra_FECrsMatrix(Copy, *column_space_map, + int(n_max_entries_per_row), false)), + last_action 
(Zero), + compressed (false) {} @@@ -138,15 -138,15 +138,15 @@@ - SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map, - const Epetra_Map &input_col_map, + SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map, + const Epetra_Map &input_col_map, const unsigned int n_max_entries_per_row) - : - column_space_map (new Epetra_Map (input_col_map)), - matrix (new Epetra_FECrsMatrix(Copy, input_row_map, - int(n_max_entries_per_row), false)), - last_action (Zero), - compressed (false) + : + column_space_map (new Epetra_Map (input_col_map)), + matrix (new Epetra_FECrsMatrix(Copy, input_row_map, + int(n_max_entries_per_row), false)), + last_action (Zero), + compressed (false) {} @@@ -366,10 -366,10 +366,10 @@@ void SparseMatrix::reinit (const Epetra_Map &input_row_map, const Epetra_Map &input_col_map, - const SparsityType &sparsity_pattern, + const SparsityType &sparsity_pattern, const bool exchange_data) { - // release memory before reallocation + // release memory before reallocation temp_vector.clear(); matrix.reset(); diff --cc deal.II/source/multigrid/mg_dof_handler.cc index 16f347783c,b2b0a91f52..351811a180 --- a/deal.II/source/multigrid/mg_dof_handler.cc +++ b/deal.II/source/multigrid/mg_dof_handler.cc @@@ -1100,7 -1102,8 +1102,8 @@@ void MGDoFHandler<1>::renumber_dofs (co template <> void MGDoFHandler<2>::renumber_dofs (const unsigned int level, - const std::vector &new_numbers) { - const std::vector &new_numbers) ++ const std::vector &new_numbers) + { Assert (new_numbers.size() == n_dofs(level), DoFHandler<2>::ExcRenumberingIncomplete()); @@@ -1155,7 -1158,8 +1158,8 @@@ template <> void MGDoFHandler<3>::renumber_dofs (const unsigned int level, - const std::vector &new_numbers) { - const std::vector &new_numbers) ++ const std::vector &new_numbers) + { Assert (new_numbers.size() == n_dofs(level), DoFHandler<3>::ExcRenumberingIncomplete()); diff --cc deal.II/source/multigrid/mg_tools.cc index 7d2e640eba,a6dfd675a0..0019cc798c --- a/deal.II/source/multigrid/mg_tools.cc +++ b/deal.II/source/multigrid/mg_tools.cc @@@ -550,8 -549,8 +550,8 @@@ namespace MGTool const unsigned int dofs_per_cell = dof.get_fe().dofs_per_cell; std::vector dofs_on_this_cell(dofs_per_cell); - typename MGDoFHandler::cell_iterator cell = dof.begin(level), - endc = dof.end(level); + typename DH::cell_iterator cell = dof.begin(level), - endc = dof.end(level); ++ endc = dof.end(level); for (; cell!=endc; ++cell) { cell->get_mg_dof_indices (dofs_on_this_cell); @@@ -940,12 -939,13 +940,12 @@@ template void - count_dofs_per_component (const MGDoFHandler &dof_handler, - std::vector > &result, - bool only_once, - std::vector target_component) + std::vector > &result, + bool only_once, + std::vector target_component) { - const FiniteElement& fe = dof_handler.get_fe(); + const FiniteElement &fe = dof_handler.get_fe(); const unsigned int n_components = fe.n_components(); const unsigned int nlevels = dof_handler.get_tria().n_levels(); @@@ -982,57 -984,57 +984,57 @@@ dofs_in_component (n_components, std::vector(dof_handler.n_dofs(l), false)); - std::vector component_select (n_components); - Threads::TaskGroup<> tasks; - for (unsigned int i=0; i &, - const ComponentMask &, - std::vector &) - = &DoFTools::template extract_level_dofs >; - - std::vector tmp(n_components, false); - tmp[i] = true; - component_select[i] = ComponentMask(tmp); - - tasks += Threads::new_task (fun_ptr, - l, dof_handler, - component_select[i], - dofs_in_component[i]); - } - tasks.join_all(); - - // next count what we got - unsigned int component = 0; - 
for (unsigned int b=0;b& base = fe.base_element(b); - // Dimension of base element - unsigned int d = base.n_components(); - - for (unsigned int m=0;m component_select (n_components); + Threads::TaskGroup<> tasks; + for (unsigned int i=0; i &, + const ComponentMask &, + std::vector &) - = &DoFTools::template extract_level_dofs; ++ = &DoFTools::template extract_level_dofs >; + + std::vector tmp(n_components, false); + tmp[i] = true; + component_select[i] = ComponentMask(tmp); + + tasks += Threads::new_task (fun_ptr, + l, dof_handler, + component_select[i], + dofs_in_component[i]); + } + tasks.join_all(); + + // next count what we got + unsigned int component = 0; + for (unsigned int b=0; b &base = fe.base_element(b); + // Dimension of base element + unsigned int d = base.n_components(); + + for (unsigned int m=0; m + template void count_dofs_per_block ( - const DH& dof_handler, - std::vector >& dofs_per_block, - const MGDoFHandler &dof_handler, ++ const DH &dof_handler, + std::vector > &dofs_per_block, std::vector target_block) { - const FiniteElement& fe = dof_handler.get_fe(); - const FiniteElement &fe = dof_handler.get_fe(); ++ const FiniteElement &fe = dof_handler.get_fe(); const unsigned int n_blocks = fe.n_blocks(); const unsigned int n_levels = dof_handler.get_tria().n_levels(); @@@ -1107,14 -1109,14 +1109,14 @@@ for (unsigned int i=0; i &, ++ const DH &, const BlockMask &, - std::vector&) + std::vector &) - = &DoFTools::template extract_level_dofs; + = &DoFTools::template extract_level_dofs; std::vector tmp(n_blocks, false); - tmp[i] = true; - block_select[i] = tmp; + tmp[i] = true; + block_select[i] = tmp; tasks += Threads::new_task (fun_ptr, l, dof_handler, block_select[i], @@@ -1261,87 -1264,88 +1263,87 @@@ "elements")); } - typename MGDoFHandler::face_iterator face = cell->face(face_no); - const types::boundary_id boundary_component = face->boundary_indicator(); - if (function_map.find(boundary_component) != function_map.end()) - // face is of the right component - { - // get indices, physical location and - // boundary values of dofs on this - // face - local_dofs.resize (fe.dofs_per_face); - face->get_mg_dof_indices (level, local_dofs); - if (fe_is_system) - { - // enter those dofs - // into the list that - // match the - // component - // signature. avoid - // the usual - // complication that - // we can't just use - // *_system_to_component_index - // for non-primitive - // FEs - for (unsigned int i=0; i::face_iterator face = cell->face(face_no); + const types::boundary_id boundary_component = face->boundary_indicator(); + if (function_map.find(boundary_component) != function_map.end()) + // face is of the right component + { + // get indices, physical location and + // boundary values of dofs on this + // face + local_dofs.resize (fe.dofs_per_face); + face->get_mg_dof_indices (level, local_dofs); + if (fe_is_system) + { + // enter those dofs + // into the list that + // match the + // component + // signature. 
avoid + // the usual + // complication that + // we can't just use + // *_system_to_component_index + // for non-primitive + // FEs + for (unsigned int i=0; i void + make_boundary_list( - const DoFHandler& dof, - const typename FunctionMap::type& function_map, - std::vector >& boundary_indices, - const std::vector& component_mask) - { - // if for whatever reason we were - // passed an empty map, return - // immediately ++ const DoFHandler &dof, ++ const typename FunctionMap::type &function_map, ++ std::vector > &boundary_indices, ++ const std::vector &component_mask) ++{ ++ // if for whatever reason we were ++ // passed an empty map, return ++ // immediately + if (function_map.size() == 0) + return; + const unsigned int n_levels = dof.get_tria().n_levels(); + + + + const unsigned int n_components = DoFTools::n_components(dof); + const bool fe_is_system = (n_components != 1); + + AssertDimension (boundary_indices.size(), n_levels); + + std::vector local_dofs; + local_dofs.reserve (DoFTools::max_dofs_per_face(dof)); + std::fill (local_dofs.begin (), local_dofs.end (), - DoFHandler::invalid_dof_index); ++ DoFHandler::invalid_dof_index); + - // First, deal with the simpler - // case when we have to identify - // all boundary dofs ++ // First, deal with the simpler ++ // case when we have to identify ++ // all boundary dofs + if (component_mask.size() == 0) + { + typename DoFHandler::cell_iterator - cell = dof.begin(), - endc = dof.end(); ++ cell = dof.begin(), ++ endc = dof.end(); + for (; cell!=endc; ++cell) - { - const FiniteElement &fe = cell->get_fe(); - const unsigned int level = cell->level(); - local_dofs.resize(fe.dofs_per_face); - - for (unsigned int face_no = 0; face_no < GeometryInfo::faces_per_cell; - ++face_no) - if (cell->at_boundary(face_no)) - { - const typename DoFHandler::face_iterator - face = cell->face(face_no); - const unsigned char bi = face->boundary_indicator(); - // Face is listed in - // boundary map - if (function_map.find(bi) != function_map.end()) - { - face->get_mg_dof_indices(level, local_dofs); - for (unsigned int i=0;i &fe = cell->get_fe(); ++ const unsigned int level = cell->level(); ++ local_dofs.resize(fe.dofs_per_face); ++ ++ for (unsigned int face_no = 0; face_no < GeometryInfo::faces_per_cell; ++ ++face_no) ++ if (cell->at_boundary(face_no)) ++ { ++ const typename DoFHandler::face_iterator ++ face = cell->face(face_no); ++ const unsigned char bi = face->boundary_indicator(); ++ // Face is listed in ++ // boundary map ++ if (function_map.find(bi) != function_map.end()) ++ { ++ face->get_mg_dof_indices(level, local_dofs); ++ for (unsigned int i=0; i 0, - ExcMessage("It's probably worthwhile to select at least one component.")); ++ ExcMessage("It's probably worthwhile to select at least one component.")); + + typename DoFHandler::cell_iterator - cell = dof.begin(), - endc = dof.end(); ++ cell = dof.begin(), ++ endc = dof.end(); + for (; cell!=endc; ++cell) - for (unsigned int face_no = 0; face_no < GeometryInfo::faces_per_cell; - ++face_no) - { - if (!(cell->at_boundary(face_no))) - continue; - - const FiniteElement &fe = cell->get_fe(); - const unsigned int level = cell->level(); - - // we can presently deal only with - // primitive elements for boundary - // values. this does not preclude - // us using non-primitive elements - // in components that we aren't - // interested in, however. 
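
Setting the component-mask subtleties aside for a moment, the gathering pattern used throughout this function can be sketched independently of the library (every type below is a hypothetical stand-in): walk over boundary faces, keep those whose boundary indicator appears among the prescribed ones, and insert their level DoF indices into the set belonging to the cell's level.

#include <set>
#include <vector>

// Hypothetical flat description of one boundary face: the level of its
// cell, its boundary indicator, and the level-wise DoF indices on it.
struct BoundaryFace
{
  unsigned int level;
  unsigned char boundary_id;
  std::vector<unsigned int> level_dofs;
};

// boundary_indices must already contain one (possibly empty) set per level.
void collect_boundary_dofs (const std::vector<BoundaryFace> &faces,
                            const std::set<unsigned char> &selected_ids,
                            std::vector<std::set<unsigned int> > &boundary_indices)
{
  for (unsigned int f = 0; f < faces.size (); ++f)
    if (selected_ids.count (faces[f].boundary_id) != 0)
      for (unsigned int i = 0; i < faces[f].level_dofs.size (); ++i)
        boundary_indices[faces[f].level].insert (faces[f].level_dofs[i]);
}

The component-mask branch discussed next only adds a filter on which of a face's DoFs are actually inserted.
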
make - // sure that all shape functions - // that are non-zero for the - // components we are interested in, - // are in fact primitive - for (unsigned int i=0; iget_fe().dofs_per_cell; ++i) - { ++ for (unsigned int face_no = 0; face_no < GeometryInfo::faces_per_cell; ++ ++face_no) ++ { ++ if (!(cell->at_boundary(face_no))) ++ continue; ++ ++ const FiniteElement &fe = cell->get_fe(); ++ const unsigned int level = cell->level(); ++ ++ // we can presently deal only with ++ // primitive elements for boundary ++ // values. this does not preclude ++ // us using non-primitive elements ++ // in components that we aren't ++ // interested in, however. make ++ // sure that all shape functions ++ // that are non-zero for the ++ // components we are interested in, ++ // are in fact primitive ++ for (unsigned int i=0; iget_fe().dofs_per_cell; ++i) ++ { + const ComponentMask &nonzero_component_array + = cell->get_fe().get_nonzero_components (i); - for (unsigned int c=0; cget_fe().is_primitive (i), - ExcMessage ("This function can only deal with requested boundary " - "values that correspond to primitive (scalar) base " - "elements")); - } - - typename DoFHandler::face_iterator face = cell->face(face_no); - const unsigned char boundary_component = face->boundary_indicator(); - if (function_map.find(boundary_component) != function_map.end()) - // face is of the right component - { - // get indices, physical location and - // boundary values of dofs on this - // face - local_dofs.resize (fe.dofs_per_face); - face->get_mg_dof_indices (level, local_dofs); - if (fe_is_system) - { - // enter those dofs - // into the list that - // match the - // component - // signature. avoid - // the usual - // complication that - // we can't just use - // *_system_to_component_index - // for non-primitive - // FEs - for (unsigned int i=0; iget_fe().is_primitive (i), ++ ExcMessage ("This function can only deal with requested boundary " ++ "values that correspond to primitive (scalar) base " ++ "elements")); ++ } ++ ++ typename DoFHandler::face_iterator face = cell->face(face_no); ++ const unsigned char boundary_component = face->boundary_indicator(); ++ if (function_map.find(boundary_component) != function_map.end()) ++ // face is of the right component ++ { ++ // get indices, physical location and ++ // boundary values of dofs on this ++ // face ++ local_dofs.resize (fe.dofs_per_face); ++ face->get_mg_dof_indices (level, local_dofs); ++ if (fe_is_system) ++ { ++ // enter those dofs ++ // into the list that ++ // match the ++ // component ++ // signature. 
avoid ++ // the usual ++ // complication that ++ // we can't just use ++ // *_system_to_component_index ++ // for non-primitive ++ // FEs ++ for (unsigned int i=0; i + void - make_boundary_list(const MGDoFHandler& dof, - const typename FunctionMap::type& function_map, - std::vector& boundary_indices, - const ComponentMask & component_mask) - { + make_boundary_list(const MGDoFHandler &dof, + const typename FunctionMap::type &function_map, + std::vector &boundary_indices, + const ComponentMask &component_mask) + { Assert (boundary_indices.size() == dof.get_tria().n_levels(), - ExcDimensionMismatch (boundary_indices.size(), - dof.get_tria().n_levels())); + ExcDimensionMismatch (boundary_indices.size(), + dof.get_tria().n_levels())); std::vector > - my_boundary_indices (dof.get_tria().n_levels()); + my_boundary_indices (dof.get_tria().n_levels()); make_boundary_list (dof, function_map, my_boundary_indices, component_mask); for (unsigned int i=0; i void extract_inner_interface_dofs (const MGDoFHandler &mg_dof_handler, - std::vector > &interface_dofs) - std::vector > &interface_dofs) ++ std::vector > &interface_dofs) { Assert (interface_dofs.size() == mg_dof_handler.get_tria().n_levels(), - ExcDimensionMismatch (interface_dofs.size(), - mg_dof_handler.get_tria().n_levels())); + ExcDimensionMismatch (interface_dofs.size(), + mg_dof_handler.get_tria().n_levels())); for (unsigned int l=0; l void extract_non_interface_dofs (const MGDoFHandler &mg_dof_handler, - std::vector > &non_interface_dofs) - std::vector > &non_interface_dofs) ++ std::vector > &non_interface_dofs) { Assert (non_interface_dofs.size() == mg_dof_handler.get_tria().n_levels(), - ExcDimensionMismatch (non_interface_dofs.size(), - mg_dof_handler.get_tria().n_levels())); + ExcDimensionMismatch (non_interface_dofs.size(), + mg_dof_handler.get_tria().n_levels())); const FiniteElement &fe = mg_dof_handler.get_fe(); @@@ -1736,15 -1530,15 +1738,15 @@@ template void extract_inner_interface_dofs (const MGDoFHandler &mg_dof_handler, - std::vector > &interface_dofs, - std::vector > &boundary_interface_dofs) - std::vector > &interface_dofs, - std::vector > &boundary_interface_dofs) ++ std::vector > &interface_dofs, ++ std::vector > &boundary_interface_dofs) { Assert (interface_dofs.size() == mg_dof_handler.get_tria().n_levels(), - ExcDimensionMismatch (interface_dofs.size(), - mg_dof_handler.get_tria().n_levels())); + ExcDimensionMismatch (interface_dofs.size(), + mg_dof_handler.get_tria().n_levels())); Assert (boundary_interface_dofs.size() == mg_dof_handler.get_tria().n_levels(), - ExcDimensionMismatch (boundary_interface_dofs.size(), - mg_dof_handler.get_tria().n_levels())); + ExcDimensionMismatch (boundary_interface_dofs.size(), + mg_dof_handler.get_tria().n_levels())); for (unsigned int l=0; l::faces_per_cell; ++face_nr) - { - const typename DoFHandler::face_iterator face = cell->face(face_nr); - if (!face->at_boundary()) - { - //interior face - const typename MGDoFHandler::cell_iterator - neighbor = cell->neighbor(face_nr); - - // Do refinement face - // from the coarse side - if (neighbor->level() < cell->level()) - { - for (unsigned int j=0; j::face_iterator face = cell->face(face_nr); + if (!face->at_boundary()) + { + //interior face + const typename MGDoFHandler::cell_iterator + neighbor = cell->neighbor(face_nr); + + // Do refinement face + // from the coarse side + if (neighbor->level() < cell->level()) + { + for (unsigned int j=0; j::faces_per_cell; ++face_nr) - if(cell->at_boundary(face_nr)) - for(unsigned int j=0; 
j::faces_per_cell; ++face_nr) + if (cell->at_boundary(face_nr)) + for (unsigned int j=0; jlevel(); + cell->get_mg_dof_indices (local_dof_indices); + - for(unsigned int i=0; i + void + extract_inner_interface_dofs (const DoFHandler &dof_handler, - std::vector > &interface_dofs, - std::vector > &boundary_interface_dofs) ++ std::vector > &interface_dofs, ++ std::vector > &boundary_interface_dofs) + { + Assert (interface_dofs.size() == dof_handler.get_tria().n_levels(), - ExcDimensionMismatch (interface_dofs.size(), - dof_handler.get_tria().n_levels())); ++ ExcDimensionMismatch (interface_dofs.size(), ++ dof_handler.get_tria().n_levels())); + Assert (boundary_interface_dofs.size() == dof_handler.get_tria().n_levels(), - ExcDimensionMismatch (boundary_interface_dofs.size(), - dof_handler.get_tria().n_levels())); ++ ExcDimensionMismatch (boundary_interface_dofs.size(), ++ dof_handler.get_tria().n_levels())); + + for (unsigned int l=0; l &fe = dof_handler.get_fe(); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int dofs_per_face = fe.dofs_per_face; + + std::vector local_dof_indices (dofs_per_cell); + std::vector face_dof_indices (dofs_per_face); + + std::vector cell_dofs(dofs_per_cell, false); + std::vector boundary_cell_dofs(dofs_per_cell, false); + + typename DoFHandler::cell_iterator cell = dof_handler.begin(), - endc = dof_handler.end(); ++ endc = dof_handler.end(); + + for (; cell!=endc; ++cell) + { + bool has_coarser_neighbor = false; + + std::fill (cell_dofs.begin(), cell_dofs.end(), false); + std::fill (boundary_cell_dofs.begin(), boundary_cell_dofs.end(), false); + + for (unsigned int face_nr=0; face_nr::faces_per_cell; ++face_nr) - { - const typename DoFHandler::face_iterator face = cell->face(face_nr); - if (!face->at_boundary()) - { - //interior face - const typename DoFHandler::cell_iterator - neighbor = cell->neighbor(face_nr); - - // Do refinement face - // from the coarse side - if (neighbor->level() < cell->level()) - { - for (unsigned int j=0; j::face_iterator face = cell->face(face_nr); ++ if (!face->at_boundary()) ++ { ++ //interior face ++ const typename DoFHandler::cell_iterator ++ neighbor = cell->neighbor(face_nr); ++ ++ // Do refinement face ++ // from the coarse side ++ if (neighbor->level() < cell->level()) ++ { ++ for (unsigned int j=0; j::faces_per_cell; ++face_nr) - if(cell->at_boundary(face_nr)) - for(unsigned int j=0; j::faces_per_cell; ++face_nr) ++ if (cell->at_boundary(face_nr)) ++ for (unsigned int j=0; jlevel(); diff --cc deal.II/source/multigrid/mg_transfer_prebuilt.cc index 601a010eb4,2b8437a028..10054a7a20 --- a/deal.II/source/multigrid/mg_transfer_prebuilt.cc +++ b/deal.II/source/multigrid/mg_transfer_prebuilt.cc @@@ -230,282 -230,40 +230,282 @@@ void MGTransferPrebuilt::build_ temp_copy_indices.resize (0); temp_copy_indices.resize (mg_dof.n_dofs(level), numbers::invalid_unsigned_int); - // Compute coarse level right hand side - // by restricting from fine level. + // Compute coarse level right hand side + // by restricting from fine level. 
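
The extract_inner_interface_dofs / extract_non_interface_dofs routines in mg_tools.cc above all use the same two-pass marking pattern: a cell first collects, into a per-cell boolean array, the local DoFs of every face whose neighbor lives on a coarser level, and only afterwards scatters that array into a level-wide mask through the cell's level DoF indices. The stand-alone sketch below (plain C++; ToyCell, extract_interface_mask and the toy connectivity data are illustrative assumptions, not deal.II types or API) shows just that pattern.

    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct ToyCell
    {
      unsigned int level;                          // refinement level of the cell
      std::vector<unsigned int> level_dofs;        // level DoF index of each local DoF
      // for each face: the local DoF numbers on that face, and whether the
      // neighbor across that face lives on a coarser level
      std::vector<std::vector<unsigned int> > face_local_dofs;
      std::vector<bool>                       neighbor_is_coarser;
    };

    // Build one boolean mask per level; interface_dofs[l][d] is true iff level
    // DoF d of level l lies on a face towards a coarser cell.
    std::vector<std::vector<bool> >
    extract_interface_mask (const std::vector<ToyCell>      &cells,
                            const unsigned int               n_levels,
                            const std::vector<unsigned int> &dofs_per_level)
    {
      std::vector<std::vector<bool> > interface_dofs (n_levels);
      for (unsigned int l = 0; l < n_levels; ++l)
        interface_dofs[l].assign (dofs_per_level[l], false);

      for (std::size_t c = 0; c < cells.size (); ++c)
        {
          const ToyCell &cell = cells[c];
          std::vector<bool> cell_dofs (cell.level_dofs.size (), false);

          // pass 1: flag local DoFs of all faces with a coarser neighbor
          for (std::size_t f = 0; f < cell.face_local_dofs.size (); ++f)
            if (cell.neighbor_is_coarser[f])
              for (std::size_t j = 0; j < cell.face_local_dofs[f].size (); ++j)
                cell_dofs[cell.face_local_dofs[f][j]] = true;

          // pass 2: scatter the per-cell flags into the level-wide mask
          for (std::size_t i = 0; i < cell_dofs.size (); ++i)
            if (cell_dofs[i])
              interface_dofs[cell.level][cell.level_dofs[i]] = true;
        }
      return interface_dofs;
    }

    int main ()
    {
      // one bilinear quad on level 1 whose face 0 borders a coarser cell
      ToyCell cell;
      cell.level = 1;
      unsigned int dofs[4]  = {0, 1, 2, 3};
      unsigned int face0[2] = {0, 2};              // local DoFs sitting on face 0
      cell.level_dofs.assign (dofs, dofs + 4);
      cell.face_local_dofs.resize (4);
      cell.face_local_dofs[0].assign (face0, face0 + 2);
      cell.neighbor_is_coarser.assign (4, false);
      cell.neighbor_is_coarser[0] = true;

      std::vector<unsigned int> dofs_per_level (2, 4);
      std::vector<std::vector<bool> > mask =
        extract_interface_mask (std::vector<ToyCell> (1, cell), 2, dofs_per_level);
      std::cout << mask[1][0] << mask[1][1] << mask[1][2] << mask[1][3] << '\n'; // 1010
      return 0;
    }
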
for (; level_cell!=level_end; ++level_cell) - { - DoFAccessor >& global_cell = *level_cell; - // get the dof numbers of - // this cell for the global - // and the level-wise - // numbering - global_cell.get_dof_indices(global_dof_indices); - level_cell->get_mg_dof_indices (level_dof_indices); - - for (unsigned int i=0; i > &global_cell = *level_cell; + // get the dof numbers of + // this cell for the global + // and the level-wise + // numbering + global_cell.get_dof_indices(global_dof_indices); + level_cell->get_mg_dof_indices (level_dof_indices); + + for (unsigned int i=0; iat_refinement_edge(level,level_dof_indices[i])) - temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; + if (!mg_constrained_dofs->at_refinement_edge(level,level_dof_indices[i])) + temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; } else - temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; + temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; } - } - - // now all the active dofs got a valid entry, - // the other ones have an invalid entry. Count - // the invalid entries and then resize the - // copy_indices object. Then, insert the pairs - // of global index and level index into - // copy_indices. + } + + // now all the active dofs got a valid entry, ++ // the other ones have an invalid entry. Count ++ // the invalid entries and then resize the ++ // copy_indices object. Then, insert the pairs ++ // of global index and level index into ++ // copy_indices. + const unsigned int n_active_dofs = - std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(), - std::bind2nd(std::not_equal_to(), - numbers::invalid_unsigned_int)); ++ std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(), ++ std::bind2nd(std::not_equal_to(), ++ numbers::invalid_unsigned_int)); + copy_indices[level].resize (n_active_dofs); + unsigned int counter = 0; + for (unsigned int i=0; i (temp_copy_indices[i], i); ++ if (temp_copy_indices[i] != numbers::invalid_unsigned_int) ++ copy_indices[level][counter++] = ++ std::pair (temp_copy_indices[i], i); + Assert (counter == n_active_dofs, ExcInternalError()); + } +} + + + +template +template +void MGTransferPrebuilt::build_matrices ( + const DoFHandler &dof_handler) +{ + const unsigned int n_levels = dof_handler.get_tria().n_levels(); + const unsigned int dofs_per_cell = dof_handler.get_fe().dofs_per_cell; + + sizes.resize(n_levels); - for (unsigned int l=0;l (new SparsityPattern)); ++ (std_cxx1x::shared_ptr (new SparsityPattern)); + prolongation_matrices.push_back - (std_cxx1x::shared_ptr > (new SparseMatrix)); ++ (std_cxx1x::shared_ptr > (new SparseMatrix)); + } + - // two fields which will store the - // indices of the multigrid dofs - // for a cell and one of its children ++ // two fields which will store the ++ // indices of the multigrid dofs ++ // for a cell and one of its children + std::vector dof_indices_parent (dofs_per_cell); + std::vector dof_indices_child (dofs_per_cell); + - // for each level: first build the sparsity - // pattern of the matrices and then build the - // matrices themselves. note that we only - // need to take care of cells on the coarser - // level which have children ++ // for each level: first build the sparsity ++ // pattern of the matrices and then build the ++ // matrices themselves. 
note that we only ++ // need to take care of cells on the coarser ++ // level which have children + for (unsigned int level=0; levelreinit (sizes[level+1], - sizes[level], - dofs_per_cell+1); ++ sizes[level], ++ dofs_per_cell+1); + + for (typename DoFHandler::cell_iterator cell = dof_handler.begin(level); - cell != dof_handler.end(level); ++cell) - if (cell->has_children()) - { - cell->get_mg_dof_indices (dof_indices_parent); - - Assert(cell->n_children()==GeometryInfo::max_children_per_cell, - ExcNotImplemented()); - for (unsigned int child=0; childn_children(); ++child) - { - // set an alias to the - // prolongation matrix for - // this child - const FullMatrix &prolongation - = dof_handler.get_fe().get_prolongation_matrix (child, - cell->refinement_case()); - - Assert (prolongation.n() != 0, ExcNoProlongation()); - - cell->child(child)->get_mg_dof_indices (dof_indices_child); - - // now tag the entries in the - // matrix which will be used - // for this pair of parent/child - for (unsigned int i=0; iadd (dof_indices_child[i], - dof_indices_parent[j]); - } - } ++ cell != dof_handler.end(level); ++cell) ++ if (cell->has_children()) ++ { ++ cell->get_mg_dof_indices (dof_indices_parent); ++ ++ Assert(cell->n_children()==GeometryInfo::max_children_per_cell, ++ ExcNotImplemented()); ++ for (unsigned int child=0; childn_children(); ++child) ++ { ++ // set an alias to the ++ // prolongation matrix for ++ // this child ++ const FullMatrix &prolongation ++ = dof_handler.get_fe().get_prolongation_matrix (child, ++ cell->refinement_case()); ++ ++ Assert (prolongation.n() != 0, ExcNoProlongation()); ++ ++ cell->child(child)->get_mg_dof_indices (dof_indices_child); ++ ++ // now tag the entries in the ++ // matrix which will be used ++ // for this pair of parent/child ++ for (unsigned int i=0; iadd (dof_indices_child[i], ++ dof_indices_parent[j]); ++ } ++ } + + prolongation_sparsities[level]->compress (); + + prolongation_matrices[level]->reinit (*prolongation_sparsities[level]); + - // now actually build the matrices ++ // now actually build the matrices + for (typename DoFHandler::cell_iterator cell = dof_handler.begin(level); - cell != dof_handler.end(level); ++cell) - if (cell->has_children()) - { - cell->get_mg_dof_indices (dof_indices_parent); - - Assert(cell->n_children()==GeometryInfo::max_children_per_cell, - ExcNotImplemented()); - for (unsigned int child=0; childn_children(); ++child) - { - // set an alias to the - // prolongation matrix for - // this child - const FullMatrix &prolongation - = dof_handler.get_fe().get_prolongation_matrix (child, - cell->refinement_case()); - - cell->child(child)->get_mg_dof_indices (dof_indices_child); - - // now set the entries in the - // matrix - for (unsigned int i=0; iset (dof_indices_child[i], - dofs_per_cell, - &dof_indices_parent[0], - &prolongation(i,0), - true); - } - } ++ cell != dof_handler.end(level); ++cell) ++ if (cell->has_children()) ++ { ++ cell->get_mg_dof_indices (dof_indices_parent); ++ ++ Assert(cell->n_children()==GeometryInfo::max_children_per_cell, ++ ExcNotImplemented()); ++ for (unsigned int child=0; childn_children(); ++child) ++ { ++ // set an alias to the ++ // prolongation matrix for ++ // this child ++ const FullMatrix &prolongation ++ = dof_handler.get_fe().get_prolongation_matrix (child, ++ cell->refinement_case()); ++ ++ cell->child(child)->get_mg_dof_indices (dof_indices_child); ++ ++ // now set the entries in the ++ // matrix ++ for (unsigned int i=0; iset (dof_indices_child[i], ++ dofs_per_cell, ++ 
&dof_indices_parent[0], ++ &prolongation(i,0), ++ true); ++ } ++ } + } + + - // impose boundary conditions - // but only in the column of - // the prolongation matrix ++ // impose boundary conditions ++ // but only in the column of ++ // the prolongation matrix + if (mg_constrained_dofs != 0) - if (mg_constrained_dofs->set_boundary_values()) - { - std::vector constrain_indices; - for (int level=n_levels-2; level>=0; --level) - { - if (mg_constrained_dofs->get_boundary_indices()[level].size() == 0) - continue; - - // need to delete all the columns in the - // matrix that are on the boundary. to achive - // this, create an array as long as there are - // matrix columns, and find which columns we - // need to filter away. - constrain_indices.resize (0); - constrain_indices.resize (prolongation_matrices[level]->n(), 0); - std::set::const_iterator dof ++ if (mg_constrained_dofs->set_boundary_values()) ++ { ++ std::vector constrain_indices; ++ for (int level=n_levels-2; level>=0; --level) ++ { ++ if (mg_constrained_dofs->get_boundary_indices()[level].size() == 0) ++ continue; ++ ++ // need to delete all the columns in the ++ // matrix that are on the boundary. to achive ++ // this, create an array as long as there are ++ // matrix columns, and find which columns we ++ // need to filter away. ++ constrain_indices.resize (0); ++ constrain_indices.resize (prolongation_matrices[level]->n(), 0); ++ std::set::const_iterator dof + = mg_constrained_dofs->get_boundary_indices()[level].begin(), - endd = mg_constrained_dofs->get_boundary_indices()[level].end(); - for (; dof != endd; ++dof) - constrain_indices[*dof] = 1; - - const unsigned int n_dofs = prolongation_matrices[level]->m(); - for (unsigned int i=0; i::iterator - start_row = prolongation_matrices[level]->begin(i), - end_row = prolongation_matrices[level]->end(i); - for(; start_row != end_row; ++start_row) - { - if (constrain_indices[start_row->column()] == 1) - start_row->value() = 0; - } - } - } - } - - // to find the indices that describe the - // relation between global dofs and local - // numbering on the individual level, first - // create a temp vector where the ith level - // entry contains the respective global - // entry. this gives a neat way to find those - // indices. in a second step, actually build - // the std::vector > that - // only contains the active dofs on the - // levels. ++ endd = mg_constrained_dofs->get_boundary_indices()[level].end(); ++ for (; dof != endd; ++dof) ++ constrain_indices[*dof] = 1; ++ ++ const unsigned int n_dofs = prolongation_matrices[level]->m(); ++ for (unsigned int i=0; i::iterator ++ start_row = prolongation_matrices[level]->begin(i), ++ end_row = prolongation_matrices[level]->end(i); ++ for (; start_row != end_row; ++start_row) ++ { ++ if (constrain_indices[start_row->column()] == 1) ++ start_row->value() = 0; ++ } ++ } ++ } ++ } ++ ++ // to find the indices that describe the ++ // relation between global dofs and local ++ // numbering on the individual level, first ++ // create a temp vector where the ith level ++ // entry contains the respective global ++ // entry. this gives a neat way to find those ++ // indices. in a second step, actually build ++ // the std::vector > that ++ // only contains the active dofs on the ++ // levels. 
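
The boundary-value filter above walks each row of the level prolongation matrix and zeroes every stored entry whose column index belongs to a boundary DoF, while the sparsity pattern itself is left untouched. A minimal stand-alone sketch of that idea on a generic compressed-row matrix follows (plain C++; CsrMatrix and zero_constrained_columns are illustrative names, not deal.II's SparseMatrix interface).

    #include <cstddef>
    #include <iostream>
    #include <set>
    #include <vector>

    struct CsrMatrix
    {
      std::vector<std::size_t> row_start;   // size n_rows + 1
      std::vector<std::size_t> col_index;   // size nnz
      std::vector<double>      value;       // size nnz
    };

    void zero_constrained_columns (CsrMatrix                   &m,
                                   const std::set<std::size_t> &constrained_cols,
                                   const std::size_t            n_cols)
    {
      // translate the set into a flat 0/1 array, as the loop in the diff does,
      // so that the per-entry test is O(1)
      std::vector<char> is_constrained (n_cols, 0);
      for (std::set<std::size_t>::const_iterator it = constrained_cols.begin ();
           it != constrained_cols.end (); ++it)
        is_constrained[*it] = 1;

      for (std::size_t row = 0; row + 1 < m.row_start.size (); ++row)
        for (std::size_t k = m.row_start[row]; k < m.row_start[row + 1]; ++k)
          if (is_constrained[m.col_index[k]])
            m.value[k] = 0.;               // keep the entry, just null its value
    }

    int main ()
    {
      // 2x3 matrix [[1 2 0],[0 3 4]] in CSR form; constrain column 1
      CsrMatrix m;
      std::size_t rs[3] = {0, 2, 4};        m.row_start.assign (rs, rs + 3);
      std::size_t ci[4] = {0, 1, 1, 2};     m.col_index.assign (ci, ci + 4);
      double      va[4] = {1., 2., 3., 4.}; m.value.assign (va, va + 4);

      std::set<std::size_t> constrained;
      constrained.insert (1);
      zero_constrained_columns (m, constrained, 3);

      for (std::size_t k = 0; k < m.value.size (); ++k)
        std::cout << m.value[k] << ' ';     // prints: 1 0 0 4
      std::cout << '\n';
      return 0;
    }
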
+ + copy_indices.resize(n_levels); + std::vector temp_copy_indices; + std::vector global_dof_indices (dofs_per_cell); + std::vector level_dof_indices (dofs_per_cell); + for (int level=dof_handler.get_tria().n_levels()-1; level>=0; --level) + { + copy_indices[level].clear(); + typename DoFHandler::active_cell_iterator - level_cell = dof_handler.begin_active(level); ++ level_cell = dof_handler.begin_active(level); + const typename DoFHandler::active_cell_iterator - level_end = dof_handler.end_active(level); ++ level_end = dof_handler.end_active(level); + + temp_copy_indices.resize (0); + temp_copy_indices.resize (dof_handler.n_dofs(level), numbers::invalid_unsigned_int); + - // Compute coarse level right hand side - // by restricting from fine level. ++ // Compute coarse level right hand side ++ // by restricting from fine level. + for (; level_cell!=level_end; ++level_cell) - { - DoFAccessor >& global_cell = *level_cell; - // get the dof numbers of - // this cell for the global - // and the level-wise - // numbering - global_cell.get_dof_indices(global_dof_indices); - level_cell->get_mg_dof_indices (level_dof_indices); - - for (unsigned int i=0; i > &global_cell = *level_cell; ++ // get the dof numbers of ++ // this cell for the global ++ // and the level-wise ++ // numbering ++ global_cell.get_dof_indices(global_dof_indices); ++ level_cell->get_mg_dof_indices (level_dof_indices); ++ ++ for (unsigned int i=0; iat_refinement_edge(level,level_dof_indices[i])) - temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; ++ if (mg_constrained_dofs != 0) ++ { ++ if (!mg_constrained_dofs->at_refinement_edge(level,level_dof_indices[i])) ++ temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; ++ } ++ else ++ temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; + } - else - temp_copy_indices[level_dof_indices[i]] = global_dof_indices[i]; - } - } - - // now all the active dofs got a valid entry, - // the other ones have an invalid entry. Count - // the invalid entries and then resize the - // copy_indices object. Then, insert the pairs - // of global index and level index into - // copy_indices. ++ } ++ ++ // now all the active dofs got a valid entry, + // the other ones have an invalid entry. Count + // the invalid entries and then resize the + // copy_indices object. Then, insert the pairs + // of global index and level index into + // copy_indices. 
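
The copy_indices construction that follows uses a sentinel-and-compress idiom: every level DoF slot starts out at numbers::invalid_unsigned_int, active DoFs overwrite their slot with the matching global DoF index, and a second pass counts the valid slots and packs them into (global, level) index pairs. A small self-contained sketch of the same idiom (plain C++; compress_copy_indices and IndexPair are hypothetical names, and std::count replaces the count_if/bind2nd combination used in the diff):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <limits>
    #include <utility>
    #include <vector>

    typedef std::pair<unsigned int, unsigned int> IndexPair; // (global, level)

    std::vector<IndexPair>
    compress_copy_indices (const std::vector<unsigned int> &temp_copy_indices)
    {
      const unsigned int invalid = std::numeric_limits<unsigned int>::max ();

      // count how many level DoFs received a valid global index ...
      const std::size_t n_active =
        temp_copy_indices.size ()
        - std::count (temp_copy_indices.begin (), temp_copy_indices.end (), invalid);

      // ... and build exactly that many (global, level) pairs
      std::vector<IndexPair> copy_indices;
      copy_indices.reserve (n_active);
      for (unsigned int i = 0; i < temp_copy_indices.size (); ++i)
        if (temp_copy_indices[i] != invalid)
          copy_indices.push_back (IndexPair (temp_copy_indices[i], i));

      return copy_indices;
    }

    int main ()
    {
      const unsigned int invalid = std::numeric_limits<unsigned int>::max ();
      // level DoFs 0 and 2 are active and map to global DoFs 7 and 3
      std::vector<unsigned int> temp (3, invalid);
      temp[0] = 7;
      temp[2] = 3;

      const std::vector<IndexPair> pairs = compress_copy_indices (temp);
      for (std::size_t k = 0; k < pairs.size (); ++k)
        std::cout << '(' << pairs[k].first << ',' << pairs[k].second << ") ";
      std::cout << '\n';   // prints: (7,0) (3,2)
      return 0;
    }
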
          const unsigned int n_active_dofs =
-           std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
-                          std::bind2nd(std::not_equal_to<unsigned int>(),
-                                       numbers::invalid_unsigned_int));
+           std::count_if (temp_copy_indices.begin(), temp_copy_indices.end(),
+                          std::bind2nd(std::not_equal_to<unsigned int>(),
+                                       numbers::invalid_unsigned_int));
          copy_indices[level].resize (n_active_dofs);
          unsigned int counter = 0;
          for (unsigned int i=0; i<temp_copy_indices.size(); ++i)
            if (temp_copy_indices[i] != numbers::invalid_unsigned_int)
              copy_indices[level][counter++] =
                std::pair<unsigned int, unsigned int> (temp_copy_indices[i], i);
          Assert (counter == n_active_dofs, ExcInternalError());
        }
    }
diff --cc deal.II/source/numerics/error_estimator.cc
--- a/deal.II/source/numerics/error_estimator.cc
+++ b/deal.II/source/numerics/error_estimator.cc
 template
 void KellyErrorEstimator<1,spacedim>::
-estimate (const Mapping<1,spacedim>        & /*mapping*/,
-          const DH                         & /*dof_handler*/,
+estimate (const Mapping<1,spacedim>        &/*mapping*/,
+          const DH                         &/*dof_handler*/,
-          const hp::QCollection<0> &,
+          const hp::QCollection<0> &,
-          const typename FunctionMap::type & /*neumann_bc*/,
-          const std::vector                & /*solutions*/,
-          std::vector*>                    & /*errors*/,
-          const ComponentMask              & /*component_mask_*/,
-          const Function                   * /*coefficient*/,
+          const typename FunctionMap::type &/*neumann_bc*/,
+          const std::vector                &/*solutions*/,
+          std::vector*>                    &/*errors*/,
+          const ComponentMask              &/*component_mask_*/,
+          const Function                   */*coefficient*/,
           const unsigned int,
           const types::subdomain_id         /*subdomain_id*/,
           const types::material_id          /*material_id*/)
diff --cc deal.II/source/numerics/matrix_tools.cc
index 94a3a8f7b3,9fe2a393d1..24bb447f41
--- a/deal.II/source/numerics/matrix_tools.cc
+++ b/deal.II/source/numerics/matrix_tools.cc
@@@ -1123,19 -1123,19 +1123,19 @@@ namespace MatrixCreato
     template
     void
-    create_boundary_mass_matrix (const Mapping &mapping,
+    create_boundary_mass_matrix (const Mapping &mapping,
                                  const DoFHandler &dof,
                                  const Quadrature &q,
-                                 SparseMatrix &matrix,
-                                 const typename FunctionMap::type &boundary_functions,
+                                 SparseMatrix &matrix,
+                                 const typename FunctionMap::type &boundary_functions,
                                  Vector &rhs_vector,
                                  std::vector &dof_to_boundary_mapping,
-                                 const Function * const coefficient,
+                                 const Function *const coefficient,
                                  std::vector component_mapping)
     {
-      // what would that be in 1d? the
-      // identity matrix on the boundary
-      // dofs?
+      // what would that be in 1d? the
+      // identity matrix on the boundary
+      // dofs?
       if (dim == 1)
         {
           Assert (false, ExcNotImplemented());
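
For dim > 1, create_boundary_mass_matrix assembles M(i,j) = \int_Gamma phi_i phi_j ds over the boundary faces selected by boundary_functions; only the 1d case is rejected above. The toy program below is not deal.II's code path but a plain-C++ illustration of that face integral: it integrates the local 2x2 matrix for one straight linear edge of length h (h = 0.25 and the 2-point Gauss rule are illustrative choices) and compares it with the exact value h/6 * [[2,1],[1,2]], which the quadrature reproduces because the integrand is quadratic.

    #include <cmath>
    #include <cstdio>

    int main ()
    {
      const double h = 0.25;                       // length of the boundary edge

      // 2-point Gauss rule on the unit interval [0,1]
      const double q[2] = {0.5 - 0.5 / std::sqrt (3.), 0.5 + 0.5 / std::sqrt (3.)};
      const double w[2] = {0.5, 0.5};

      double local_mass[2][2] = {{0., 0.}, {0., 0.}};
      for (unsigned int k = 0; k < 2; ++k)
        {
          const double phi[2] = {1. - q[k], q[k]}; // linear shape functions on the edge
          const double JxW    = w[k] * h;          // quadrature weight times edge length
          for (unsigned int i = 0; i < 2; ++i)
            for (unsigned int j = 0; j < 2; ++j)
              local_mass[i][j] += phi[i] * phi[j] * JxW;
        }

      std::printf ("computed: [%g %g; %g %g]\n",
                   local_mass[0][0], local_mass[0][1],
                   local_mass[1][0], local_mass[1][1]);
      std::printf ("exact   : [%g %g; %g %g]\n",
                   2. * h / 6., h / 6., h / 6., 2. * h / 6.);
      return 0;
    }
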