cell->get_dof_indices (local_dof_indices);
hanging_node_constraints
- .distribute_local_to_global (cell_matrix, cell_rhs,
+ .distribute_local_to_global (cell_matrix, cell_rhs,
local_dof_indices,
- system_matrix, system_rhs);
+ system_matrix, system_rhs);
}
// Now compress the vector and the system matrix:
system_matrix.compress(VectorOperation::add);
system_rhs.compress(VectorOperation::add);
-
+
// The last step is to again fix up boundary values, just as we already
// did in previous programs. A slight complication is that the
// <code>apply_boundary_values</code> function wants to have a solution
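    // A minimal sketch of that fix-up (hedged: it assumes homogeneous
    // boundary values on boundary indicator 0 and the usual member names
    // system_matrix, solution and system_rhs):
    std::map<types::global_dof_index,double> boundary_values;
    VectorTools::interpolate_boundary_values (dof_handler,
                                              0,
                                              ZeroFunction<dim>(),
                                              boundary_values);
    MatrixTools::apply_boundary_values (boundary_values,
                                        system_matrix,
                                        solution,
                                        system_rhs);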
void solve_time_step();
void output_results() const;
void refine_mesh (const unsigned int min_grid_level,
- const unsigned int max_grid_level);
+ const unsigned int max_grid_level);
Triangulation<dim> triangulation;
FE_Q<dim> fe;
dof_handler.distribute_dofs(fe);
std::cout << std::endl
- << "==========================================="
- << std::endl
- << "Number of active cells: " << triangulation.n_active_cells()
+ << "==========================================="
<< std::endl
- << "Number of degrees of freedom: " << dof_handler.n_dofs()
+ << "Number of active cells: " << triangulation.n_active_cells()
<< std::endl
- << std::endl;
+ << "Number of degrees of freedom: " << dof_handler.n_dofs()
+ << std::endl
+ << std::endl;
constraints.clear ();
DoFTools::make_hanging_node_constraints (dof_handler,
MatrixCreator::create_mass_matrix(dof_handler,
QGauss<dim>(fe.degree+1),
mass_matrix,
- (const Function<dim>*)0,
+ (const Function<dim> *)0,
constraints);
MatrixCreator::create_laplace_matrix(dof_handler,
QGauss<dim>(fe.degree+1),
laplace_matrix,
- (const Function<dim>*)0,
+ (const Function<dim> *)0,
constraints);
solution.reinit(dof_handler.n_dofs());
// too high a mesh level.
template <int dim>
void HeatEquation<dim>::refine_mesh (const unsigned int min_grid_level,
- const unsigned int max_grid_level)
+ const unsigned int max_grid_level)
{
Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
cell != triangulation.end(); ++cell)
cell->clear_refine_flag ();
for (typename Triangulation<dim>::active_cell_iterator
- cell = triangulation.begin_active(min_grid_level);
- cell != triangulation.end_active(min_grid_level); ++cell)
+ cell = triangulation.begin_active(min_grid_level);
+ cell != triangulation.end_active(min_grid_level); ++cell)
cell->clear_coarsen_flag ();
std::cout << "Time step " << timestep_number << " at t=" << time
<< std::endl;
- tmp.reinit (solution.size());
- forcing_terms.reinit (solution.size());
+ tmp.reinit (solution.size());
+ forcing_terms.reinit (solution.size());
mass_matrix.vmult(system_rhs, old_solution);
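    // A sketch of how the theta-scheme right-hand side typically continues
    // from here (assuming members laplace_matrix, theta and time_step as in
    // this program): system_rhs = [M - (1-theta) k A] u^{n-1}, before the
    // forcing terms are added.
    laplace_matrix.vmult(tmp, old_solution);
    system_rhs.add(-(1 - theta) * time_step, tmp);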
(pre_refinement_step < n_adaptive_pre_refinement_steps))
{
refine_mesh (initial_global_refinement,
- initial_global_refinement + n_adaptive_pre_refinement_steps);
+ initial_global_refinement + n_adaptive_pre_refinement_steps);
++pre_refinement_step;
- std::cout << std::endl;
+ std::cout << std::endl;
goto start_time_iteration;
}
else if ((timestep_number > 0) && (timestep_number % 5 == 0))
refine_mesh (initial_global_refinement,
- initial_global_refinement + n_adaptive_pre_refinement_steps);
+ initial_global_refinement + n_adaptive_pre_refinement_steps);
old_solution = solution;
}
typedef PreconditionChebyshev<LevelMatrixType,Vector<double> > SMOOTHER;
MGSmootherPrecondition<LevelMatrixType, SMOOTHER, Vector<double> >
- mg_smoother;
+ mg_smoother;
// Then, we initialize the smoother with our level matrices and the
// mandatory additional data for the Chebyshev smoother. We use quite a
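  // A plausible continuation of that initialization; the parameter values
  // below are illustrative assumptions, not taken from this snippet:
  typename SMOOTHER::AdditionalData smoother_data;
  smoother_data.smoothing_range     = 15.;
  smoother_data.degree              = 5;
  smoother_data.eig_cg_n_iterations = 15;
  mg_smoother.initialize(mg_matrices, smoother_data);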
{
public:
void cell(MeshWorker::DoFInfo<dim> &dinfo,
- typename MeshWorker::IntegrationInfo<dim> &info) const;
+ typename MeshWorker::IntegrationInfo<dim> &info) const;
void boundary(MeshWorker::DoFInfo<dim> &dinfo,
- typename MeshWorker::IntegrationInfo<dim> &info) const;
+ typename MeshWorker::IntegrationInfo<dim> &info) const;
void face(MeshWorker::DoFInfo<dim> &dinfo1,
- MeshWorker::DoFInfo<dim> &dinfo2,
- typename MeshWorker::IntegrationInfo<dim> &info1,
- typename MeshWorker::IntegrationInfo<dim> &info2) const;
+ MeshWorker::DoFInfo<dim> &dinfo2,
+ typename MeshWorker::IntegrationInfo<dim> &info1,
+ typename MeshWorker::IntegrationInfo<dim> &info2) const;
};
void cell(MeshWorker::DoFInfo<dim> &dinfo, typename MeshWorker::IntegrationInfo<dim> &info) const;
void boundary(MeshWorker::DoFInfo<dim> &dinfo, typename MeshWorker::IntegrationInfo<dim> &info) const;
void face(MeshWorker::DoFInfo<dim> &dinfo1,
- MeshWorker::DoFInfo<dim> &dinfo2,
- typename MeshWorker::IntegrationInfo<dim> &info1,
- typename MeshWorker::IntegrationInfo<dim> &info2) const;
+ MeshWorker::DoFInfo<dim> &dinfo2,
+ typename MeshWorker::IntegrationInfo<dim> &info1,
+ typename MeshWorker::IntegrationInfo<dim> &info2) const;
};
void cell(MeshWorker::DoFInfo<dim> &dinfo, typename MeshWorker::IntegrationInfo<dim> &info) const;
void boundary(MeshWorker::DoFInfo<dim> &dinfo, typename MeshWorker::IntegrationInfo<dim> &info) const;
void face(MeshWorker::DoFInfo<dim> &dinfo1,
- MeshWorker::DoFInfo<dim> &dinfo2,
- typename MeshWorker::IntegrationInfo<dim> &info1,
- typename MeshWorker::IntegrationInfo<dim> &info2) const;
+ MeshWorker::DoFInfo<dim> &dinfo2,
+ typename MeshWorker::IntegrationInfo<dim> &info1,
+ typename MeshWorker::IntegrationInfo<dim> &info2) const;
};
void cell(MeshWorker::DoFInfo<dim> &dinfo, typename MeshWorker::IntegrationInfo<dim> &info) const;
void boundary(MeshWorker::DoFInfo<dim> &dinfo, typename MeshWorker::IntegrationInfo<dim> &info) const;
void face(MeshWorker::DoFInfo<dim> &dinfo1,
- MeshWorker::DoFInfo<dim> &dinfo2,
- typename MeshWorker::IntegrationInfo<dim> &info1,
- typename MeshWorker::IntegrationInfo<dim> &info2) const;
+ MeshWorker::DoFInfo<dim> &dinfo2,
+ typename MeshWorker::IntegrationInfo<dim> &info1,
+ typename MeshWorker::IntegrationInfo<dim> &info2) const;
};
// Here we have the integration on cells. There is currently no good
MeshWorker::Assembler::MGMatrixSimple<SparseMatrix<double> > assembler;
assembler.initialize(mg_matrix);
assembler.initialize_fluxes(mg_matrix_dg_up, mg_matrix_dg_down);
-
+
MatrixIntegrator<dim> integrator;
// Here is the other difference to the previous function: we run over all
// cells, not only the active ones. And we use <tt>mg_dof_handler</tt>,
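  // A hedged sketch of that loop, assuming dof_info, info_box and the
  // assembler are set up as before; the plain begin()/end() iterators of
  // the multigrid DoF handler walk all cells on all levels:
  MeshWorker::integration_loop<dim, dim> (mg_dof_handler.begin(),
                                          mg_dof_handler.end(),
                                          dof_info, info_box,
                                          integrator, assembler);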
computing_timer (pcout,
TimerOutput::summary,
TimerOutput::wall_times)
- {}
+ {}
TimerOutput::Scope t(computing_timer, "output");
output_results (cycle);
}
-
+
pcout << std::endl;
computing_timer.print_summary ();
computing_timer.reset ();
}
-
+
}
}
template <int dim>
class Input
{
- public:
- Input (const char* _name) :
+ public:
+ Input (const char *_name) :
name (_name),
mpi_communicator (MPI_COMM_WORLD),
pcout (std::cout,
- (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)),
+ (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)),
obstacle_data (0),
hx (0),
hy (0),
nx (0),
ny (0)
- {read_obstacle (name);}
+ {
+ read_obstacle (name);
+ }
- double hv (int i, int j);
+ double hv (int i, int j);
- double obstacle_function (double x,double y);
+ double obstacle_function (double x,double y);
- void read_obstacle (const char* name);
+ void read_obstacle (const char *name);
- private:
- const char* name;
- MPI_Comm mpi_communicator;
- ConditionalOStream pcout;
- std::vector<double> obstacle_data;
- double hx, hy;
- int nx, ny;
+ private:
+ const char *name;
+ MPI_Comm mpi_communicator;
+ ConditionalOStream pcout;
+ std::vector<double> obstacle_data;
+ double hx, hy;
+ int nx, ny;
};
// This function is used in obstacle_function ()
  // obstacle data and stores it in the std::vector
// obstacle_data. It will be used only in run ().
template <int dim>
- void Input<dim>::read_obstacle (const char* name)
+ void Input<dim>::read_obstacle (const char *name)
{
std::ifstream f(name);
for (int k=0; k<nx*ny; k++)
{
- double val;
- f >> val;
- obstacle_data.push_back(val);
+ double val;
+ f >> val;
+ obstacle_data.push_back(val);
}
hx = 1.0/(nx - 1);
MPI_Comm _mpi_communicator,
ConditionalOStream _pcout);
- void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
+ void plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
const SymmetricTensor<2,dim> &strain_tensor,
- unsigned int &elast_points,
- unsigned int &plast_points,
- double &yield);
- void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
+ unsigned int &elast_points,
+ unsigned int &plast_points,
+ double &yield);
+ void linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
+ SymmetricTensor<4,dim> &stress_strain_tensor,
const SymmetricTensor<2,dim> &strain_tensor);
inline SymmetricTensor<2,dim> get_strain (const FEValues<dim> &fe_values,
const unsigned int shape_func,
const unsigned int q_point) const;
- void set_sigma_0 (double sigma_hlp) {sigma_0 = sigma_hlp;}
+ void set_sigma_0 (double sigma_hlp)
+ {
+ sigma_0 = sigma_hlp;
+ }
private:
SymmetricTensor<4,dim> stress_strain_tensor_mu;
  // We also count the elastic and the plastic quadrature
  // points.
template <int dim>
- void ConstitutiveLaw<dim>::plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
+ void ConstitutiveLaw<dim>::plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor,
const SymmetricTensor<2,dim> &strain_tensor,
- unsigned int &elast_points,
- unsigned int &plast_points,
- double &yield)
+ unsigned int &elast_points,
+ unsigned int &plast_points,
+ double &yield)
{
if (dim == 3)
{
// This function returns the linearized stress strain tensor.
// It contains the derivative of the nonlinear constitutive law.
template <int dim>
- void ConstitutiveLaw<dim>::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
- SymmetricTensor<4,dim> &stress_strain_tensor,
- const SymmetricTensor<2,dim> &strain_tensor)
+ void ConstitutiveLaw<dim>::linearized_plast_linear_hardening (SymmetricTensor<4,dim> &stress_strain_tensor_linearized,
+ SymmetricTensor<4,dim> &stress_strain_tensor,
+ const SymmetricTensor<2,dim> &strain_tensor)
{
if (dim == 3)
{
Function<dim>(dim),
input_obstacle_copy(_input),
use_read_obstacle(_use_read_obstacle)
- {}
+ {}
virtual double value (const Point<dim> &p,
const unsigned int component = 0) const;
private:
std_cxx1x::shared_ptr<Input<dim> > const &input_obstacle_copy;
- bool use_read_obstacle;
+ bool use_read_obstacle;
};
template <int dim>
return_value = p(1);
if (component == 2)
{
- if (use_read_obstacle)
- return_value = 1.999 - input_obstacle_copy->obstacle_function (p(0), p(1));
- else
- return_value = -std::sqrt (0.36 - (p(0)-0.5)*(p(0)-0.5) - (p(1)-0.5)*(p(1)-0.5)) + 1.59;
+ if (use_read_obstacle)
+ return_value = 1.999 - input_obstacle_copy->obstacle_function (p(0), p(1));
+ else
+ return_value = -std::sqrt (0.36 - (p(0)-0.5)*(p(0)-0.5) - (p(1)-0.5)*(p(1)-0.5)) + 1.59;
}
return return_value;
}
computing_timer.exit_section("Setup: distribute DoFs");
}
- // Setup of the hanging nodes and the Dirichlet constraints.
+ // Setup of the hanging nodes and the Dirichlet constraints.
{
constraints_hanging_nodes.clear ();
constraints_hanging_nodes.reinit (locally_relevant_dofs);
dirichlet_constraints ();
}
- // Initialization for matrices and vectors.
+ // Initialization for matrices and vectors.
{
solution.reinit (locally_relevant_dofs, mpi_communicator);
system_rhs_newton.reinit (locally_owned_dofs, mpi_communicator);
active_set.set_size (locally_relevant_dofs.size ());
}
- // Here we setup sparsity pattern.
+ // Here we setup sparsity pattern.
{
computing_timer.enter_section("Setup: matrix");
TrilinosWrappers::SparsityPattern sp (locally_owned_dofs,
system_matrix_newton.reinit (sp);
- // we are going to reuse the system
- // matrix for assembling the diagonal
- // of the mass matrix so that we do not
- // need to allocate two sparse matrices
- // at the same time:
- TrilinosWrappers::SparseMatrix & mass_matrix = system_matrix_newton;
+ // we are going to reuse the system
+ // matrix for assembling the diagonal
+ // of the mass matrix so that we do not
+ // need to allocate two sparse matrices
+ // at the same time:
+ TrilinosWrappers::SparseMatrix &mass_matrix = system_matrix_newton;
assemble_mass_matrix_diagonal (mass_matrix);
const unsigned int
start = (system_rhs_newton.local_range().first),
for (unsigned int j=start; j<end; j++)
diag_mass_matrix_vector (j) = mass_matrix.diag_element (j);
number_iterations = 0;
-
+
diag_mass_matrix_vector.compress (VectorOperation::insert);
- // remove the mass matrix entries from the matrix:
+ // remove the mass matrix entries from the matrix:
mass_matrix = 0;
computing_timer.exit_section("Setup: matrix");
template <int dim>
void PlasticityContactProblem<dim>::assemble_mass_matrix_diagonal (TrilinosWrappers::SparseMatrix &mass_matrix)
{
- QTrapez<dim-1> face_quadrature_formula;
+ QTrapez<dim-1> face_quadrature_formula;
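    // (QTrapez places its quadrature points only at the cell vertices, so
    // for the lowest-order elements assumed here every shape function is
    // nonzero at exactly one quadrature point and the assembled matrix is
    // diagonal: the usual mass-lumping trick.)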
FEFaceValues<dim> fe_values_face (fe, face_quadrature_formula,
update_values |
FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
Tensor<1,dim,double> ones (dim);
for (unsigned i=0; i<dim; i++)
- ones[i] = 1.0;
+ ones[i] = 1.0;
std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
for (unsigned int q_point=0; q_point<n_face_q_points; ++q_point)
for (unsigned int i=0; i<dofs_per_cell; ++i)
cell_matrix(i,i) += (fe_values_face[displacement].value (i, q_point) *
- ones *
+ ones *
fe_values_face.JxW (q_point));
cell->get_dof_indices (local_dof_indices);
SolverControl solver_control (system_matrix_newton.m(), solver_tolerance);
SolverBicgstab<TrilinosWrappers::MPI::Vector>
solver(solver_control, mem,
- SolverBicgstab<TrilinosWrappers::MPI::Vector>::
- AdditionalData(false, 1.e-10));
+ SolverBicgstab<TrilinosWrappers::MPI::Vector>::
+ AdditionalData(false, 1.e-10));
solver.solve(system_matrix_newton, distributed_solution, system_rhs_newton, preconditioner_u);
computing_timer.exit_section("Residual and lambda");
pcout << " Residual of the non-contact part of the system: " << resid
- << std::endl
- << " with a damping parameter alpha = " << a
- << std::endl;
+ << std::endl
+ << " with a damping parameter alpha = " << a
+ << std::endl;
// The previous iteration of step 0 is the solution of an elastic problem.
// So a linear combination of a plastic and an elastic solution makes no sense
int is_my_set_changed = (active_set == active_set_old)?0:1;
int num_changed = Utilities::MPI::sum(is_my_set_changed, MPI_COMM_WORLD);
if (num_changed==0 && resid < 1e-8)
- break;
+ break;
active_set_old = active_set;
}
template <int dim>
void PlasticityContactProblem<dim>::refine_grid ()
{
- Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+ Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate (dof_handler,
QGauss<dim-1>(3),
typename FunctionMap<dim>::type(),
template <int dim>
void PlasticityContactProblem<dim>::run ()
{
- use_read_obstacle = false;
- if (use_read_obstacle)
- {
- pcout << "Read the obstacle from a file." << std::endl;
- input_obstacle.reset (new Input<dim>("obstacle_file.pbm"));
- pcout << "Obstacle is available now." << std::endl;
- }
+ use_read_obstacle = false;
+ if (use_read_obstacle)
+ {
+ pcout << "Read the obstacle from a file." << std::endl;
+ input_obstacle.reset (new Input<dim>("obstacle_file.pbm"));
+ pcout << "Obstacle is available now." << std::endl;
+ }
const unsigned int n_cycles = 6;
for (cycle=0; cycle<n_cycles; ++cycle)
}
else
{
- computing_timer.enter_section("Setup: refine mesh");
- soltrans.reset (new parallel::distributed::SolutionTransfer<dim,TrilinosWrappers::MPI::Vector>(dof_handler));
- refine_grid ();
- computing_timer.exit_section("Setup: refine mesh");
+ computing_timer.enter_section("Setup: refine mesh");
+ soltrans.reset (new parallel::distributed::SolutionTransfer<dim,TrilinosWrappers::MPI::Vector>(dof_handler));
+ refine_grid ();
+ computing_timer.exit_section("Setup: refine mesh");
}
setup_system ();
{
TrilinosWrappers::MPI::Vector distributed_solution (system_rhs_newton);
distributed_solution = solution;
- soltrans->interpolate(distributed_solution);
- solution = distributed_solution;
+ soltrans->interpolate(distributed_solution);
+ solution = distributed_solution;
}
computing_timer.exit_section("Setup");
int _n_refinements_global = 3;
if (argc == 2)
- _n_refinements_global = atoi(argv[1]);
+ _n_refinements_global = atoi(argv[1]);
PlasticityContactProblem<3> laplace_problem_3d (_n_refinements_global);
laplace_problem_3d.run ();
"solution-" + Utilities::int_to_string (timestep_number, 3);
std::ofstream output ((filename +
- "." + Utilities::int_to_string (Utilities::MPI::
- this_mpi_process(MPI_COMM_WORLD),4) + ".vtu").c_str());
+ "." + Utilities::int_to_string (Utilities::MPI::
+ this_mpi_process(MPI_COMM_WORLD),4) + ".vtu").c_str());
data_out.write_vtu (output);
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
// format that can easily be visualized in the same way as was done in step-1.
template<int dim>
void mesh_info(const Triangulation<dim> &tria,
- const std::string &filename)
+ const std::string &filename)
{
std::cout << "Mesh info:" << std::endl
<< " dimension: " << dim << std::endl
GridOut grid_out;
grid_out.write_eps (tria, out);
std::cout << " written to " << filename
- << std::endl
- << std::endl;
+ << std::endl
+ << std::endl;
}
// @sect3{Main routines}
repetitions[1]=2;
GridGenerator::subdivided_hyper_rectangle (tria2, repetitions,
Point<2>(1.0,-1.0),
- Point<2>(4.0,1.0));
+ Point<2>(4.0,1.0));
Triangulation<2> triangulation;
GridGenerator::merge_triangulations (tria1, tria2, triangulation);
Point<2> grid_5_transform (const Point<2> &in)
{
return Point<2>(in(0),
- in(1) + std::sin(in(0)/5.0*3.14159));
+ in(1) + std::sin(in(0)/5.0*3.14159));
}
repetitions[1] = 2;
GridGenerator::subdivided_hyper_rectangle (tria, repetitions,
Point<2>(0.0,0.0),
- Point<2>(10.0,1.0));
+ Point<2>(10.0,1.0));
GridTools::transform(&grid_5_transform, tria);
mesh_info(tria, "grid-5.eps");
// vertices.
struct Grid6Func
{
- double trans(const double y) const
- {
- return std::tanh(2*y)/tanh(2);
- }
+ double trans(const double y) const
+ {
+ return std::tanh(2*y)/tanh(2);
+ }
- Point<2> operator() (const Point<2> & in) const
- {
- return Point<2> (in(0),
- trans(in(1)));
- }
+ Point<2> operator() (const Point<2> &in) const
+ {
+ return Point<2> (in(0),
+ trans(in(1)));
+ }
};
repetitions[0] = repetitions[1] = 40;
GridGenerator::subdivided_hyper_rectangle (tria, repetitions,
Point<2>(0.0,0.0),
- Point<2>(1.0,1.0));
+ Point<2>(1.0,1.0));
GridTools::transform(Grid6Func(), tria);
mesh_info(tria, "grid-6.eps");
repetitions[0] = repetitions[1] = 16;
GridGenerator::subdivided_hyper_rectangle (tria, repetitions,
Point<2>(0.0,0.0),
- Point<2>(1.0,1.0));
+ Point<2>(1.0,1.0));
GridTools::distort_random (0.3, tria, true);
mesh_info(tria, "grid-7.eps");
deallog << std::endl;
DoFTools::extract_locally_relevant_dofs (mg_dof_handler,
- locally_relevant_set);
+ locally_relevant_set);
//solution.reinit (mg_dof_handler.n_dofs());
MGTools::make_sparsity_pattern(mg_dof_handler, csp, level);
mg_matrices[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
- mg_dof_handler.locally_owned_mg_dofs(level),
- csp,
- MPI_COMM_WORLD, true);
+ mg_dof_handler.locally_owned_mg_dofs(level),
+ csp,
+ MPI_COMM_WORLD, true);
mg_interface_matrices[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
- mg_dof_handler.locally_owned_mg_dofs(level),
- csp,
- MPI_COMM_WORLD, true);
+ mg_dof_handler.locally_owned_mg_dofs(level),
+ csp,
+ MPI_COMM_WORLD, true);
}
}
mg_interface_matrices[cell->level()]);
}
- for (unsigned int i=0;i<triangulation.n_global_levels();++i)
+ for (unsigned int i=0; i<triangulation.n_global_levels(); ++i)
{
mg_matrices[i].compress(VectorOperation::add);
mg_interface_matrices[i].compress(VectorOperation::add);
// argument.
mg_transfer.build_matrices(mg_dof_handler);
- matrix_t & coarse_matrix = mg_matrices[0];
+ matrix_t &coarse_matrix = mg_matrices[0];
//coarse_matrix.copy_from (mg_matrices[0]);
//MGCoarseGridHouseholder<double,vector_t> coarse_grid_solver;
//coarse_grid_solver.initialize (coarse_matrix);
if (false)
{
- // code to optionally compare to Trilinos ML
- TrilinosWrappers::PreconditionAMG prec;
-
- TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
- // Amg_data.constant_modes = constant_modes;
- Amg_data.elliptic = true;
- Amg_data.higher_order_elements = true;
- Amg_data.smoother_sweeps = 2;
- Amg_data.aggregation_threshold = 0.02;
- // Amg_data.symmetric = true;
-
- prec.initialize (system_matrix,
- Amg_data);
- cg.solve (system_matrix, solution, system_rhs,
- prec);
+ // code to optionally compare to Trilinos ML
+ TrilinosWrappers::PreconditionAMG prec;
+
+ TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
+ // Amg_data.constant_modes = constant_modes;
+ Amg_data.elliptic = true;
+ Amg_data.higher_order_elements = true;
+ Amg_data.smoother_sweeps = 2;
+ Amg_data.aggregation_threshold = 0.02;
+ // Amg_data.symmetric = true;
+
+ prec.initialize (system_matrix,
+ Amg_data);
+ cg.solve (system_matrix, solution, system_rhs,
+ prec);
}
else
{
- cg.solve (system_matrix, solution, system_rhs,
- preconditioner);
+ cg.solve (system_matrix, solution, system_rhs,
+ preconditioner);
}
-
+
constraints.distribute (solution);
}
TrilinosWrappers::MPI::Vector temp_solution;
temp_solution.reinit(locally_relevant_set, MPI_COMM_WORLD);
temp_solution = solution;
-
+
KellyErrorEstimator<dim>::estimate (static_cast<DoFHandler<dim>&>(mg_dof_handler),
QGauss<dim-1>(3),
typename FunctionMap<dim>::type(),
temp_solution,
estimated_error_per_cell);
parallel::distributed::GridRefinement::
- refine_and_coarsen_fixed_fraction (triangulation,
- estimated_error_per_cell,
- 0.3, 0.03);
+ refine_and_coarsen_fixed_fraction (triangulation,
+ estimated_error_per_cell,
+ 0.3, 0.03);
triangulation.execute_coarsening_and_refinement ();
}
triangulation.refine_global (3);
}
else
- refine_grid ();
+ refine_grid ();
deallog << " Number of active cells: "
<< triangulation.n_global_active_cells()
*/
struct SvgFlags
{
- public:
- /**
- * This denotes the number of the
- * data vector which shall be used
- * for generating the height
- * information. By default, the
- * first data vector is taken,
- * i.e. <tt>height_vector==0</tt>, if
- * there is any data vector. If there
- * is no data vector, no height
- * information is generated.
- */
- unsigned int height_vector;
+ public:
+ /**
+ * This denotes the number of the
+ * data vector which shall be used
+ * for generating the height
+ * information. By default, the
+ * first data vector is taken,
+ * i.e. <tt>height_vector==0</tt>, if
+ * there is any data vector. If there
+ * is no data vector, no height
+ * information is generated.
+ */
+ unsigned int height_vector;
- /*
- * Angles for the perspective view
- */
- int azimuth_angle, polar_angle;
-
- unsigned int line_thickness;
+ /*
+ * Angles for the perspective view
+ */
+ int azimuth_angle, polar_angle;
- /*
- * Draw a margin of 5% around the plotted area
- */
- bool margin;
+ unsigned int line_thickness;
- /*
- * Draw a colorbar encoding the cell coloring
- */
- bool draw_colorbar;
+ /*
+ * Draw a margin of 5% around the plotted area
+ */
+ bool margin;
- /*
- * Constructor.
- */
- SvgFlags(const unsigned int height_vector = 0,
- const int azimuth_angle = 37,
- const int polar_angle = 45,
- const unsigned int line_thickness = 1,
- const bool margin = true,
- const bool draw_colorbar = true);
+ /*
+ * Draw a colorbar encoding the cell coloring
+ */
+ bool draw_colorbar;
- /**
- * Determine an estimate for
- * the memory consumption (in
- * bytes) of this
- * object. Since sometimes
- * the size of objects can
- * not be determined exactly
- * (for example: what is the
- * memory consumption of an
- * STL <tt>std::map</tt> type with a
- * certain number of
- * elements?), this is only
- * an estimate. however often
- * quite close to the true
- * value.
- */
- std::size_t memory_consumption () const;
-
- private:
+ /*
+ * Constructor.
+ */
+ SvgFlags(const unsigned int height_vector = 0,
+ const int azimuth_angle = 37,
+ const int polar_angle = 45,
+ const unsigned int line_thickness = 1,
+ const bool margin = true,
+ const bool draw_colorbar = true);
+
+ /**
+ * Determine an estimate for
+ * the memory consumption (in
+ * bytes) of this
+ * object. Since sometimes
+ * the size of objects can
+ * not be determined exactly
+ * (for example: what is the
+ * memory consumption of an
+ * STL <tt>std::map</tt> type with a
+ * certain number of
+ * elements?), this is only
+         * an estimate, though often
+ * quite close to the true
+ * value.
+ */
+ std::size_t memory_consumption () const;
+
+ private:
};
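  /*
   * A minimal usage sketch (hedged: the DataOutBase scope of this struct
   * and the data_out object are assumptions, not shown in this excerpt):
   *
   *   DataOutBase::SvgFlags svg_flags;
   *   svg_flags.polar_angle = 60;
   *   data_out.set_flags (svg_flags);
   *   std::ofstream out ("solution.svg");
   *   data_out.write_svg (out);
   */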
vtu,
/**
- * Output in
+ * Output in
* SVG format.
*/
svg,
const std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> > &vector_data_ranges,
const VtkFlags &flags,
std::ostream &out);
-
+
/**
* Write the given list of patches to the output stream in SVG format.
*
- * SVG (Scalable Vector Graphics) is an XML-based vector image format
- * developed and maintained by the World Wide Web Consortium (W3C).
- * This function conforms to the latest specification SVG 1.1,
+ * SVG (Scalable Vector Graphics) is an XML-based vector image format
+ * developed and maintained by the World Wide Web Consortium (W3C).
+ * This function conforms to the latest specification SVG 1.1,
* released on August 16, 2011. Controlling the graphic output is
- * possible by setting or clearing the respective flags (see the
- * SvgFlags struct). At present, this format only supports output
+ * possible by setting or clearing the respective flags (see the
+ * SvgFlags struct). At present, this format only supports output
* for two-dimensional data, with values in the third direction
* taken from a data vector.
- *
- * For the output, each patch is subdivided into four triangles
- * which are then written as polygons and filled with a linear
- * color gradient. The arising coloring of the patches visualizes
- * the data values at the vertices taken from the specified data
+ *
+ * For the output, each patch is subdivided into four triangles
+ * which are then written as polygons and filled with a linear
+ * color gradient. The arising coloring of the patches visualizes
+ * the data values at the vertices taken from the specified data
* vector. A colorbar can be drawn to encode the coloring.
*
   * @note Currently only implemented for two dimensions with an additional
const std::vector<std_cxx1x::tuple<unsigned int, unsigned int, std::string> > &vector_data_ranges,
const SvgFlags &flags,
std::ostream &out);
-
+
/**
* Write the given list of patches to the output stream in deal.II
* intermediate format. This is not a format understood by any other
/**
- * This function projects a three-dimensional point (Point<3> point)
- * onto a two-dimensional image plane, specified by the position of
- * the camera viewing system (Point<3> camera_position), camera
- * direction (Point<3> camera_position), camera horizontal (Point<3>
- * camera_horizontal, necessary for the correct alignment of the
+ * This function projects a three-dimensional point (Point<3> point)
+ * onto a two-dimensional image plane, specified by the position of
+ * the camera viewing system (Point<3> camera_position), camera
+   * direction (Point<3> camera_direction), camera horizontal (Point<3>
+ * camera_horizontal, necessary for the correct alignment of the
* later images), and the focus of the camera (float camera_focus).
*
* For SVG output.
*/
- static Point<2> svg_project_point(Point<3> point,
- Point<3> camera_position,
- Point<3> camera_direction,
- Point<3> camera_horizontal,
+ static Point<2> svg_project_point(Point<3> point,
+ Point<3> camera_position,
+ Point<3> camera_direction,
+ Point<3> camera_horizontal,
float camera_focus);
/**
* Function to compute the gradient parameters for
* in SVG format. See
* DataOutBase::write_svg.
*/
- void write_svg(std::ostream &out) const;
+ void write_svg(std::ostream &out) const;
/**
* Obtain data through get_patches()
Tensor<2,dim> DF_t (dealii::transpose(invert( (Tensor<2,dim>)(*this) )));
DerivativeForm<1,dim, spacedim> result = DF_t;
- return(result);
+ return (result);
}
else
{
ExcIndexRange((index),0,(range)))
#define AssertGlobalIndexRange(index,range) Assert((index) < (range), \
- ExcIndexRange<types::global_dof_index>((index),0,(range)))
+ ExcIndexRange<types::global_dof_index>((index),0,(range)))
using namespace StandardExceptions;
/**
* Enable output to a second stream <tt>o</tt>.
*
- * The optional argument @p print_job_id specifies whether
+ * The optional argument @p print_job_id specifies whether
*/
void attach (std::ostream &o,
const bool print_job_id = true);
{
return sizeof(int);
}
-
+
inline
return sizeof(unsigned int);
}
-
+
inline
std::size_t memory_consumption (const unsigned long long int)
* Called by the constructors.
*/
void do_init(int &argc,
- char ** &argv,
- unsigned int max_num_threads);
+ char ** &argv,
+ unsigned int max_num_threads);
};
namespace internal
{
return MPI_UNSIGNED_LONG;
}
-
+
inline MPI_Datatype mpi_type_id (const unsigned long long int *)
{
* To set n_default_threads add the following at the start of your main():
* <code>
* multithread_info.n_default_threads=1;
- * </code>
+ * </code>
*
* @ingroup threads
* @author Thomas Richter, Wolfgang Bangerth, 2000
* Returns the MPI communicator underlying the
* partitioner object.
*/
- const MPI_Comm& get_communicator() const;
+ const MPI_Comm &get_communicator() const;
/**
* Computes the memory consumption of this
*/
DeclException2 (ExcIndexNotPresent,
types::global_dof_index,
- unsigned int,
+ unsigned int,
<< "Global index " << arg1
<< " neither owned nor ghost on proc " << arg2);
{
    types::global_dof_index size = local_range_data.second - local_range_data.first;
Assert(size<=std::numeric_limits<unsigned int>::max(),
- ExcNotImplemented());
+ ExcNotImplemented());
return static_cast<unsigned int>(size);
}
inline
- const MPI_Comm&
+ const MPI_Comm &
Partitioner::get_communicator() const
{
return communicator;
y = offset+step+step-x;
}
else
- {
+ {
const number offset = step * interval;
if (x<offset || x>offset+step)
return 0;
int, char *, std::string &,
<< "Object of class " << arg2
<< " is still used by " << arg1 << " other objects.\n"
- << "(Additional information: " << arg3 << ")\n"
- << "Note the entry in the Frequently Asked Questions of "
- << "deal.II (linked to from http://www.dealii.org/) for "
- << "more information on what this error means.");
+ << "(Additional information: " << arg3 << ")\n"
+ << "Note the entry in the Frequently Asked Questions of "
+ << "deal.II (linked to from http://www.dealii.org/) for "
+ << "more information on what this error means.");
/**
* A subscriber with the
* <tt>i</tt>th index.
*/
unsigned int operator[] (const unsigned int i) const;
-
+
/**
* Write access the value of the
* <tt>i</tt>th index.
*/
- unsigned int & operator[] (const unsigned int i);
+ unsigned int &operator[] (const unsigned int i);
/**
* Compare two index fields for
template <int N>
inline
unsigned int &
-TableIndicesBase<N>::operator [] (const unsigned int i)
+TableIndicesBase<N>::operator [] (const unsigned int i)
{
Assert (i < N, ExcIndexRange (i, 0, N));
return indices[i];
* number of independent components of each sub-tensor.
*/
static const unsigned int
- n_independent_components = Tensor<rank_-1,dim>::n_independent_components * dim;
+    n_independent_components = Tensor<rank_-1,dim>::n_independent_components * dim;
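  // (Equivalently dim^rank: a rank-2 tensor in dim==3 has 3*3 = 9
  // independent components.)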
/**
* Type of stored objects. This
/**
* Read access using TableIndices <tt>indices</tt>
*/
- Number operator [](const TableIndices<rank_> & indices) const;
-
+ Number operator [](const TableIndices<rank_> &indices) const;
+
/**
* Read and write access using TableIndices <tt>indices</tt>
*/
- Number &operator [](const TableIndices<rank_> & indices);
-
+ Number &operator [](const TableIndices<rank_> &indices);
+
/**
* Assignment operator.
*/
template <int rank_, int dim, typename Number>
inline
-Number
-Tensor<rank_,dim,Number>::operator[] (const TableIndices<rank_> & indices) const
+Number
+Tensor<rank_,dim,Number>::operator[] (const TableIndices<rank_> &indices) const
{
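  // Peel off the outermost index and forward the remaining rank-1 indices
  // to the corresponding subtensor.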
- const unsigned int inner_ind = indices[0];
- Assert (inner_ind<dim, ExcIndexRange(inner_ind, 0, dim));
-
- TableIndices<rank_-1> indices1;
- for (unsigned int i = 0; i < rank_-1;i++)
- indices1[i] = indices[i+1];
- return (subtensor[inner_ind])[indices1];
+ const unsigned int inner_ind = indices[0];
+ Assert (inner_ind<dim, ExcIndexRange(inner_ind, 0, dim));
+
+ TableIndices<rank_-1> indices1;
+ for (unsigned int i = 0; i < rank_-1; i++)
+ indices1[i] = indices[i+1];
+ return (subtensor[inner_ind])[indices1];
}
template <int rank_, int dim, typename Number>
inline
-Number &
-Tensor<rank_,dim,Number>::operator[] (const TableIndices<rank_> & indices)
+Number &
+Tensor<rank_,dim,Number>::operator[] (const TableIndices<rank_> &indices)
{
- const unsigned int inner_ind = indices[0];
- Assert (inner_ind<dim, ExcIndexRange(inner_ind, 0, dim));
-
- TableIndices<rank_-1> indices1;
- for (unsigned int i = 0; i < rank_-1;i++)
- indices1[i] = indices[i+1];
- return (subtensor[inner_ind])[indices1];
+ const unsigned int inner_ind = indices[0];
+ Assert (inner_ind<dim, ExcIndexRange(inner_ind, 0, dim));
+
+ TableIndices<rank_-1> indices1;
+ for (unsigned int i = 0; i < rank_-1; i++)
+ indices1[i] = indices[i+1];
+ return (subtensor[inner_ind])[indices1];
}
template <int rank_, int dim, typename Number>
Tensor<rank_, dim, Number>::component_to_unrolled_index(const TableIndices<rank_> &indices)
{
TableIndices<rank_-1> indices1;
- for (unsigned int i = 0; i < rank_-1;i++)
+ for (unsigned int i = 0; i < rank_-1; i++)
indices1[i] = indices[i];
Assert (indices[rank_-1] < dim,
- ExcIndexRange (indices[rank_-1], 0, dim));
+ ExcIndexRange (indices[rank_-1], 0, dim));
return ( Tensor<rank_-1,dim,Number>::component_to_unrolled_index(indices1) * dim + indices[rank_-1]);
}
Tensor<rank_, dim, Number>::unrolled_to_component_indices(const unsigned int i)
{
Assert (i < n_independent_components,
- ExcIndexRange (i, 0, n_independent_components));
+ ExcIndexRange (i, 0, n_independent_components));
TableIndices<rank_> indices;
   * backward compatibility.
*/
Number &operator [] (const unsigned int index);
-
+
/**
* Read access using TableIndices <tt>indices</tt>
*/
- Number operator [](const TableIndices<1> & indices) const;
-
+ Number operator [](const TableIndices<1> &indices) const;
+
/**
* Read and write access using TableIndices <tt>indices</tt>
*/
- Number &operator [](const TableIndices<1> & indices);
+ Number &operator [](const TableIndices<1> &indices);
/**
* Assignment operator.
template <int dim, typename Number>
inline
-Number Tensor<1,dim,Number>::operator [] (const TableIndices<1> & indices) const
+Number Tensor<1,dim,Number>::operator [] (const TableIndices<1> &indices) const
{
Assert (indices[0]<dim, ExcIndexRange (indices[0], 0, dim));
return values[indices[0]];
template <int dim, typename Number>
inline
-Number &Tensor<1,dim,Number>::operator [] (const TableIndices<1> & indices)
+Number &Tensor<1,dim,Number>::operator [] (const TableIndices<1> &indices)
{
Assert (indices[0]<dim, ExcIndexRange (indices[0], 0, dim));
return values[indices[0]];
inline
internal::fun_encapsulator<RT,
std_cxx1x::tuple<Arg1, Arg2, Arg3, Arg4, Arg5, Arg6>,6>
- spawn (const C &c, RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6) const) DEAL_II_DEPRECATED;
+ spawn (const C &c, RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6) const) DEAL_II_DEPRECATED;
template <typename RT, typename C,
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6, Arg7>,7>
- spawn (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7)) DEAL_II_DEPRECATED;
+ spawn (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7)) DEAL_II_DEPRECATED;
template <typename RT,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7>
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6, Arg7>,7>
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6, Arg7>,7>
- spawn (C &c, RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7)) DEAL_II_DEPRECATED;
+ spawn (C &c, RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7)) DEAL_II_DEPRECATED;
template <typename RT, typename C,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7>
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6, Arg7>,7>
internal::fun_encapsulator<RT,
std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6, Arg7>,7>
- spawn (const C &c, RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7) const) DEAL_II_DEPRECATED;
+ spawn (const C &c, RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7) const) DEAL_II_DEPRECATED;
template <typename RT, typename C,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7>
inline
internal::fun_encapsulator<RT,
std_cxx1x::tuple<Arg1, Arg2, Arg3,
template <typename RT,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7, typename Arg8>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7, typename Arg8>
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6,
template <typename RT, typename C,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7, typename Arg8>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7, typename Arg8>
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6,
template <typename RT, typename C,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7, typename Arg8>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7, typename Arg8>
inline
internal::fun_encapsulator<RT,
std_cxx1x::tuple<Arg1, Arg2, Arg3,
template <typename RT,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7, typename Arg8, typename Arg9>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7, typename Arg8, typename Arg9>
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6,
template <typename RT, typename C,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7, typename Arg8, typename Arg9>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7, typename Arg8, typename Arg9>
inline
internal::fun_encapsulator<RT,std_cxx1x::tuple<Arg1, Arg2, Arg3,
Arg4, Arg5, Arg6,
template <typename RT, typename C,
- typename Arg1, typename Arg2, typename Arg3,
- typename Arg4, typename Arg5, typename Arg6,
- typename Arg7, typename Arg8, typename Arg9>
+ typename Arg1, typename Arg2, typename Arg3,
+ typename Arg4, typename Arg5, typename Arg6,
+ typename Arg7, typename Arg8, typename Arg9>
inline
internal::fun_encapsulator<RT,
std_cxx1x::tuple<Arg1, Arg2, Arg3,
/**
* Reference to the TimerOutput object
*/
- dealii::TimerOutput & timer;
+ dealii::TimerOutput &timer;
/**
* Do we still need to exit the section we are in?
*/
inline
TimerOutput::Scope::Scope(dealii::TimerOutput &timer_, const std::string &section_name)
-:
-timer(timer_), in(true)
+ :
+ timer(timer_), in(true)
{
timer.enter_section(section_name);
}
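// Typical RAII usage, as a sketch (computing_timer and the timed call
// stand in for whatever the surrounding program provides):
//
//   {
//     TimerOutput::Scope t(computing_timer, "assembly");
//     assemble_system();
//   }   // leaving the block exits the "assembly" section automatically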
* The data type always indicates an
* unsigned integer type.
*/
- typedef unsigned int global_dof_index;
+ typedef unsigned int global_dof_index;
- /**
- * An identifier that denotes the MPI type
- * associated with types::global_dof_index.
- */
+ /**
+ * An identifier that denotes the MPI type
+ * associated with types::global_dof_index.
+ */
# define DEAL_II_DOF_INDEX_MPI_TYPE MPI_UNSIGNED
#endif
std_cxx1x::shared_ptr<ScratchData> scratch_data;
bool currently_in_use;
- /**
- * Default constructor.
- */
- ScratchDataObject ()
- :
- currently_in_use (false)
- {}
-
- ScratchDataObject (ScratchData *p,
- const bool in_use)
- :
- scratch_data (p),
- currently_in_use (in_use)
- {}
+ /**
+ * Default constructor.
+ */
+ ScratchDataObject ()
+ :
+ currently_in_use (false)
+ {}
+
+ ScratchDataObject (ScratchData *p,
+ const bool in_use)
+ :
+ scratch_data (p),
+ currently_in_use (in_use)
+ {}
};
const ScratchData *sample_scratch_data;
- /**
- * Default constructor.
- * Initialize everything that doesn't
- * have a default constructor itself.
- */
- ItemType ()
- :
- n_items (0),
- scratch_data (0),
- sample_scratch_data (0)
- {}
+ /**
+ * Default constructor.
+ * Initialize everything that doesn't
+ * have a default constructor itself.
+ */
+ ItemType ()
+ :
+ n_items (0),
+ scratch_data (0),
+ sample_scratch_data (0)
+ {}
};
tbb::filter (/*is_serial=*/true),
remaining_iterator_range (begin, end),
ring_buffer (buffer_size),
- sample_scratch_data (sample_scratch_data),
+ sample_scratch_data (sample_scratch_data),
n_emitted_items (0),
chunk_size (chunk_size)
{
ring_buffer[element].work_items
.resize (chunk_size, remaining_iterator_range.second);
ring_buffer[element].scratch_data
- = &thread_local_scratch;
+ = &thread_local_scratch;
ring_buffer[element].sample_scratch_data
- = &sample_scratch_data;
+ = &sample_scratch_data;
ring_buffer[element].copy_datas
.resize (chunk_size, sample_copy_data);
}
// see if there is an unused object. if so, grab it and mark
// it as used
for (typename ItemType::ScratchDataList::iterator
- p = scratch_data_list.begin();
- p != scratch_data_list.end(); ++p)
+ p = scratch_data_list.begin();
+ p != scratch_data_list.end(); ++p)
if (p->currently_in_use == false)
{
scratch_data = p->scratch_data.get();
scratch_data = new ScratchData(*current_item->sample_scratch_data);
typename ItemType::ScratchDataList::value_type
- new_scratch_object (scratch_data, true);
+ new_scratch_object (scratch_data, true);
scratch_data_list.push_back (new_scratch_object);
}
}
// nothing good can happen if they throw an exception and we are best
// off catching it and showing an error message
for (unsigned int i=0; i<current_item->n_items; ++i)
- {
- try
- {
- worker (current_item->work_items[i],
- *scratch_data,
- current_item->copy_datas[i]);
- }
- catch (const std::exception &exc)
- {
- Threads::internal::handle_std_exception (exc);
- }
- catch (...)
- {
- Threads::internal::handle_unknown_exception ();
- }
- }
+ {
+ try
+ {
+ worker (current_item->work_items[i],
+ *scratch_data,
+ current_item->copy_datas[i]);
+ }
+ catch (const std::exception &exc)
+ {
+ Threads::internal::handle_std_exception (exc);
+ }
+ catch (...)
+ {
+ Threads::internal::handle_unknown_exception ();
+ }
+ }
// finally mark the scratch object as unused again. as above, there
// is no need to lock anything here since the object we work on
scratch_data_list = current_item->scratch_data->get();
for (typename ItemType::ScratchDataList::iterator p =
- scratch_data_list.begin(); p != scratch_data_list.end();
- ++p)
+ scratch_data_list.begin(); p != scratch_data_list.end();
+ ++p)
if (p->scratch_data.get() == scratch_data)
{
Assert(p->currently_in_use == true, ExcInternalError());
// above, catch exceptions rather than letting it propagate into
// unknown territories
for (unsigned int i=0; i<current_item->n_items; ++i)
- {
- try
- {
- copier (current_item->copy_datas[i]);
- }
- catch (const std::exception &exc)
- {
- Threads::internal::handle_std_exception (exc);
- }
- catch (...)
- {
- Threads::internal::handle_unknown_exception ();
- }
- }
+ {
+ try
+ {
+ copier (current_item->copy_datas[i]);
+ }
+ catch (const std::exception &exc)
+ {
+ Threads::internal::handle_std_exception (exc);
+ }
+ catch (...)
+ {
+ Threads::internal::handle_unknown_exception ();
+ }
+ }
// return an invalid
* level this line lives on.
*/
void get_mg_dof_indices (const int level,
- std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index = DH::default_fe_index) const;
+ std::vector<types::global_dof_index> &dof_indices,
+ const unsigned int fe_index = DH::default_fe_index) const;
/**
* Sets the level DoF indices that are returned by get_mg_dof_indices.
*/
void set_mg_dof_indices (const int level,
- const std::vector<types::global_dof_index> &dof_indices,
- const unsigned int fe_index = DH::default_fe_index);
+ const std::vector<types::global_dof_index> &dof_indices,
+ const unsigned int fe_index = DH::default_fe_index);
/**
* Global DoF index of the <i>i</i>
* level @p level. Also see vertex_dof_index().
*/
types::global_dof_index mg_vertex_dof_index (const int level,
- const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index = DH::default_fe_index) const;
+ const unsigned int vertex,
+ const unsigned int i,
+ const unsigned int fe_index = DH::default_fe_index) const;
/**
* Index of the <i>i</i>th degree
}
else
pointer += static_cast<types::global_dof_index>(
- (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
+ (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
}
}
return *(pointer + 1 + local_index);
else
pointer += static_cast<types::global_dof_index>(
- (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
+ (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
}
}
else
{
pointer += static_cast<types::global_dof_index>(
- (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
+ (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
++counter;
}
}
ExcInternalError());
pointer += static_cast<types::global_dof_index>(
- (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
+ (*dof_handler.finite_elements)[this_fe_index].dofs_per_vertex + 1);
++counter;
}
}
inline
types::global_dof_index
DoFAccessor<structdim, DH,lda>::mg_dof_index (const int level,
- const unsigned int i) const
+ const unsigned int i) const
{
return this->dof_handler->template get_dof_index<structdim> (level, this->present_index, 0, i);
}
inline
types::global_dof_index
DoFAccessor<structdim, DH,lda>::mg_vertex_dof_index (const int level,
- const unsigned int vertex,
- const unsigned int i,
- const unsigned int fe_index) const
+ const unsigned int vertex,
+ const unsigned int i,
+ const unsigned int fe_index) const
{
Assert (this->dof_handler != 0, ExcInvalidObject ());
Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ());
void
DoFAccessor<structdim, DH,lda>::set_vertex_dof_index (const unsigned int vertex,
const unsigned int i,
- const types::global_dof_index index,
+ const types::global_dof_index index,
const unsigned int fe_index) const
{
dealii::internal::DoFAccessor::Implementation::set_vertex_dof_index
inline
void
DoFAccessor<structdim, DH,lda>::set_mg_vertex_dof_index (const int level,
- const unsigned int vertex,
- const unsigned int i,
- const types::global_dof_index index,
- const unsigned int fe_index) const
+ const unsigned int vertex,
+ const unsigned int i,
+ const types::global_dof_index index,
+ const unsigned int fe_index) const
{
Assert (this->dof_handler != 0, ExcInvalidObject ());
Assert (&this->dof_handler->get_fe () != 0, ExcInvalidObject ());
inline
void
DoFAccessor<structdim, DH,lda>::set_mg_dof_index (const int level,
- const unsigned int i,
- const types::global_dof_index index) const
+ const unsigned int i,
+ const types::global_dof_index index) const
{
this->dof_handler->template set_dof_index<structdim> (level, this->present_index, 0, i, index);
}
// checking of 'i' happens in line_index(i)
return typename dealii::internal::DoFHandler::Iterators<DH,lda>::line_iterator
- (this->tria,
- 0, // only sub-objects are allowed, which have no level
- this->line_index(i),
- this->dof_handler);
+ (this->tria,
+ 0, // only sub-objects are allowed, which have no level
+ this->line_index(i),
+ this->dof_handler);
}
// checking of 'i' happens in quad_index(i)
return typename dealii::internal::DoFHandler::Iterators<DH,lda>::quad_iterator
- (this->tria,
- 0, // only sub-objects are allowed, which have no level
- this->quad_index(i),
- this->dof_handler);
+ (this->tria,
+ 0, // only sub-objects are allowed, which have no level
+ this->quad_index(i),
+ this->dof_handler);
}
* given (inclusive) range of levels.
*/
void init (const unsigned int coarsest_level,
- const unsigned int finest_level,
- const unsigned int dofs_per_vertex);
+ const unsigned int finest_level,
+ const unsigned int dofs_per_vertex);
/**
* Return the coarsest level for which this structure
*/
types::global_dof_index
get_index (const unsigned int level,
- const unsigned int dof_number) const;
+ const unsigned int dof_number) const;
/**
* Set the index of the <code>dof_number</code>th degree of
* to <code>index</code>.
*/
void set_index (const unsigned int level,
- const unsigned int dof_number,
- const types::global_dof_index index);
+ const unsigned int dof_number,
+ const types::global_dof_index index);
/**
* Exception.
*/
typedef dealii::Tensor<1, spacedim> divergence_type;
- /**
- * A structure where for each shape
- * function we pre-compute a bunch of
- * data that will make later accesses
- * much cheaper.
- */
- struct ShapeFunctionData
- {
+ /**
+ * A structure where for each shape
+ * function we pre-compute a bunch of
+ * data that will make later accesses
+ * much cheaper.
+ */
+ struct ShapeFunctionData
+ {
/**
* For each pair (shape
* function,component within
* (e.g. for RT elements it depends
* on the shape of a cell).
*/
- bool is_nonzero_shape_function_component[value_type::n_independent_components];
+ bool is_nonzero_shape_function_component[value_type::n_independent_components];
/**
* For each pair (shape function,
* harder to compute this
* information.
*/
- unsigned int row_index[value_type::n_independent_components];
+ unsigned int row_index[value_type::n_independent_components];
/**
* For each shape function say the
* no components are nonzero then
* store -2.
*/
- int single_nonzero_component;
- unsigned int single_nonzero_component_index;
- };
+ int single_nonzero_component;
+ unsigned int single_nonzero_component_index;
+ };
/**
* Default constructor. Creates an
* invalid object.
*/
- Tensor();
+ Tensor();
/**
* index of the first component of the
* selected symmetric second order tensor.
*/
- Tensor(const FEValuesBase<dim, spacedim> &fe_values_base,
- const unsigned int first_tensor_component);
+ Tensor(const FEValuesBase<dim, spacedim> &fe_values_base,
+ const unsigned int first_tensor_component);
/**
* copying and generate an exception if
* this function is called.
*/
- Tensor &operator=(const Tensor<2, dim, spacedim> &);
+ Tensor &operator=(const Tensor<2, dim, spacedim> &);
/**
* Return the value of the vector
* the quadrature point at which
* function is to be evaluated
*/
- value_type
- value (const unsigned int shape_function,
- const unsigned int q_point) const;
+ value_type
+ value (const unsigned int shape_function,
+ const unsigned int q_point) const;
/**
* Return the vector divergence of
* is as documented for the value()
* function.
*/
- divergence_type
- divergence (const unsigned int shape_function,
- const unsigned int q_point) const;
+ divergence_type
+ divergence (const unsigned int shape_function,
+ const unsigned int q_point) const;
/**
* Return the values of the selected
* function but it only works on the
* selected vector components.
*/
- template <class InputVector>
- void get_function_values (const InputVector &fe_function,
- std::vector<value_type> &values) const;
+ template <class InputVector>
+ void get_function_values (const InputVector &fe_function,
+ std::vector<value_type> &values) const;
/**
* definition of the
* divergence.
*/
- template <class InputVector>
- void get_function_divergences (const InputVector &fe_function,
- std::vector<divergence_type> &divergences) const;
+ template <class InputVector>
+ void get_function_divergences (const InputVector &fe_function,
+ std::vector<divergence_type> &divergences) const;
- private:
- /**
- * A reference to the FEValuesBase object
- * we operate on.
- */
- const FEValuesBase<dim, spacedim> &fe_values;
+ private:
+ /**
+ * A reference to the FEValuesBase object
+ * we operate on.
+ */
+ const FEValuesBase<dim, spacedim> &fe_values;
- /**
- * The first component of the vector
- * this view represents of the
- * FEValuesBase object.
- */
- const unsigned int first_tensor_component;
+ /**
+ * The first component of the vector
+ * this view represents of the
+ * FEValuesBase object.
+ */
+ const unsigned int first_tensor_component;
- /**
- * Store the data about shape
- * functions.
- */
- std::vector<ShapeFunctionData> shape_function_data;
- };
+ /**
+ * Store the data about shape
+ * functions.
+ */
+ std::vector<ShapeFunctionData> shape_function_data;
+ };
}
{
value_type return_value;
for (unsigned int d = 0; d < dim*dim; ++d)
- if (shape_function_data[shape_function].is_nonzero_shape_function_component[d]) {
- const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(d);
- return_value[indices]
- = fe_values.shape_values(shape_function_data[shape_function].row_index[d],q_point);
- }
+ if (shape_function_data[shape_function].is_nonzero_shape_function_component[d])
+ {
+ const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(d);
+ return_value[indices]
+ = fe_values.shape_values(shape_function_data[shape_function].row_index[d],q_point);
+ }
return return_value;
}
}
// is a first order tensor.
//
// assume the second-order tensor is
- // A with components A_{ij}.
+ // A with components A_{ij}.
// divergence as:
// b_j := \dfrac{\partial phi_{ij}}{\partial x_i}.
//
* construct CellId with a given coarse_cell_index and list of child indices
*/
explicit CellId(unsigned int coarse_cell_id_, std::vector<unsigned char> id_)
- : coarse_cell_id(coarse_cell_id_), id(id_)
+ : coarse_cell_id(coarse_cell_id_), id(id_)
{}
/**
* construct an empty CellId.
*/
CellId()
- : coarse_cell_id(-1)
+ : coarse_cell_id(-1)
{}
/**
* compare two CellIds
*/
- bool operator== (const CellId & other) const;
+ bool operator== (const CellId &other) const;
/**
* compare two CellIds
*/
- bool operator!= (const CellId & other) const;
+ bool operator!= (const CellId &other) const;
- friend std::istream& operator>> (std::istream& is, CellId& cid);
- friend std::ostream& operator<< (std::ostream& os, const CellId& cid);
+ friend std::istream &operator>> (std::istream &is, CellId &cid);
+ friend std::ostream &operator<< (std::ostream &os, const CellId &cid);
private:
unsigned int coarse_cell_id;
std::vector<unsigned char> id;
/**
* output CellId into a stream
*/
-inline std::ostream& operator<< (std::ostream& os, const CellId& cid)
+inline std::ostream &operator<< (std::ostream &os, const CellId &cid)
{
os << cid.coarse_cell_id << '_' << cid.id.size() << ':';
- for (unsigned int i=0;i<cid.id.size();++i)
+ for (unsigned int i=0; i<cid.id.size(); ++i)
os << static_cast<int>(cid.id[i]);
return os;
}
/**
* read CellId from a stream
*/
-inline std::istream& operator>> (std::istream& is, CellId& cid)
+inline std::istream &operator>> (std::istream &is, CellId &cid)
{
unsigned int cellid;
is >> cellid;
char value;
cid.id.clear();
- for (unsigned int i=0;i<idsize;++i)
+ for (unsigned int i=0; i<idsize; ++i)
{
is >> value;
cid.id.push_back(value-'0');
}
inline bool
-CellId::operator== (const CellId & other) const
+CellId::operator== (const CellId &other) const
{
if (this->coarse_cell_id != other.coarse_cell_id)
return false;
*
*/
inline bool
-CellId::operator!= (const CellId & other) const
+CellId::operator!= (const CellId &other) const
{
return !(*this == other);
}
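// A small round-trip example, grounded in the two stream operators above:
//
//   std::vector<unsigned char> children;
//   children.push_back(0);
//   children.push_back(3);
//   CellId id(0, children);
//   std::ostringstream out;
//   out << id;               // yields "0_2:03"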
*/
static
void
- extrude_triangulation(const Triangulation<2, 2> & input,
+ extrude_triangulation(const Triangulation<2, 2> &input,
const size_type n_slices,
const double height,
Triangulation<3,3> &result);
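  /*
   * Example use, as a sketch: extrude a refined unit square into a slab
   * of height 1; with 4 slices one obtains 3 layers of cells:
   *
   *   Triangulation<2> tria_2d;
   *   GridGenerator::hyper_cube (tria_2d);
   *   tria_2d.refine_global (2);
   *   Triangulation<3> tria_3d;
   *   GridGenerator::extrude_triangulation (tria_2d, 4, 1.0, tria_3d);
   */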
/**
* Background style.
*/
- enum Background{
- /// Use transparent value of SVG
- transparent,
- /// Use white background
- white,
- /// Use a gradient from white (top) to steelblue (bottom), and add date and time plus a deal.II logo. Automatically draws a margin.
- dealii};
+ enum Background
+ {
+ /// Use transparent value of SVG
+ transparent,
+ /// Use white background
+ white,
+ /// Use a gradient from white (top) to steelblue (bottom), and add date and time plus a deal.II logo. Automatically draws a margin.
+ dealii
+ };
Background background;
/**
* Cell coloring.
*/
- enum Coloring{
- /// No cell coloring
- none,
- /// Convert the material id into the cell color (default)
- material_id,
- /// Convert the level number into the cell color
- level_number,
- /// Convert the subdomain id into the cell color
- subdomain_id,
- /// Convert the level subdomain id into the cell color
- level_subdomain_id};
-
+ enum Coloring
+ {
+ /// No cell coloring
+ none,
+ /// Convert the material id into the cell color (default)
+ material_id,
+ /// Convert the level number into the cell color
+ level_number,
+ /// Convert the subdomain id into the cell color
+ subdomain_id,
+ /// Convert the level subdomain id into the cell color
+ level_subdomain_id
+ };
+
Coloring coloring;
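// Example (editor's sketch; assumes these members live in the
// GridOutFlags::Svg structure used by GridOut::write_svg):
//   GridOutFlags::Svg svg_flags;
//   svg_flags.background = GridOutFlags::Svg::dealii;
//   svg_flags.coloring   = GridOutFlags::Svg::material_id;
//   grid_out.set_flags (svg_flags);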
/// Interpret the level number of the cells as altitude over the x-y-plane (useful in the perspective view).
/**
* Cell labeling (fixed order).
- *
+ *
* The following booleans determine which properties of the cell
* shall be displayed as text in the middle of each cell.
*/
/**
* Write the triangulation in the SVG format.
- *
- * SVG (Scalable Vector Graphics) is
- * an XML-based vector image format
- * developed and maintained by the
- * World Wide Web Consortium (W3C).
- * This function conforms to the
- * latest specification SVG 1.1,
+ *
+ * SVG (Scalable Vector Graphics) is
+ * an XML-based vector image format
+ * developed and maintained by the
+ * World Wide Web Consortium (W3C).
+ * This function conforms to the
+ * latest specification SVG 1.1,
* released on August 16, 2011.
- *
+ *
* The cells of the triangulation are written as polygons with
* additional lines at the boundary of the triangulation. A coloring
* of the cells is further possible in order to visualize a certain
* colorbar can be drawn to encode the chosen coloring. Moreover, a
* cell label can be added, showing level index, etc.
*
- * @note Yet only implemented for
+ * @note This function is currently only implemented for
* two-dimensional grids in two
* space dimensions.
- *
+ *
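+ *
+ * A minimal use might look as follows (editor's sketch; the GridOut
+ * object and the output stream are assumptions, not part of the
+ * original text):
+ * @code
+ *   GridOut grid_out;
+ *   std::ofstream out ("grid.svg");
+ *   grid_out.write_svg (triangulation, out);
+ * @endcode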
*/
template <int dim, int spacedim>
void write_svg (const Triangulation<dim,spacedim> &tria,
*/
template <int dim>
void write_mathgl (const Triangulation<dim> &tria,
- std::ostream &out) const;
+ std::ostream &out) const;
/**
* Write grid to @p out according to the given data format. This
/**
- * This function projects a three-dimensional point (Point<3> point)
- * onto a two-dimensional image plane, specified by the position of
- * the camera viewing system (Point<3> camera_position), camera
- * direction (Point<3> camera_position), camera horizontal (Point<3>
- * camera_horizontal, necessary for the correct alignment of the
+ * This function projects a three-dimensional point (Point<3> point)
+ * onto a two-dimensional image plane, specified by the position of
+ * the camera viewing system (Point<3> camera_position), camera
+ * direction (Point<3> camera_direction), camera horizontal (Point<3>
+ * camera_horizontal, necessary for the correct alignment of the
* later images), and the focus of the camera (float camera_focus).
*
* For SVG output of grids.
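*
* In essence (editor's sketch of a standard perspective projection; the
* actual implementation may differ in detail): the ray from
* camera_position through the given point is intersected with the image
* plane, which lies at distance camera_focus from camera_position
* perpendicular to camera_direction; the intersection point is then
* expressed in the 2d basis formed by camera_horizontal and the cross
* product of camera_direction and camera_horizontal.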
*/
- static Point<2> svg_project_point(Point<3> point,
- Point<3> camera_position,
- Point<3> camera_direction,
- Point<3> camera_horizontal,
+ static Point<2> svg_project_point(Point<3> point,
+ Point<3> camera_position,
+ Point<3> camera_direction,
+ Point<3> camera_horizontal,
float camera_focus);
/**
TriaAccessorBase (const Triangulation<dim,spacedim> *parent = 0,
const int level = -1,
const int index = -1,
- const AccessorData * = 0);
+ const AccessorData * = 0);
/**
* Copy constructor. Creates an
* object with exactly the same data.
*/
TriaAccessorBase &operator = (const TriaAccessorBase &);
-
+
/**
* Ordering of accessors. If #structure_dimension is less than
* #dimension, we simply compare the index of such an object. If
* #structure_dimension equals #dimension, we compare the level()
* first, and the index() only if levels are equal.
*/
- bool operator < (const TriaAccessorBase& other) const;
+ bool operator < (const TriaAccessorBase &other) const;
protected:
/**
* for more information.
*/
bool active () const;
-
+
/**
* Ordering of accessors. This function implements a total ordering
* of cells even on a parallel::distributed::Triangulation. This
* and both cells are active, it compares subdomain_id(). If this is
* inconclusive, TriaAccessorBase::operator < () is called.
*/
- bool operator < (const CellAccessor<dim, spacedim>& other) const;
+ bool operator < (const CellAccessor<dim, spacedim> &other) const;
/**
{
// find the 'v'st child of our parent we are
unsigned char v=-1;
- for (unsigned int c=0;c<ptr.parent()->n_children();++c)
+ for (unsigned int c=0; c<ptr.parent()->n_children(); ++c)
{
if (ptr.parent()->child_index(c)==ptr.index())
{
TriaAccessorBase<structdim,dim,spacedim>::operator < (const TriaAccessorBase<structdim,dim,spacedim> &other) const
{
Assert (tria == other.tria, TriaAccessorExceptions::ExcCantCompareIterators());
-
+
if (present_level != other.present_level)
return (present_level < other.present_level);
-
+
return (present_index < other.present_index);
}
CellAccessor<dim,spacedim>::operator < (const CellAccessor<dim,spacedim> &other) const
{
Assert (this->tria == other.tria, TriaAccessorExceptions::ExcCantCompareIterators());
-
+
if (level_subdomain_id() != other.level_subdomain_id())
return (level_subdomain_id() < other.level_subdomain_id());
-
+
if (active() && other.active() &&
(subdomain_id() != other.subdomain_id()))
return (subdomain_id() < other.subdomain_id());
-
+
return TriaAccessorBase<dim,dim,spacedim>::operator < (other);
}
Assert (&accessor.get_triangulation() == &other.accessor.get_triangulation(),
ExcInvalidComparison());
-
+
// Deal with iterators past end
if (state()==IteratorState::past_the_end)
return false;
if (other.state()==IteratorState::past_the_end)
return true;
-
+
return ((**this) < (*other));
}
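// Example (editor's note): because of the total ordering defined above,
// iterators can be used as keys of ordered containers, for instance
//   std::map<typename Triangulation<dim>::active_cell_iterator, double>
//     error_indicator;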
template <typename Accessor>
template <typename OtherAccessor>
TriaIterator<Accessor>::TriaIterator (const OtherAccessor &a)
-:
-TriaRawIterator<Accessor> (a)
+ :
+ TriaRawIterator<Accessor> (a)
{
#ifdef DEBUG
// do this like this, because:
* DoFs which are constrained by
* hanging nodes, see @ref constraints.
*/
- types::global_dof_index n_dofs () const;
+ types::global_dof_index n_dofs () const;
/**
* The number of multilevel
return *(pointer + 1 + local_index);
else
pointer += static_cast<types::global_dof_index>(
- dof_handler.get_fe()[*pointer]
- .template n_dofs_per_object<dim>() + 1);
+ dof_handler.get_fe()[*pointer]
+ .template n_dofs_per_object<dim>() + 1);
}
}
}
return true;
else
pointer += static_cast<types::global_dof_index>(
- dof_handler.get_fe()[*pointer]
- .template n_dofs_per_object<dim>()+1);
+ dof_handler.get_fe()[*pointer]
+ .template n_dofs_per_object<dim>()+1);
}
}
}
const double dx = factor * fe.JxW(k);
for (unsigned int i=0; i<n_dofs; ++i)
for (unsigned int d1=0; d1<dim; ++d1)
- for (unsigned int d2=0; d2<dim; ++d2)
- {
- result(i) += dx * .25 *
- (input[d1][k][d2] + input[d2][k][d1]) *
- (fe.shape_grad_component(i,k,d1)[d2] + fe.shape_grad_component(i,k,d2)[d1]);
- }
+ for (unsigned int d2=0; d2<dim; ++d2)
+ {
+ result(i) += dx * .25 *
+ (input[d1][k][d2] + input[d2][k][d1]) *
+ (fe.shape_grad_component(i,k,d1)[d2] + fe.shape_grad_component(i,k,d2)[d1]);
+ }
}
}
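// (Editor's note: the factor .25 arises because each of the two factors in
// the product above is a symmetrized gradient
//   eps(u)_{d1,d2} = (d u_{d1} / d x_{d2} + d u_{d2} / d x_{d1}) / 2,
// so the loop accumulates the elasticity bilinear form (eps(u), eps(v)).)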
{
const double u= input[d1][k];
const double v= fe.shape_value_component(i,k,d1);
- const double g= data[d1][k];
- result(i) += dx + 2.*penalty * (u-g) * v;
-
- for (unsigned int d2=0; d2<dim; ++d2)
- {
- // v . nabla u n
- result(i) -= .5*dx* v * Dinput[d1][k][d2] * n(d2);
- // v . (nabla u)^T n
- result(i) -= .5*dx* v * Dinput[d2][k][d1] * n(d2);
- // u nabla v n
- result(i) -= .5*dx * (u-g) * fe.shape_grad_component(i,k,d1)[d2] * n(d2);
- // u (nabla v)^T n
- result(i) -= .5*dx * (u-g) * fe.shape_grad_component(i,k,d2)[d1] * n(d2);
- }
- }
- }
+ const double g= data[d1][k];
+ result(i) += dx * 2.*penalty * (u-g) * v;
+
+ for (unsigned int d2=0; d2<dim; ++d2)
+ {
+ // v . nabla u n
+ result(i) -= .5*dx* v * Dinput[d1][k][d2] * n(d2);
+ // v . (nabla u)^T n
+ result(i) -= .5*dx* v * Dinput[d2][k][d1] * n(d2);
+ // u nabla v n
+ result(i) -= .5*dx * (u-g) * fe.shape_grad_component(i,k,d1)[d2] * n(d2);
+ // u (nabla v)^T n
+ result(i) -= .5*dx * (u-g) * fe.shape_grad_component(i,k,d2)[d1] * n(d2);
+ }
+ }
+ }
}
-
+
/**
* The interior penalty flux
* for symmetric gradients.
double ext_factor = -1.)
{
const unsigned int n1 = fe1.dofs_per_cell;
-
+
AssertDimension(fe1.get_fe().n_components(), dim);
AssertDimension(fe2.get_fe().n_components(), dim);
AssertVectorVectorDimension(input1, dim, fe1.n_quadrature_points);
{
const double dx = fe1.JxW(k);
const Point<dim> &n = fe1.normal_vector(k);
-
+
for (unsigned int i=0; i<n1; ++i)
for (unsigned int d1=0; d1<dim; ++d1)
{
const double v1 = fe1.shape_value_component(i,k,d1);
const double v2 = fe2.shape_value_component(i,k,d1);
const double u1 = input1[d1][k];
const double u2 = input2[d1][k];
-
+
result1(i) += dx * penalty * u1*v1;
- result1(i) -= dx * penalty * u2*v1;
- result2(i) -= dx * penalty * u1*v2;
+ result1(i) -= dx * penalty * u2*v1;
+ result2(i) -= dx * penalty * u1*v2;
result2(i) += dx * penalty * u2*v2;
-
- for (unsigned int d2=0; d2<dim; ++d2)
- {
- // v . nabla u n
- result1(i) -= .25*dx* (nu1*Dinput1[d1][k][d2]+nu2*Dinput1[d1][k][d2]) * n(d2) * v1;
- result2(i) += .25*dx* (nu1*Dinput1[d1][k][d2]+nu2*Dinput1[d1][k][d2]) * n(d2) * v1;
- // v . (nabla u)^T n
- result1(i) -= .25*dx* (nu1*Dinput1[d2][k][d1]+nu2*Dinput1[d2][k][d1]) * n(d2) * v1;
- result2(i) += .25*dx* (nu1*Dinput1[d2][k][d1]+nu2*Dinput1[d2][k][d1]) * n(d2) * v1;
- // u nabla v n
- result1(i) -= .25*dx* nu1*fe1.shape_grad_component(i,k,d1)[d2] * n(d2) * (u1-u2);
- result2(i) -= .25*dx* nu2*fe2.shape_grad_component(i,k,d1)[d2] * n(d2) * (u1-u2);
- // u (nabla v)^T n
- result1(i) -= .25*dx* nu1*fe1.shape_grad_component(i,k,d2)[d1] * n(d2) * (u1-u2);
- result2(i) -= .25*dx* nu2*fe2.shape_grad_component(i,k,d2)[d1] * n(d2) * (u1-u2);
- }
+
+ for (unsigned int d2=0; d2<dim; ++d2)
+ {
+ // v . nabla u n
+ result1(i) -= .25*dx* (nu1*Dinput1[d1][k][d2]+nu2*Dinput2[d1][k][d2]) * n(d2) * v1;
+ result2(i) += .25*dx* (nu1*Dinput1[d1][k][d2]+nu2*Dinput2[d1][k][d2]) * n(d2) * v2;
+ // v . (nabla u)^T n
+ result1(i) -= .25*dx* (nu1*Dinput1[d2][k][d1]+nu2*Dinput2[d2][k][d1]) * n(d2) * v1;
+ result2(i) += .25*dx* (nu1*Dinput1[d2][k][d1]+nu2*Dinput2[d2][k][d1]) * n(d2) * v2;
+ // u nabla v n
+ result1(i) -= .25*dx* nu1*fe1.shape_grad_component(i,k,d1)[d2] * n(d2) * (u1-u2);
+ result2(i) -= .25*dx* nu2*fe2.shape_grad_component(i,k,d1)[d2] * n(d2) * (u1-u2);
+ // u (nabla v)^T n
+ result1(i) -= .25*dx* nu1*fe1.shape_grad_component(i,k,d2)[d1] * n(d2) * (u1-u2);
+ result2(i) -= .25*dx* nu2*fe2.shape_grad_component(i,k,d2)[d1] * n(d2) * (u1-u2);
+ }
}
}
}
AssertDimension (eigenvalues.size(), eigenvalues_im.size());
for (size_type i=0; i<eigenvalues.size(); ++i)
- eigenvalues[i] = std::complex<double> (eigenvalues_real[i],
- eigenvalues_im[i]);
+ eigenvalues[i] = std::complex<double> (eigenvalues_real[i],
+ eigenvalues_im[i]);
}
}
--block;
return std::pair<size_type,size_type>(block,
- i-start_indices[block]);
+ i-start_indices[block]);
}
inline
-BlockList::size_type
+BlockList::size_type
BlockList::size() const
{
return index_sets.size();
inline
-BlockList::size_type
+BlockList::size_type
BlockList::block_size(size_type block) const
{
return index_sets[block].size();
inline
-BlockList::size_type
+BlockList::size_type
BlockList::local_index(size_type block, size_type index) const
{
AssertIndexRange(block, index_sets.size());
ExcDimensionMismatch(src.n_blocks(), n_block_rows()));
for (unsigned int row=0; row<n_block_rows(); ++row)
- for (unsigned int col=0; col<n_block_cols(); ++col)
- block(row,col).Tvmult_add (dst.block(col),
- src.block(row));
+ for (unsigned int col=0; col<n_block_cols(); ++col)
+ block(row,col).Tvmult_add (dst.block(col),
+ src.block(row));
}
* of the current kind.
*/
typedef
- const Accessor<number,Constness> & value_type;
+ const Accessor<number,Constness> &value_type;
/**
* Constructor. Create an iterator into the matrix @p matrix for the given
template <typename number>
inline
-typename ChunkSparseMatrix<number>::size_type
+typename ChunkSparseMatrix<number>::size_type
ChunkSparseMatrix<number>::n () const
{
Assert (cols != 0, ExcNotInitialized());
template <typename number>
inline
-typename ChunkSparseMatrix<number>::size_type
+typename ChunkSparseMatrix<number>::size_type
ChunkSparseMatrix<number>::compute_location (const size_type i,
const size_type j) const
{
const size_type chunk_size = cols->get_chunk_size();
const size_type chunk_index
- = cols->sparsity_pattern(i/chunk_size, j/chunk_size);
+ = cols->sparsity_pattern(i/chunk_size, j/chunk_size);
if (chunk_index == ChunkSparsityPattern::invalid_entry)
return ChunkSparsityPattern::invalid_entry;
const size_type n_filled_last_cols = n % chunk_size;
const size_type last_regular_row = n_filled_last_rows > 0 ?
- std::min(m/chunk_size,
- static_cast<size_type>(end_row)) :
- end_row;
+ std::min(m/chunk_size,
+ static_cast<size_type>(end_row)) :
+ end_row;
const size_type irregular_col = n/chunk_size;
typename OutVector::iterator dst_ptr = dst.begin()+chunk_size*begin_row;
// chunks. this entails some padding elements
const size_type chunk_size = cols->get_chunk_size();
const size_type N = cols->sparsity_pattern.n_nonzero_elements() *
- chunk_size * chunk_size;
+ chunk_size * chunk_size;
if (N > max_len || max_len == 0)
{
if (val != 0)
template <typename number>
-typename ChunkSparseMatrix<number>::size_type
+typename ChunkSparseMatrix<number>::size_type
ChunkSparseMatrix<number>::n_nonzero_elements () const
{
Assert (cols != 0, ExcNotInitialized());
template <typename number>
-typename ChunkSparseMatrix<number>::size_type
+typename ChunkSparseMatrix<number>::size_type
ChunkSparseMatrix<number>::n_actually_nonzero_elements () const
{
Assert (cols != 0, ExcNotInitialized());
* chunk_size * chunk_size];
while (val_ptr != end_ptr)
- *val_ptr++ += factor * *matrix_ptr++;
+ *val_ptr++ += factor * *matrix_ptr++;
}
const ForwardIterator end,
const size_type chunk_size);
- /**
- * @deprecated This function is deprecated. Use the function
- * without the last argument
- */
+ /**
+ * @deprecated This function is deprecated. Use the function
+ * without the last argument
+ */
template <typename ForwardIterator>
void copy_from (const size_type n_rows,
const size_type n_cols,
const unsigned int row)
:
sparsity_pattern(sparsity_pattern),
- reduced_accessor(row==sparsity_pattern->n_rows() ?
+ reduced_accessor(row==sparsity_pattern->n_rows() ?
*sparsity_pattern->sparsity_pattern.end() :
*sparsity_pattern->sparsity_pattern.
begin(row/sparsity_pattern->get_chunk_size())),
Accessor::is_valid_entry () const
{
return reduced_accessor.is_valid_entry()
- &&
- sparsity_pattern->get_chunk_size()*reduced_accessor.row()+chunk_row <
- sparsity_pattern->n_rows()
- &&
- sparsity_pattern->get_chunk_size()*reduced_accessor.column()+chunk_col <
- sparsity_pattern->n_cols();
+ &&
+ sparsity_pattern->get_chunk_size()*reduced_accessor.row()+chunk_row <
+ sparsity_pattern->n_rows()
+ &&
+ sparsity_pattern->get_chunk_size()*reduced_accessor.column()+chunk_col <
+ sparsity_pattern->n_cols();
}
Assert (is_valid_entry() == true, ExcInvalidIterator());
return sparsity_pattern->get_chunk_size()*reduced_accessor.row() +
- chunk_row;
+ chunk_row;
}
Assert (is_valid_entry() == true, ExcInvalidIterator());
return sparsity_pattern->get_chunk_size()*reduced_accessor.column() +
- chunk_col;
+ chunk_col;
}
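// Worked example (editor's note): with chunk_size == 4, an entry whose
// reduced accessor sits in chunk row 1 and chunk column 2, at in-chunk
// offsets chunk_row == 3 and chunk_col == 1, has global row 4*1+3 == 7
// and global column 4*2+1 == 9.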
return true;
const unsigned int
- global_row = sparsity_pattern->get_chunk_size()*reduced_accessor.row()
- +chunk_row,
- other_global_row = sparsity_pattern->get_chunk_size()*
- other.reduced_accessor.row()+other.chunk_row;
+ global_row = sparsity_pattern->get_chunk_size()*reduced_accessor.row()
+ +chunk_row,
+ other_global_row = sparsity_pattern->get_chunk_size()*
+ other.reduced_accessor.row()+other.chunk_row;
if (global_row < other_global_row)
return true;
else if (global_row > other_global_row)
ChunkSparsityPattern::end (const unsigned int r) const
{
Assert (r<n_rows(), ExcIndexRange(r,0,n_rows()))
- return iterator(this, r+1);
+ return iterator(this, r+1);
}
for (inner_iterator j=i->begin(); j!=end_of_row; ++j)
{
const size_type col
- = internal::SparsityPatternTools::get_column_index_from_iterator(*j);
+ = internal::SparsityPatternTools::get_column_index_from_iterator(*j);
Assert (col < n_cols, ExcInvalidIndex(col,n_cols));
add (row, col);
* @ingroup Exceptions
*/
DeclException2 (ExcIncorrectConstraint,
- int, int,
+ int, int,
<< "While distributing the constraint for DoF "
<< arg1 << ", it turns out that one of the processors "
<< "who own the " << arg2
<< "with the appropriate locally_relevant set so "
<< "that every processor who owns a DoF that constrains "
<< "another DoF also knows about this constraint?");
-
+
private:
/**
// which line in the constraint matrix
// handles this index
std::vector<size_type> distribute (sparsity.n_rows(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
// in the constraint matrix handles this
// index
std::vector<size_type> distribute (sparsity.n_rows(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
{
for (typename std::vector<size_type>::const_iterator it = cm.begin();
it != cm.end(); ++it)
- vec(*it) = 0.;
+ vec(*it) = 0.;
}
template<class VEC>
typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
+ it != lines.end(); ++it)
if (vec_owned_elements.is_element(it->line))
for (unsigned int i=0; i<it->entries.size(); ++i)
if (!vec_owned_elements.is_element(it->entries[i].first))
internal::bool2type<IsBlockVector<VectorType>::value>());
for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
+ it != lines.end(); ++it)
if (vec_owned_elements.is_element(it->line))
{
typename VectorType::value_type
- new_value = it->inhomogeneity;
+ new_value = it->inhomogeneity;
for (unsigned int i=0; i<it->entries.size(); ++i)
new_value += (static_cast<typename VectorType::value_type>
(ghosted_vector(it->entries[i].first)) *
for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
new_value += (static_cast<typename VectorType::value_type>
(vec(next_constraint->entries[i].first)) *
- next_constraint->entries[i].second);
+ next_constraint->entries[i].second);
Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
vec(next_constraint->line) = new_value;
}
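// (Editor's note: the code above evaluates the affine constraint
//   x_i = b_i + sum_j w_j x_{k_j},
// i.e. the inhomogeneity plus the weighted values of the constraining
// degrees of freedom, and stores the result in the constrained entry.)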
individual_size[index]++;
}
- size_type
+ size_type
get_size (const size_type index) const
{
return individual_size[index];
const size_type loc_row = global_rows.local_row(i);
typename SparseMatrix<number>::iterator
- matrix_values = sparse_matrix->begin(row);
+ matrix_values = sparse_matrix->begin(row);
const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols();
// distinguish three cases about what can
Utilities::lower_bound(active_dofs.begin(),
active_dofs.end()-i+1,
new_index);
- if (*it != new_index)
+ if (*it != new_index)
active_dofs.insert(it, new_index);
}
}
for (size_type block_col=0; block_col<num_blocks; ++block_col)
{
const size_type start_block = block_starts[block_col],
- end_block = block_starts[block_col+1];
+ end_block = block_starts[block_col+1];
if (use_dealii_matrix == false)
{
size_type *col_ptr = &cols[0];
for (size_type block_col=0; block_col<num_blocks; ++block_col)
{
const size_type begin_block = block_starts[block_col],
- end_block = block_starts[block_col+1];
+ end_block = block_starts[block_col+1];
std::vector<size_type>::iterator col_ptr = cols.begin();
internals::resolve_matrix_row (global_rows, i, begin_block,
end_block, dof_mask, col_ptr);
{
const typename MATRIX::const_iterator end_row = M.end(row);
for (typename MATRIX::const_iterator entry = M.begin(row);
- entry != end_row; ++entry)
+ entry != end_row; ++entry)
this->el(row, entry->column()) = entry->value();
}
}
{
const typename MATRIX::const_iterator end_row = M.end(row);
for (typename MATRIX::const_iterator entry = M.begin(row);
- entry != end_row; ++entry)
+ entry != end_row; ++entry)
this->el(entry->column(), row) = entry->value();
}
}
for (size_type i=0; i<this->m(); ++i)
{
for (size_type j=0; j<this->n(); ++j)
- {
- s.width(w);
- s.precision(p);
- s << this->el(i,j);
- }
+ {
+ s.width(w);
+ s.precision(p);
+ s << this->el(i,j);
+ }
s << std::endl;
}
// Compute maximal size of copied block
const size_type rows = std::min (m() - dst_offset_i,
- src.m() - src_offset_i);
+ src.m() - src_offset_i);
const size_type cols = std::min (n() - dst_offset_j,
- src.n() - src_offset_j);
+ src.n() - src_offset_j);
for (size_type i=0; i<rows ; ++i)
for (size_type j=0; j<cols ; ++j)
// Compute maximal size of copied block
const size_type rows = std::min (m() - dst_offset_i, src.n() - src_offset_j);
const size_type cols = std::min (n() - dst_offset_j,
- src.m() - src_offset_i);
+ src.m() - src_offset_i);
for (size_type i=0; i<rows ; ++i)
const somenumber *src_ptr = src.begin();
for (size_type i=0; i<n; ++i, ++dst_ptr, ++src_ptr)
- *dst_ptr = somenumber(om) * *src_ptr / somenumber((*this)(i,i));
+ *dst_ptr = somenumber(om) * *src_ptr / somenumber((*this)(i,i));
}
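// (Editor's note: this loop is the damped Jacobi sweep
// dst = om * D^{-1} * src, with D the diagonal of the matrix.)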
* for other blocks of the system matrix.
*/
typedef PETScWrappers::PreconditionILU PreconditionILU;
-
+
/**
* Typedef for the Incomplete Jacobi decomposition preconditioner used
* for other blocks of the system matrix.
*/
typedef PETScWrappers::PreconditionJacobi PreconditionJacobi;
-
+
}
}
* for other blocks of the system matrix.
*/
typedef TrilinosWrappers::PreconditionILU PreconditionILU;
-
+
/**
* Typedef for the Incomplete Jacobi decomposition preconditioner used
* for other blocks of the system matrix.
inline
-IdentityMatrix::size_type
+IdentityMatrix::size_type
IdentityMatrix::m () const
{
return size;
inline
-IdentityMatrix::size_type
+IdentityMatrix::size_type
IdentityMatrix::n () const
{
return size;
{
const typename MATRIX::const_iterator end_row = M.end(row);
for (typename MATRIX::const_iterator entry = M.begin(row);
- entry != end_row; ++entry)
+ entry != end_row; ++entry)
this->el(row, entry->column()) = entry->value();
}
{
const typename MATRIX::const_iterator end_row = M.end(row);
for (typename MATRIX::const_iterator entry = M.begin(row);
- entry != end_row; ++entry)
+ entry != end_row; ++entry)
{
const size_type i = transpose ? entry->column() : row;
const size_type j = transpose ? row : entry->column();
const std::string &name,
const Options options)
{
- size_type
+ size_type
gridpoints_x = (matrix.n() / options.block_size
+
(matrix.n() % options.block_size != 0 ? 1 : 0)),
if (this->block(0).partitioner->n_mpi_processes() > 1)
return Utilities::MPI::sum (local_result,
this->block(0).partitioner->get_communicator())/
- (real_type)this->size();
+ (real_type)this->size();
else
return local_result/(real_type)this->size();
}
* not owned by the current processor but can be written into or read
* from locally (ghost elements).
*/
- const IndexSet& ghost_elements() const;
+ const IndexSet &ghost_elements() const;
/**
* Returns whether the given global index is a ghost index on the
* vector.
*/
const MPI_Comm &get_mpi_communicator () const;
-
+
/**
* Checks whether the given partitioner is compatible with the
* partitioner used for this vector. Two partitioners are compatible if
else if (partitioner.get() != c.partitioner.get())
{
size_type local_ranges_different_loc = (local_range() !=
- c.local_range());
+ c.local_range());
if ((partitioner->n_mpi_processes() > 1 &&
Utilities::MPI::max(local_ranges_different_loc,
partitioner->get_communicator()) != 0)
else if (partitioner.get() != c.partitioner.get())
{
size_type local_ranges_different_loc = (local_range() !=
- c.local_range());
+ c.local_range());
if ((partitioner->n_mpi_processes() > 1 &&
Utilities::MPI::max(local_ranges_different_loc,
partitioner->get_communicator()) != 0)
Vector<Number>::vectors_equal_local (const Vector<Number2> &v) const
{
return partitioner->local_size()>0 ?
- vector_view.template operator == <Number2>(v.vector_view)
- : true;
+ vector_view.template operator == <Number2>(v.vector_view)
+ : true;
}
return Utilities::MPI::sum (local_result *
(real_type)partitioner->local_size(),
partitioner->get_communicator())
- /(real_type)partitioner->size();
+ /(real_type)partitioner->size();
else
return local_result;
}
template <typename Number>
inline
std::pair<typename Vector<Number>::size_type,
- typename Vector<Number>::size_type>
- Vector<Number>::local_range () const
+ typename Vector<Number>::size_type>
+ Vector<Number>::local_range () const
{
return partitioner->local_range();
}
template <typename Number>
inline
- typename Vector<Number>::size_type
+ typename Vector<Number>::size_type
Vector<Number>::n_ghost_entries () const
{
return partitioner->n_ghost_indices();
template <typename Number>
inline
- const IndexSet&
+ const IndexSet &
Vector<Number>::ghost_elements() const
{
return partitioner->ghost_indices();
template <typename Number>
inline
- const MPI_Comm&
+ const MPI_Comm &
Vector<Number>::get_mpi_communicator() const
{
return partitioner->get_communicator();
class Accessor
{
public:
- /**
- * Declare type for container size.
- */
- typedef types::global_dof_index size_type;
-
+ /**
+ * Declare type for container size.
+ */
+ typedef types::global_dof_index size_type;
+
/**
* Constructor. Since we use
* accessors only for read
* Declare type for container size.
*/
typedef types::global_dof_index size_type;
-
+
/**
* Constructor. Create an iterator
* into the matrix @p matrix for the
* class.
*/
typedef MatrixIterators::const_iterator const_iterator;
-
- /**
- * Declare type for container size.
- */
- typedef types::global_dof_index size_type;
+
+ /**
+ * Declare type for container size.
+ */
+ typedef types::global_dof_index size_type;
/**
* Declare a typedef in analogy to all
* Add the matrix @p other scaled by the factor @p factor to the current
* matrix.
*/
- MatrixBase & add (const MatrixBase &other,
- const PetscScalar factor);
+ MatrixBase &add (const MatrixBase &other,
+ const PetscScalar factor);
/**
* Matrix-vector multiplication:
/**
* Purposefully not implemented.
*/
- MatrixBase& operator=(const MatrixBase &);
+ MatrixBase &operator=(const MatrixBase &);
/**
* An internal array of integer
inline
- const_iterator::Accessor::size_type
+ const_iterator::Accessor::size_type
const_iterator::Accessor::row() const
{
Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
inline
- const_iterator::Accessor::size_type
+ const_iterator::Accessor::size_type
const_iterator::Accessor::column() const
{
Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
inline
- const_iterator::Accessor::size_type
+ const_iterator::Accessor::size_type
const_iterator::Accessor::index() const
{
Assert (a_row < matrix->m(), ExcBeyondEndOfMatrix());
* hand in the same vector for the first two arguments.
*/
void reinit(const std::vector<IndexSet> &rows,
- const std::vector<IndexSet> &cols,
- const BlockCompressedSimpleSparsityPattern &bcsp,
- const MPI_Comm &com);
+ const std::vector<IndexSet> &cols,
+ const BlockCompressedSimpleSparsityPattern &bcsp,
+ const MPI_Comm &com);
/**
* Same as above but for a symmetric structure only.
*/
void reinit(const std::vector<IndexSet> &sizes,
- const BlockCompressedSimpleSparsityPattern &bcsp,
- const MPI_Comm &com);
+ const BlockCompressedSimpleSparsityPattern &bcsp,
+ const MPI_Comm &com);
* Same as above, but include ghost elements
*/
BlockVector (const std::vector<IndexSet> ¶llel_partitioning,
- const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm &communicator);
+ const std::vector<IndexSet> &ghost_indices,
+ const MPI_Comm &communicator);
void reinit (const std::vector<IndexSet> ¶llel_partitioning,
const std::vector<IndexSet> &ghost_entries,
const MPI_Comm &communicator);
-
+
/**
* Change the number of blocks to
* <tt>num_blocks</tt>. The individual
inline
BlockVector::BlockVector (const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm &communicator)
+ const MPI_Comm &communicator)
{
reinit(parallel_partitioning, communicator);
}
inline
BlockVector::BlockVector (const std::vector<IndexSet> ¶llel_partitioning,
- const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm &communicator)
+ const std::vector<IndexSet> &ghost_indices,
+ const MPI_Comm &communicator)
{
reinit(parallel_partitioning, ghost_indices, communicator);
}
inline
void
BlockVector::reinit (const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm &communicator)
+ const MPI_Comm &communicator)
{
std::vector<size_type> sizes(parallel_partitioning.size());
for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
}
inline
- void
- BlockVector::reinit (const std::vector<IndexSet> ¶llel_partitioning,
- const std::vector<IndexSet> &ghost_entries,
- const MPI_Comm &communicator)
+ void
+ BlockVector::reinit (const std::vector<IndexSet> ¶llel_partitioning,
+ const std::vector<IndexSet> &ghost_entries,
+ const MPI_Comm &communicator)
{
std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
* Make a copy of the PETSc matrix @p other. It is assumed that both matrices have
* the same SparsityPattern.
*/
- void copy_from(const SparseMatrix & other);
+ void copy_from(const SparseMatrix &other);
/**
* Throw away the present matrix and
* Note that only contiguous IndexSets are supported.
*/
template <typename SparsityType>
- void reinit (const IndexSet & local_rows,
- const IndexSet & local_columns,
+ void reinit (const IndexSet &local_rows,
+ const IndexSet &local_columns,
const SparsityType &sparsity_pattern,
const MPI_Comm &communicator);
* Same as previous functions.
*/
template <typename SparsityType>
- void do_reinit (const IndexSet & local_rows,
- const IndexSet & local_columns,
- const SparsityType &sparsity_pattern);
+ void do_reinit (const IndexSet &local_rows,
+ const IndexSet &local_columns,
+ const SparsityType &sparsity_pattern);
/**
* To allow calling protected
*/
static const bool supports_distributed_data = true;
- /**
- * Default constructor. Initialize the
- * vector as empty.
- */
+ /**
+ * Default constructor. Initialize the
+ * vector as empty.
+ */
Vector ();
/**
* vector.
*/
Vector (const IndexSet &local,
- const IndexSet &ghost,
- const MPI_Comm &communicator);
+ const IndexSet &ghost,
+ const MPI_Comm &communicator);
/**
* Constructs a new parallel PETSc
* convergence has been reached.
*/
static
- PetscErrorCode convergence_test (KSP ksp,
- const PetscInt iteration,
- const PetscReal residual_norm,
- KSPConvergedReason *reason,
- void *solver_control);
+ PetscErrorCode convergence_test (KSP ksp,
+ const PetscInt iteration,
+ const PetscReal residual_norm,
+ KSPConvergedReason *reason,
+ void *solver_control);
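/* Editor's sketch (assumption based on the public PETSc API, not shown in
   this patch): a callback with the above signature is registered through

     KSPSetConvergenceTest (ksp, &convergence_test, solver_control, NULL);

   after which PETSc invokes it once per iteration, passing the deal.II
   SolverControl object back through the void* context pointer. */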
/**
* A structure that contains the PETSc
* to see if convergence has been reached.
*/
static
- PetscErrorCode convergence_test (KSP ksp,
- const PetscInt iteration,
- const PetscReal residual_norm,
- KSPConvergedReason *reason,
- void *solver_control);
+ PetscErrorCode convergence_test (KSP ksp,
+ const PetscInt iteration,
+ const PetscReal residual_norm,
+ KSPConvergedReason *reason,
+ void *solver_control);
/**
* A structure that contains the
*/
template <typename SparsityType>
explicit SparseMatrix (const SparsityType &sparsity_pattern,
- const bool preset_nonzero_locations = true);
+ const bool preset_nonzero_locations = true);
/**
* This operator assigns a scalar to
* the constructor of this class with
* the same argument list as the
* present function.
- */
+ */
void reinit (const size_type m,
const size_type n,
const std::vector<size_type> &row_lengths,
/**
* Purposefully not implemented
*/
- SparseMatrix& operator= (const SparseMatrix &);
+ SparseMatrix &operator= (const SparseMatrix &);
/**
* Do the actual work for the
*/
typedef types::global_dof_index size_type;
- private:
+ private:
/**
* Constructor. It is made private so
* as to only allow the actual vector
const size_type index);
- public:
-
+ public:
+
/**
* This looks like a copy operator,
* but does something different than
// only. note: the first entry in each line denotes the diagonal
// element, which we need not check.
typename SparseMatrix<typename MATRIX::value_type>::const_iterator
- it = mat->begin(row)+1;
+ it = mat->begin(row)+1;
for ( ; it < mat->end(row); ++it)
if (it->column() > row)
break;
// attach stream to SolverCG, run it with log report for eigenvalues
std::ostream *old_stream = deallog.has_file() ? &deallog.get_file_stream() :
- static_cast<std::ostream *>(0);
+ static_cast<std::ostream *>(0);
if (old_stream)
deallog.detach();
cg_data.compute_eigenvalues = true;
SolverCG<VECTOR> solver (control, memory, cg_data);
internal::PreconditionChebyshev::DiagonalPreconditioner<VECTOR>
- preconditioner(data.matrix_diagonal_inverse);
+ preconditioner(data.matrix_diagonal_inverse);
try
{
solver.solve(matrix, *dummy, *rhs, preconditioner);
template<class MATRIX, typename inverse_type>
inline
-typename PreconditionBlockJacobi<MATRIX, inverse_type>::size_type
+typename PreconditionBlockJacobi<MATRIX, inverse_type>::size_type
PreconditionBlockJacobi<MATRIX, inverse_type>::const_iterator::Accessor::row() const
{
Assert (a_block < matrix->size(),
{
const size_type column = entry->column();
const size_type inverse_permuted_column = permuted
- ? this->inverse_permutation[column]
- : column;
+ ? this->inverse_permutation[column]
+ : column;
if (inverse_permuted_column >= block_end)
b_cell_row -= entry->value() * dst(column);
else if (!this->inverses_ready() && column >= block_start)
// Apply inverse diagonal
this->inverse_vmult(block, x_cell, b_cell);
#ifdef DEBUG
- for (unsigned int i=0;i<x_cell.size();++i)
- {
- Assert(numbers::is_finite(x_cell(i)), ExcNumberNotFinite());
- }
+ for (unsigned int i=0; i<x_cell.size(); ++i)
+ {
+ Assert(numbers::is_finite(x_cell(i)), ExcNumberNotFinite());
+ }
#endif
// Store in result vector
row=additional_data->block_list.begin(block);
* deal.II's own SolverControl objects to see if convergence has
* been reached.
*/
- void
+ void
get_solver_state (const SolverControl::State state);
/**
* change that.
*/
SolverLAPACK (SolverControl &cn,
- const MPI_Comm &mpi_communicator = PETSC_COMM_SELF,
- const AdditionalData &data = AdditionalData());
+ const MPI_Comm &mpi_communicator = PETSC_COMM_SELF,
+ const AdditionalData &data = AdditionalData());
protected:
// todo: The logic of these functions can be simplified without breaking backward compatibility...
template <typename OutputVector>
- void
- SolverBase::solve (const PETScWrappers::MatrixBase &A,
- std::vector<double> &eigenvalues,
- std::vector<OutputVector> &eigenvectors,
- const size_type n_eigenpairs)
- {
- // Panic if the number of eigenpairs wanted is out of bounds.
- AssertThrow ((n_eigenpairs > 0) && (n_eigenpairs <= A.m ()),
- ExcSLEPcWrappersUsageError());
-
- // Set the matrices of the problem
- set_matrices (A);
-
- // and solve
- size_type n_converged = 0;
- solve (n_eigenpairs, &n_converged);
-
- if (n_converged > n_eigenpairs)
- n_converged = n_eigenpairs;
- AssertThrow (n_converged == n_eigenpairs,
- ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
-
- AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError());
- eigenvectors.resize (n_converged, eigenvectors.front());
- eigenvalues.resize (n_converged);
-
- for (size_type index=0; index<n_converged; ++index)
- get_eigenpair (index, eigenvalues[index], eigenvectors[index]);
- }
-
+ void
+ SolverBase::solve (const PETScWrappers::MatrixBase &A,
+ std::vector<double> &eigenvalues,
+ std::vector<OutputVector> &eigenvectors,
+ const size_type n_eigenpairs)
+ {
+ // Panic if the number of eigenpairs wanted is out of bounds.
+ AssertThrow ((n_eigenpairs > 0) && (n_eigenpairs <= A.m ()),
+ ExcSLEPcWrappersUsageError());
+
+ // Set the matrices of the problem
+ set_matrices (A);
+
+ // and solve
+ size_type n_converged = 0;
+ solve (n_eigenpairs, &n_converged);
+
+ if (n_converged > n_eigenpairs)
+ n_converged = n_eigenpairs;
+ AssertThrow (n_converged == n_eigenpairs,
+ ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
+
+ AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError());
+ eigenvectors.resize (n_converged, eigenvectors.front());
+ eigenvalues.resize (n_converged);
+
+ for (size_type index=0; index<n_converged; ++index)
+ get_eigenpair (index, eigenvalues[index], eigenvectors[index]);
+ }
+
template <typename OutputVector>
- void
- SolverBase::solve (const PETScWrappers::MatrixBase &A,
- const PETScWrappers::MatrixBase &B,
- std::vector<double> &eigenvalues,
- std::vector<OutputVector> &eigenvectors,
- const size_type n_eigenpairs)
- {
- // Guard against incompatible matrix sizes:
- AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m()));
- AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n()));
-
- // Panic if the number of eigenpairs wanted is out of bounds.
- AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()),
- ExcSLEPcWrappersUsageError());
-
- // Set the matrices of the problem
- set_matrices (A, B);
-
- // and solve
- size_type n_converged = 0;
- solve (n_eigenpairs, &n_converged);
-
- if (n_converged>=n_eigenpairs)
- n_converged = n_eigenpairs;
-
- AssertThrow (n_converged==n_eigenpairs,
- ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
- AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError());
-
- eigenvectors.resize (n_converged, eigenvectors.front());
- eigenvalues.resize (n_converged);
-
- for (size_type index=0; index<n_converged; ++index)
- get_eigenpair (index, eigenvalues[index], eigenvectors[index]);
- }
+ void
+ SolverBase::solve (const PETScWrappers::MatrixBase &A,
+ const PETScWrappers::MatrixBase &B,
+ std::vector<double> &eigenvalues,
+ std::vector<OutputVector> &eigenvectors,
+ const size_type n_eigenpairs)
+ {
+ // Guard against incompatible matrix sizes:
+ AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m()));
+ AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n()));
+
+ // Panic if the number of eigenpairs wanted is out of bounds.
+ AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()),
+ ExcSLEPcWrappersUsageError());
+
+ // Set the matrices of the problem
+ set_matrices (A, B);
+
+ // and solve
+ size_type n_converged = 0;
+ solve (n_eigenpairs, &n_converged);
+
+ if (n_converged>=n_eigenpairs)
+ n_converged = n_eigenpairs;
+
+ AssertThrow (n_converged==n_eigenpairs,
+ ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
+ AssertThrow (eigenvectors.size() != 0, ExcSLEPcWrappersUsageError());
+
+ eigenvectors.resize (n_converged, eigenvectors.front());
+ eigenvalues.resize (n_converged);
+
+ for (size_type index=0; index<n_converged; ++index)
+ get_eigenpair (index, eigenvalues[index], eigenvectors[index]);
+ }
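+
+ // Example (editor's sketch; SolverKrylovSchur is one of the
+ // SLEPcWrappers solver classes, and A, B, eigenvalues, eigenvectors
+ // are assumed to be set up elsewhere):
+ //   SolverControl solver_control (1000, 1e-9);
+ //   SLEPcWrappers::SolverKrylovSchur eigensolver (solver_control);
+ //   eigensolver.solve (A, B, eigenvalues, eigenvectors, 5);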
template <typename OutputVector>
- void
- SolverBase::solve (const PETScWrappers::MatrixBase &A,
- const PETScWrappers::MatrixBase &B,
- std::vector<double> &real_eigenvalues,
- std::vector<double> &imag_eigenvalues,
- std::vector<OutputVector> &real_eigenvectors,
- std::vector<OutputVector> &imag_eigenvectors,
- const size_type n_eigenpairs)
- {
- // Guard against incompatible matrix sizes:
- AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m()));
- AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n()));
-
- // and incompatible eigenvalue/eigenvector sizes
- AssertThrow (real_eigenvalues.size() == imag_eigenvalues.size(),
- ExcDimensionMismatch(real_eigenvalues.size(), imag_eigenvalues.size()));
- AssertThrow (real_eigenvectors.size() == imag_eigenvectors.n (),
- ExcDimensionMismatch(real_eigenvectors.size(), imag_eigenvectors.size()));
-
- // Panic if the number of eigenpairs wanted is out of bounds.
- AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()),
- ExcSLEPcWrappersUsageError());
-
- // Set the matrices of the problem
- set_matrices (A, B);
-
- // and solve
- size_type n_converged = 0;
- solve (n_eigenpairs, &n_converged);
-
- if (n_converged>=n_eigenpairs)
- n_converged = n_eigenpairs;
-
- AssertThrow (n_converged==n_eigenpairs,
- ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
- AssertThrow ((real_eigenvectors.size()!=0) && (imag_eigenvectors.size()!=0),
- ExcSLEPcWrappersUsageError());
-
- real_eigenvectors.resize (n_converged, real_eigenvectors.front());
- imag_eigenvectors.resize (n_converged, imag_eigenvectors.front());
- real_eigenvalues.resize (n_converged);
- imag_eigenvalues.resize (n_converged);
-
- for (size_type index=0; index<n_converged; ++index)
- get_eigenpair (index,
- real_eigenvalues[index], imag_eigenvalues[index],
- real_eigenvectors[index], imag_eigenvectors[index]);
- }
+ void
+ SolverBase::solve (const PETScWrappers::MatrixBase &A,
+ const PETScWrappers::MatrixBase &B,
+ std::vector<double> &real_eigenvalues,
+ std::vector<double> &imag_eigenvalues,
+ std::vector<OutputVector> &real_eigenvectors,
+ std::vector<OutputVector> &imag_eigenvectors,
+ const size_type n_eigenpairs)
+ {
+ // Guard against incompatible matrix sizes:
+ AssertThrow (A.m() == B.m (), ExcDimensionMismatch(A.m(), B.m()));
+ AssertThrow (A.n() == B.n (), ExcDimensionMismatch(A.n(), B.n()));
+
+ // and incompatible eigenvalue/eigenvector sizes
+ AssertThrow (real_eigenvalues.size() == imag_eigenvalues.size(),
+ ExcDimensionMismatch(real_eigenvalues.size(), imag_eigenvalues.size()));
+ AssertThrow (real_eigenvectors.size() == imag_eigenvectors.size(),
+ ExcDimensionMismatch(real_eigenvectors.size(), imag_eigenvectors.size()));
+
+ // Panic if the number of eigenpairs wanted is out of bounds.
+ AssertThrow ((n_eigenpairs>0) && (n_eigenpairs<=A.m ()),
+ ExcSLEPcWrappersUsageError());
+
+ // Set the matrices of the problem
+ set_matrices (A, B);
+
+ // and solve
+ size_type n_converged = 0;
+ solve (n_eigenpairs, &n_converged);
+
+ if (n_converged>=n_eigenpairs)
+ n_converged = n_eigenpairs;
+
+ AssertThrow (n_converged==n_eigenpairs,
+ ExcSLEPcEigenvectorConvergenceMismatchError(n_converged, n_eigenpairs));
+ AssertThrow ((real_eigenvectors.size()!=0) && (imag_eigenvectors.size()!=0),
+ ExcSLEPcWrappersUsageError());
+
+ real_eigenvectors.resize (n_converged, real_eigenvectors.front());
+ imag_eigenvectors.resize (n_converged, imag_eigenvectors.front());
+ real_eigenvalues.resize (n_converged);
+ imag_eigenvalues.resize (n_converged);
+
+ for (size_type index=0; index<n_converged; ++index)
+ get_eigenpair (index,
+ real_eigenvalues[index], imag_eigenvalues[index],
+ real_eigenvectors[index], imag_eigenvectors[index]);
+ }
}
* <code>eigensolver</code>:
* @code
* // Set a transformation; this one shifts the eigenspectrum by 3.142.
- * SLEPcWrappers::TransformationShift::AdditionalData additional_data (3.142);
+ * SLEPcWrappers::TransformationShift::AdditionalData additional_data (3.142);
* SLEPcWrappers::TransformationShift shift (additional_data);
* eigensolver.set_transformation (shift);
* @endcode
* SolverArnoldi system (solver_control, mpi_communicator);
* eigensolver.solve (A, B, lambda, x, size_of_spectrum);
* @endcode
- *
+ *
* @note These options can also be set at the commandline.
*
* @ingroup SLEPcWrappers
precondition.vmult(vv,p);
}
else
- {
+ {
precondition.vmult(p, tmp_vectors[inner_iteration]);
A.vmult(vv,p);
};
SparseLUDecomposition<number>::prebuild_lower_bound()
{
const size_type *const
- column_numbers = this->get_sparsity_pattern().colnums;
+ column_numbers = this->get_sparsity_pattern().colnums;
const std::size_t *const
- rowstart_indices = this->get_sparsity_pattern().rowstart;
+ rowstart_indices = this->get_sparsity_pattern().rowstart;
const size_type N = this->m();
-
- prebuilt_lower_bound.resize (N);
-
- for (size_type row=0; row<N; row++)
- {
- prebuilt_lower_bound[row]
- = Utilities::lower_bound (&column_numbers[rowstart_indices[row]+1],
- &column_numbers[rowstart_indices[row+1]],
- row);
+
+ prebuilt_lower_bound.resize (N);
+
+ for (size_type row=0; row<N; row++)
+ {
+ prebuilt_lower_bound[row]
+ = Utilities::lower_bound (&column_numbers[rowstart_indices[row]+1],
+ &column_numbers[rowstart_indices[row+1]],
+ row);
}
}
{
typename SparseMatrix<number>::iterator index = this->begin(row);
typename SparseMatrix<somenumber>::const_iterator
- in_index = matrix.begin(row);
+ in_index = matrix.begin(row);
index->value() = in_index->value();
++index, ++in_index;
while (index < this->end(row) && in_index < matrix.end(row))
* inverse of the matrix, $A^{-1}$.
*/
void vmult (Vector<double> &dst,
- const Vector<double> &src) const;
+ const Vector<double> &src) const;
/**
* Same as before, but for block vectors.
*/
void vmult (BlockVector<double> &dst,
- const BlockVector<double> &src) const;
+ const BlockVector<double> &src) const;
/**
* Same as before, but uses the transpose of the matrix, i.e. this
* function multiplies with $A^{-T}$.
*/
void Tvmult (Vector<double> &dst,
- const Vector<double> &src) const;
+ const Vector<double> &src) const;
/**
* Same as before, but for block vectors
*/
void Tvmult (BlockVector<double> &dst,
- const BlockVector<double> &src) const;
+ const BlockVector<double> &src) const;
/**
* Same as vmult(), but adding to the previous solution. Not implemented
* yet but necessary for compiling certain other classes.
*/
void vmult_add (Vector<double> &dst,
- const Vector<double> &src) const;
+ const Vector<double> &src) const;
/**
* Same as before, but uses the transpose of the matrix, i.e. this
* function multiplies with $A^{-T}$.
*/
void Tvmult_add (Vector<double> &dst,
- const Vector<double> &src) const;
+ const Vector<double> &src) const;
/**
* @}
/**
* @}
*/
-
+
/**
* One of the UMFPack routines threw an error. The error code is included
* in the output and can be looked up in the UMFPack user manual. The
* object. See MemoryConsumption.
*/
std::size_t memory_consumption () const;
-
+
/**
* Dummy function for compatibility with distributed, parallel matrices.
*/
SparseMatrix<number> &
copy_from (const TrilinosWrappers::SparseMatrix &matrix);
#endif
-
+
/**
* Add <tt>matrix</tt> scaled by <tt>factor</tt> to this matrix, i.e. the
* matrix <tt>factor*matrix</tt> is added to <tt>this</tt>. This function
{
Assert (cols != 0, ExcNotInitialized());
Assert (val != 0, ExcNotInitialized());
-
+
bool hanging_diagonal = false;
number diagonal;
-
+
for (size_type i=0; i<cols->rows; ++i)
{
for (size_type j=cols->rowstart[i]; j<cols->rowstart[i+1]; ++j)
- {
- if (!diagonal_first && i == cols->colnums[j])
- {
- diagonal = val[j];
- hanging_diagonal = true;
- }
- else
- {
- if (hanging_diagonal && cols->colnums[j]>i)
- {
- if (across)
- out << ' ' << i << ',' << i << ':' << diagonal;
- else
- out << '(' << i << ',' << i << ") " << diagonal << std::endl;
- hanging_diagonal = false;
- }
- if (across)
- out << ' ' << i << ',' << cols->colnums[j] << ':' << val[j];
- else
- out << "(" << i << "," << cols->colnums[j] << ") " << val[j] << std::endl;
- }
- }
+ {
+ if (!diagonal_first && i == cols->colnums[j])
+ {
+ diagonal = val[j];
+ hanging_diagonal = true;
+ }
+ else
+ {
+ if (hanging_diagonal && cols->colnums[j]>i)
+ {
+ if (across)
+ out << ' ' << i << ',' << i << ':' << diagonal;
+ else
+ out << '(' << i << ',' << i << ") " << diagonal << std::endl;
+ hanging_diagonal = false;
+ }
+ if (across)
+ out << ' ' << i << ',' << cols->colnums[j] << ':' << val[j];
+ else
+ out << "(" << i << "," << cols->colnums[j] << ") " << val[j] << std::endl;
+ }
+ }
if (hanging_diagonal)
- {
- if (across)
- out << ' ' << i << ',' << i << ':' << diagonal;
- else
- out << '(' << i << ',' << i << ") " << diagonal << std::endl;
- hanging_diagonal = false;
- }
+ {
+ if (across)
+ out << ' ' << i << ',' << i << ':' << diagonal;
+ else
+ out << '(' << i << ',' << i << ") " << diagonal << std::endl;
+ hanging_diagonal = false;
+ }
}
if (across)
out << std::endl;
template <typename number>
-typename SparseMatrix<number>::size_type
+typename SparseMatrix<number>::size_type
SparseMatrix<number>::get_row_length (const size_type row) const
{
Assert (cols != 0, ExcNotInitialized());
template <typename number>
-typename SparseMatrix<number>::size_type
+typename SparseMatrix<number>::size_type
SparseMatrix<number>::n_nonzero_elements () const
{
Assert (cols != 0, ExcNotInitialized());
template <typename number>
-typename SparseMatrix<number>::size_type
+typename SparseMatrix<number>::size_type
SparseMatrix<number>::n_actually_nonzero_elements (const double threshold) const
{
Assert (cols != 0, ExcNotInitialized());
{
Assert (m() == matrix.m(), ExcDimensionMismatch(m(), matrix.m()));
Assert (n() == matrix.n(), ExcDimensionMismatch(n(), matrix.n()));
-
+
// first delete previous content
*this = 0;
// length of the row
int ncols;
int ierr
- = matrix.trilinos_matrix().ExtractGlobalRowCopy
- (row, matrix.row_length(row), ncols,
- &(value_cache[0]),
- reinterpret_cast<TrilinosWrappers::types::int_type*>(&(colnum_cache[0])));
+ = matrix.trilinos_matrix().ExtractGlobalRowCopy
+ (row, matrix.row_length(row), ncols,
+ &(value_cache[0]),
+ reinterpret_cast<TrilinosWrappers::types::int_type *>(&(colnum_cache[0])));
Assert (ierr==0, ExcTrilinosError(ierr));
// resize arrays to the size actually used
// then copy everything in one swoop
this->set(row,
- colnum_cache,
- value_cache);
+ colnum_cache,
+ value_cache);
}
return *this;
const number *const end_ptr = &val[cols->n_nonzero_elements()];
while (val_ptr != end_ptr)
- *val_ptr++ += factor * *matrix_ptr++;
+ *val_ptr++ += factor * *matrix_ptr++;
}
{
const size_type col = *rows;
size_type *new_cols = const_cast<size_type *>
- (&sp_B.colnums[sp_B.rowstart[col]]);
+ (&sp_B.colnums[sp_B.rowstart[col]]);
size_type *end_new_cols = const_cast<size_type *>
- (&sp_B.colnums[sp_B.rowstart[col+1]]);
+ (&sp_B.colnums[sp_B.rowstart[col+1]]);
// if B has a diagonal, need to add that manually. this way,
// we maintain sortedness.
&B.val[new_cols-&sp_B.colnums[sp_B.rowstart[0]]];
const numberB *const end_cols = &B.val[sp_B.rowstart[col+1]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
- *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
+ *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(col) : 1);
C.add (i, new_ptr-&new_entries[0], new_cols, &new_entries[0],
false, true);
const size_type *const end_rows =
&sp_A.colnums[sp_A.rowstart[i+1]];
// cast away constness to conform with csp.add_entries interface
- size_type *new_cols = const_cast<size_type*>
- (&sp_B.colnums[sp_B.rowstart[i]]);
- size_type *end_new_cols = const_cast<size_type*>
- (&sp_B.colnums[sp_B.rowstart[i+1]]);
+ size_type *new_cols = const_cast<size_type *>
+ (&sp_B.colnums[sp_B.rowstart[i]]);
+ size_type *end_new_cols = const_cast<size_type *>
+ (&sp_B.colnums[sp_B.rowstart[i+1]]);
if (sp_B.n_rows() == sp_B.n_cols())
++new_cols;
const numberB *B_val_ptr =
&B.val[new_cols-&sp_B.colnums[sp_B.rowstart[0]]];
for (; B_val_ptr != end_cols; ++B_val_ptr)
- *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
+ *new_ptr++ = A_val * *B_val_ptr * (use_vector ? V(i) : 1);
C.add (row, new_ptr-&new_entries[0], new_cols, &new_entries[0],
false, true);
// square matrix by above assertion
if (om != 1.)
for (size_type i=0; i<n; ++i, ++dst_ptr, ++src_ptr, ++rowstart_ptr)
- *dst_ptr = om * *src_ptr / val[*rowstart_ptr];
+ *dst_ptr = om * *src_ptr / val[*rowstart_ptr];
else
for (size_type i=0; i<n; ++i, ++dst_ptr, ++src_ptr, ++rowstart_ptr)
*dst_ptr = *src_ptr / val[*rowstart_ptr];
template <typename number>
inline
-typename SparseMatrixEZ<number>::size_type
+typename SparseMatrixEZ<number>::size_type
SparseMatrixEZ<number>::const_iterator::Accessor::row() const
{
return a_row;
template <typename number>
inline
-typename SparseMatrixEZ<number>::size_type
+typename SparseMatrixEZ<number>::size_type
SparseMatrixEZ<number>::const_iterator::Accessor::column() const
{
return matrix->data[matrix->row_info[a_row].start+a_index].column;
{
const typename MATRIX::const_iterator end_row = M.end(row);
for (typename MATRIX::const_iterator entry = M.begin(row);
- entry != end_row; ++entry)
+ entry != end_row; ++entry)
if (entry->value() != 0)
set(row, entry->column(), entry->value());
}
{
const typename MATRIX::const_iterator end_row = M.end(row);
for (typename MATRIX::const_iterator entry = M.begin(row);
- entry != end_row; ++entry)
+ entry != end_row; ++entry)
if (entry->value() != 0)
add(row, entry->column(), factor * entry->value());
}
for (; ri != end; ++dst_ptr, ++src_ptr, ++ri)
{
Assert (ri->diagonal != RowInfo::invalid_diagonal, ExcNoDiagonal());
- *dst_ptr = om * *src_ptr / data[ri->start + ri->diagonal].value;
+ *dst_ptr = om * *src_ptr / data[ri->start + ri->diagonal].value;
}
}
template <typename number>
-typename SparseMatrixEZ<number>::size_type
+typename SparseMatrixEZ<number>::size_type
SparseMatrixEZ<number>::get_row_length (const size_type row) const
{
return row_info[row].length;
template <typename number>
-typename SparseMatrixEZ<number>::size_type
+typename SparseMatrixEZ<number>::size_type
SparseMatrixEZ<number>::n_nonzero_elements() const
{
typename std::vector<RowInfo>::const_iterator row = row_info.begin();
#else
const size_type n_inverses = std::count (selected.begin(),
selected.end(),
- true);
+ true);
- const size_type n_inverses_per_thread = std::max(n_inverses / n_threads,
- static_cast<size_type> (1U));
+ const size_type n_inverses_per_thread = std::max(n_inverses / n_threads,
+ static_cast<size_type> (1U));
// set up start and end index
// for each of the
// number of DoFs coupling to
// irow (including irow itself)
for (typename SparseMatrix<number>::const_iterator p=matrix->begin(row);
- p != matrix->end(row); ++p)
+ p != matrix->end(row); ++p)
{
// find out whether this DoF
// (that couples with @p irow,
selected.end(),
true);
- const size_type n_inverses_per_block = std::max(n_inverses / n_blocks,
- static_cast<size_type> (1U));
+ const size_type n_inverses_per_block = std::max(n_inverses / n_blocks,
+ static_cast<size_type> (1U));
// precompute the splitting points
std::vector<std::pair<size_type, size_type> > intervals (n_blocks);
* the copy_from() function, if the inner iterator type points to plain
* unsigned integers.
*/
- size_type
+ size_type
get_column_index_from_iterator (const size_type i);
/**
* unsigned integers and some other value.
*/
template <typename value>
- size_type
+ size_type
get_column_index_from_iterator (const std::pair<size_type, value> &i);
/**
* <tt>std::map</tt>).
*/
template <typename value>
- size_type
+ size_type
get_column_index_from_iterator (const std::pair<const size_type, value> &i);
}
inline
- size_type
+ size_type
Accessor::row() const
{
Assert (is_valid_entry() == true, ExcInvalidIterator());
- const std::size_t * insert_point =
+ const std::size_t *insert_point =
std::upper_bound(sparsity_pattern->rowstart,
sparsity_pattern->rowstart + sparsity_pattern->rows + 1,
index_within_sparsity);
inline
- size_type
+ size_type
Accessor::column() const
{
Assert (is_valid_entry() == true, ExcInvalidIterator());
inline
- size_type
+ size_type
Accessor::index() const
{
Assert (is_valid_entry() == true, ExcInvalidIterator());
inline
-SparsityPattern::size_type
+SparsityPattern::size_type
SparsityPattern::n_rows () const
{
return rows;
inline
-SparsityPattern::size_type
+SparsityPattern::size_type
SparsityPattern::n_cols () const
{
return cols;
inline
-SparsityPattern::size_type
+SparsityPattern::size_type
SparsityPattern::column_number (const size_type row,
const unsigned int index) const
{
namespace internal
{
namespace SparsityPatternTools
- {
+ {
/**
* Declare type for container size.
*/
- typedef types::global_dof_index size_type;
-
+ typedef types::global_dof_index size_type;
+
inline
- size_type
+ size_type
get_column_index_from_iterator (const size_type i)
{
return i;
template <typename value>
inline
- size_type
+ size_type
get_column_index_from_iterator (const std::pair<size_type, value> &i)
{
return i.first;
template <typename value>
inline
- size_type
+ size_type
get_column_index_from_iterator (const std::pair<const size_type, value> &i)
{
return i.first;
* processes.
*/
explicit BlockVector (const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Copy-Constructor. Set all the
/**
* Solve the linear system
- * <tt>Ax=b</tt> where <tt>A</tt>
- * is an operator. This function
- * can be used for matrix free
+ * <tt>Ax=b</tt> where <tt>A</tt>
+ * is an operator. This function
+ * can be used for matrix free
* computation. Depending on
* the information provided by
* derived classes and the
/**
* Solve the linear system
* <tt>Ax=b</tt> where <tt>A</tt>
- * is an operator. This function can
+ * is an operator. This function can
* be used for matrix free computation. Depending on the
* information provided by derived
* classes and the object passed as a
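// Editorial sketch, not part of the patch: "matrix free" above means that
// the solver only requires an object providing vmult(); no assembled
// matrix is needed. The operator class below and its three-point stencil
// are illustrative assumptions, not deal.II API.
#include <deal.II/lac/vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
using namespace dealii;

class LaplaceOperator1D
{
public:
  // dst = A*src for the 1D three-point Laplace stencil, applied on the fly
  void vmult (Vector<double> &dst, const Vector<double> &src) const
  {
    const unsigned int n = src.size();
    for (unsigned int i=0; i<n; ++i)
      dst(i) = 2.*src(i)
               - (i > 0   ? src(i-1) : 0.)
               - (i+1 < n ? src(i+1) : 0.);
  }
};

// usage with a Krylov solver, which only ever calls A.vmult():
//   SolverControl control (1000, 1e-12);
//   SolverCG<Vector<double> > cg (control);
//   LaplaceOperator1D A;
//   cg.solve (A, x, b, PreconditionIdentity());   // x, b assumed initialized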
Epetra_CombineMode mode = last_action;
if (last_action == Zero)
- {
- if ((operation==::dealii::VectorOperation::add) ||
- (operation==::dealii::VectorOperation::unknown))
- mode = Add;
- else if (operation==::dealii::VectorOperation::insert)
- mode = Insert;
- }
+ {
+ if ((operation==::dealii::VectorOperation::add) ||
+ (operation==::dealii::VectorOperation::unknown))
+ mode = Add;
+ else if (operation==::dealii::VectorOperation::insert)
+ mode = Insert;
+ }
else
- {
- Assert(
+ {
+ Assert(
((last_action == Add) && (operation!=::dealii::VectorOperation::insert))
||
((last_action == Insert) && (operation!=::dealii::VectorOperation::add)),
ExcMessage("operation and argument to compress() do not match"));
- }
+ }
// flush buffers
int ierr;
ierr = matrix->GlobalAssemble (*column_space_map, matrix->RowMap(),
- true, mode);
+ true, mode);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
if (elide_zero_values == false)
{
col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
- col_value_ptr = const_cast<TrilinosScalar*>(values);
+ col_value_ptr = const_cast<TrilinosScalar *>(values);
n_columns = n_cols;
}
else
if (matrix->Filled() == false)
{
ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
- static_cast<TrilinosWrappers::types::int_type>(row),
- static_cast<int>(n_columns),const_cast<double *>(col_value_ptr),
- col_index_ptr);
+ static_cast<TrilinosWrappers::types::int_type>(row),
+ static_cast<int>(n_columns),const_cast<double *>(col_value_ptr),
+ col_index_ptr);
// When inserting elements, we do not want to create exceptions in
// the case when inserting non-local data (since that's what we
if (elide_zero_values == false)
{
col_index_ptr = (TrilinosWrappers::types::int_type *)col_indices;
- col_value_ptr = const_cast<TrilinosScalar*>(values);
+ col_value_ptr = const_cast<TrilinosScalar *>(values);
n_columns = n_cols;
#ifdef DEBUG
for (size_type j=0; j<n_cols; ++j)
ExcMessage ("Column map of matrix does not fit with vector map!"));
Assert (out.vector_partitioner().SameAs(m.RangeMap()) == true,
ExcMessage ("Row map of matrix does not fit with vector map!"));
- (void)m;
- (void)in;
- (void)out;
+ (void)m;
+ (void)in;
+ (void)out;
}
}
}
{
Assert (&src != &dst, ExcSourceEqualsDestination());
Assert (matrix->Filled(), ExcMatrixNotCompressed());
- (void)src;
- (void)dst;
+ (void)src;
+ (void)dst;
internal::SparseMatrix::check_vector_map_equality(*matrix, src, dst);
const size_type dst_local_size = dst.end() - dst.begin();
AssertDimension (dst_local_size, static_cast<size_type>(matrix->RangeMap().NumMyElements()));
- (void)dst_local_size;
+ (void)dst_local_size;
const size_type src_local_size = src.end() - src.begin();
AssertDimension (src_local_size, static_cast<size_type>(matrix->DomainMap().NumMyElements()));
- (void)src_local_size;
+ (void)src_local_size;
Epetra_MultiVector tril_dst (View, matrix->RangeMap(), dst.begin(),
matrix->DomainMap().NumMyPoints(), 1);
}
-
+
template <typename VectorType>
inline
void
// from it, which we can do by evaluating an expression such as
// multiplying the value produced by 2
Assert (sizeof(TrilinosWrappers::types::int_type) ==
- sizeof((*begin)*2),
- ExcNotImplemented());
+ sizeof((*begin)*2),
+ ExcNotImplemented());
TrilinosWrappers::types::int_type *col_index_ptr =
(TrilinosWrappers::types::int_type *)(&*begin);
namespace
{
#ifndef DEAL_II_USE_LARGE_INDEX_TYPE
- // define a helper function that queries the global ID of local ID of
- // an Epetra_BlockMap object by calling either the 32- or 64-bit
+ // define a helper function that queries the global ID of a local ID of
+ // an Epetra_BlockMap object by calling either the 32- or 64-bit
// function, as necessary.
int gid(const Epetra_BlockMap &map, int i)
{
return map.GID(i);
}
#else
- // define a helper function that queries the global ID of local ID of
- // an Epetra_BlockMap object by calling either the 32- or 64-bit
+ // define a helper function that queries the global ID of a local ID of
+ // an Epetra_BlockMap object by calling either the 32- or 64-bit
// function, as necessary.
long long int gid(const Epetra_BlockMap &map, int i)
{
* values ghost.
*/
void reinit (const IndexSet &local,
- const IndexSet &ghost,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const IndexSet &ghost,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Set all components of the
* details.
*/
Vector (const Epetra_Map &parallel_partitioning,
- const VectorBase &v);
+ const VectorBase &v);
/**
* Reinitialize from a deal.II
*/
template <typename Number>
Vector (const Epetra_Map &parallel_partitioning,
- const dealii::Vector<Number> &v);
+ const dealii::Vector<Number> &v);
//@}
/**
* @name Initialization with an IndexSet
* vector.
*/
explicit Vector (const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Creates a ghosted parallel vector.
* partitioning details.
*/
Vector (const IndexSet &parallel_partitioning,
- const VectorBase &v,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const VectorBase &v,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Copy-constructor from deal.II
*/
template <typename Number>
Vector (const IndexSet &parallel_partitioning,
- const dealii::Vector<Number> &v,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const dealii::Vector<Number> &v,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Reinit functionality. This function
* be generated internally.
*/
explicit Vector (const IndexSet &partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* This constructor takes a
/**
* Make the Vector class a bit like the <tt>vector<></tt> class of
* the C++ standard library by returning iterators to the start and end
- * of the locally owned elements of this vector. The ordering of local elements corresponds to the one given
+ * of the locally owned elements of this vector. The ordering of local elements corresponds to the one given
*
* It holds that end() - begin() == local_size().
*/
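// Editorial usage sketch of the iterator interface documented above; the
// vector v and the TrilinosScalar* iterator type are assumptions. begin()
// and end() delimit exactly the locally owned elements, so this loop never
// touches ghost entries:
//   for (TrilinosScalar *p = v.begin(); p != v.end(); ++p)
//     *p = 0.;
//   // consistent with the guarantee end() - begin() == local_size()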
{
const size_type n_indices = vector->Map().NumMyElements();
#ifndef DEAL_II_USE_LARGE_INDEX_TYPE
- unsigned int * vector_indices = (unsigned int*)vector->Map().MyGlobalElements();
+ unsigned int *vector_indices = (unsigned int *)vector->Map().MyGlobalElements();
#else
- size_type * vector_indices = (size_type*)vector->Map().MyGlobalElements64();
+ size_type *vector_indices = (size_type *)vector->Map().MyGlobalElements64();
#endif
is.add_indices(vector_indices, vector_indices+n_indices);
is.compress();
*/
struct DoFInfo
{
-
+
/**
* size_type of the dof_indices object.
*/
-
+
typedef std::vector<types::global_dof_index>::size_type size_dof;
-
+
/**
* size_type of the constraint_indicator object.
*/
-
+
typedef std::vector<std::pair<unsigned short, unsigned short> >::size_type size_constraint;
/**
* Clears all data fields in this class.
*/
void clear ();
-
+
/**
* Returns a pointer to the first index in the DoF row @p row.
* DoFHandlers.
*/
void compute_renumber_hp_serial (SizeInfo &size_info,
- std::vector<types::global_dof_index> &renumbering,
+ std::vector<types::global_dof_index> &renumbering,
std::vector<unsigned int> &irregular_cells);
/**
#ifndef DOXYGEN
inline
- const types::global_dof_index *
+ const types::global_dof_index *
DoFInfo::begin_indices (const unsigned int row) const
{
AssertIndexRange (row, row_starts.size()-1);
inline
- const types::global_dof_index *
+ const types::global_dof_index *
DoFInfo::end_indices (const unsigned int row) const
{
AssertIndexRange (row, row_starts.size()-1);
template <typename Number>
unsigned short
ConstraintValues<Number>::
- insert_entries (const std::vector<std::pair<types::global_dof_index,double> > &entries)
+ insert_entries (const std::vector<std::pair<types::global_dof_index,double> > &entries)
{
next_constraint.first.resize(entries.size());
if (entries.size() > 0)
void
- DoFInfo::read_dof_indices (const std::vector<types::global_dof_index> &local_indices,
+ DoFInfo::read_dof_indices (const std::vector<types::global_dof_index> &local_indices,
const std::vector<unsigned int> &lexicographic_inv,
const ConstraintMatrix &constraints,
const unsigned int cell_number,
dofs_per_cell[0] : dofs_per_cell[cell_active_fe_index[cell_number]];
for (unsigned int i=0; i<dofs_this_cell; i++)
{
- types::global_dof_index current_dof =
+ types::global_dof_index current_dof =
local_indices[lexicographic_inv[i]];
const std::vector<std::pair<types::global_dof_index,double> >
*entries_ptr =
}
}
row_starts[cell_number+1] = std_cxx1x::tuple<size_dof,
- size_constraint,
- unsigned int>
+ size_constraint,
+ unsigned int>
(dof_indices.size(), constraint_indicator.size(), 0);
// now to the plain indices: in case we have constraints on this cell,
{
for (unsigned int i=0; i<dofs_this_cell; ++i)
{
- types::global_dof_index current_dof =
+ types::global_dof_index current_dof =
local_indices[lexicographic_inv[i]];
if (n_mpi_procs > 1 &&
(current_dof < first_owned ||
// sort ghost dofs and compress out duplicates
const types::global_dof_index n_owned = (vector_partitioner->local_range().second-
- vector_partitioner->local_range().first);
+ vector_partitioner->local_range().first);
const size_dof n_ghosts = ghost_dofs.size();
unsigned int n_unique_ghosts= 0;
#ifdef DEBUG
}
std::sort (ghost_origin.begin(), ghost_origin.end());
- types::global_dof_index last_contiguous_start = ghost_origin[0].first;
+ types::global_dof_index last_contiguous_start = ghost_origin[0].first;
ghost_numbering[ghost_origin[0].second] = 0;
for (size_dof i=1; i<n_ghosts; i++)
{
const unsigned int n_boundary_cells = boundary_cells.size();
for (unsigned int i=0; i<n_boundary_cells; ++i)
{
- types::global_dof_index *data_ptr = const_cast<types::global_dof_index *>
- (begin_indices(boundary_cells[i]));
+ types::global_dof_index *data_ptr = const_cast<types::global_dof_index *>
+ (begin_indices(boundary_cells[i]));
const types::global_dof_index *row_end = end_indices(boundary_cells[i]);
for ( ; data_ptr != row_end; ++data_ptr)
if (row_length_indicators(boundary_cells[i]) > 0)
{
types::global_dof_index *data_ptr = const_cast<types::global_dof_index *>
- (begin_indices_plain(boundary_cells[i]));
+ (begin_indices_plain(boundary_cells[i]));
const types::global_dof_index *row_end =
(end_indices_plain(boundary_cells[i]));
for ( ; data_ptr != row_end; ++data_ptr)
std::vector<types::global_dof_index> &renumbering)
{
std::vector<types::global_dof_index> reverse_numbering (size_info.n_active_cells,
- numbers::invalid_unsigned_int);
+ numbers::invalid_unsigned_int);
const unsigned int n_boundary_cells = boundary_cells.size();
for (unsigned int j=0; j<n_boundary_cells; ++j)
reverse_numbering[boundary_cells[j]] =
std::vector<types::global_dof_index> &renumbering)
{
std::vector<types::global_dof_index> reverse_numbering (size_info.n_active_cells,
- numbers::invalid_unsigned_int);
+ numbers::invalid_unsigned_int);
const unsigned int n_boundary_cells = boundary_cells.size();
for (unsigned int j=0; j<n_boundary_cells; ++j)
reverse_numbering[boundary_cells[j]] = j;
// sanity check 1: all indices should be smaller than the number of dofs
// locally owned plus the number of ghosts
const types::global_dof_index index_range = (vector_partitioner->local_range().second-
- vector_partitioner->local_range().first)
- + vector_partitioner->ghost_indices().n_elements();
+ vector_partitioner->local_range().first)
+ + vector_partitioner->ghost_indices().n_elements();
for (size_dof i=0; i<dof_indices.size(); ++i)
AssertIndexRange (dof_indices[i], index_range);
// sanity check 3: all non-boundary cells should have indices that only
// refer to the locally owned range
const types::global_dof_index local_size = (vector_partitioner->local_range().second-
- vector_partitioner->local_range().first);
+ vector_partitioner->local_range().first);
for (unsigned int row=0; row<size_info.boundary_cells_start; ++row)
{
const types::global_dof_index *ptr = begin_indices(row);
{
AssertDimension (row_starts.size()-1, size_info.n_active_cells);
const types::global_dof_index n_rows =
- (vector_partitioner->local_range().second-
- vector_partitioner->local_range().first)
- + vector_partitioner->ghost_indices().n_elements();
+ (vector_partitioner->local_range().second-
+ vector_partitioner->local_range().first)
+ + vector_partitioner->ghost_indices().n_elements();
const types::global_dof_index n_blocks = (do_blocking == true) ?
- task_info.n_blocks : size_info.n_active_cells;
+ task_info.n_blocks : size_info.n_active_cells;
// first determine row lengths
std::vector<unsigned int> row_lengths(n_rows);
else
{
const types::global_dof_index *it = begin_indices (block),
- * end_cell = end_indices (block);
+ * end_cell = end_indices (block);
for ( ; it != end_cell; ++it)
if (row_lengths[*it] > 0)
{
types::global_dof_index counter = 0;
std::vector<types::global_dof_index>::iterator dof_ind = dof_indices.begin(),
- end_ind = dof_indices.end();
+ end_ind = dof_indices.end();
for ( ; dof_ind != end_ind; ++dof_ind)
{
if (*dof_ind < local_size)
{
out << " Memory row starts indices: ";
size_info.print_memory_statistics
- (out, (row_starts.capacity()*sizeof(std_cxx1x::tuple<size_dof, size_constraint, unsigned int>)));
+ (out, (row_starts.capacity()*sizeof(std_cxx1x::tuple<size_dof, size_constraint, unsigned int>)));
out << " Memory dof indices: ";
size_info.print_memory_statistics
(out, MemoryConsumption::memory_consumption (dof_indices));
{
typedef typename VectorType::BlockType BaseVectorType;
- static BaseVectorType* get_vector_component (VectorType &vec,
+ static BaseVectorType *get_vector_component (VectorType &vec,
const unsigned int component)
{
AssertIndexRange (component, vec.n_blocks());
{
typedef VectorType BaseVectorType;
- static BaseVectorType* get_vector_component (VectorType &vec,
+ static BaseVectorType *get_vector_component (VectorType &vec,
const unsigned int)
{
return &vec;
// select between block vectors and non-block vectors. Note that the number
// of components is checked in the internal data
typename internal::BlockVectorSelector<VectorType,
- IsBlockVector<VectorType>::value>::BaseVectorType *src_data[n_components];
+ IsBlockVector<VectorType>::value>::BaseVectorType *src_data[n_components];
for (unsigned int d=0; d<n_components; ++d)
src_data[d] = internal::BlockVectorSelector<VectorType, IsBlockVector<VectorType>::value>::get_vector_component(const_cast<VectorType &>(src), d);
// select between block vectors and non-block vectors. Note that the number
// of components is checked in the internal data
const typename internal::BlockVectorSelector<VectorType,
- IsBlockVector<VectorType>::value>::BaseVectorType *src_data[n_components];
+ IsBlockVector<VectorType>::value>::BaseVectorType *src_data[n_components];
for (unsigned int d=0; d<n_components; ++d)
src_data[d] = internal::BlockVectorSelector<VectorType, IsBlockVector<VectorType>::value>::get_vector_component(const_cast<VectorType &>(src), d);
// select between block vectors and non-block vectors. Note that the number
// of components is checked in the internal data
typename internal::BlockVectorSelector<VectorType,
- IsBlockVector<VectorType>::value>::BaseVectorType *dst_data[n_components];
+ IsBlockVector<VectorType>::value>::BaseVectorType *dst_data[n_components];
for (unsigned int d=0; d<n_components; ++d)
dst_data[d] = internal::BlockVectorSelector<VectorType, IsBlockVector<VectorType>::value>::get_vector_component(dst, d);
// select between block vectors and non-block vectors. Note that the number
// of components is checked in the internal data
typename internal::BlockVectorSelector<VectorType,
- IsBlockVector<VectorType>::value>::BaseVectorType *dst_data[n_components];
+ IsBlockVector<VectorType>::value>::BaseVectorType *dst_data[n_components];
for (unsigned int d=0; d<n_components; ++d)
dst_data[d] = internal::BlockVectorSelector<VectorType, IsBlockVector<VectorType>::value>::get_vector_component(dst, d);
inline
void
apply_tensor_product_values (const VectorizedArray<Number> *shape_values,
- const VectorizedArray<Number> in [],
- VectorizedArray<Number> out [])
+ const VectorizedArray<Number> in [],
+ VectorizedArray<Number> out [])
{
AssertIndexRange (direction, dim);
const int mm = dof_to_quad ? (fe_degree+1) : n_q_points_1d,
- nn = dof_to_quad ? n_q_points_1d : (fe_degree+1);
+ nn = dof_to_quad ? n_q_points_1d : (fe_degree+1);
const int n_cols = nn / 2;
const int mid = mm / 2;
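// Editorial note on the loops below: they exploit the symmetry of the 1D
// shape value array, S(i,j) == S(mm-1-i, nn-1-j), valid for symmetric
// point distributions. With v0 = S(i,col), v1 = S(i,nn-1-col) and the
// mirrored inputs x0 = in[i], x1 = in[mm-1-i], each pass updates two
// outputs at once,
//   res0 += v0*x0 + v1*x1;     // accumulates out[col]
//   res1 += v1*x0 + v0*x1;     // accumulates out[nn-1-col]
// so every loaded shape value is used twice; the mm%2 / nn%2 branches
// handle the unpaired middle row/column.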
{
for (int col=0; col<n_cols; ++col)
{
- VectorizedArray<Number> val0, val1, in0, in1, res0, res1;
- if (dof_to_quad == true)
- {
- val0 = shape_values[col];
- val1 = shape_values[nn-1-col];
- }
- else
- {
- val0 = shape_values[col*n_q_points_1d];
- val1 = shape_values[(col+1)*n_q_points_1d-1];
- }
- if (mid > 0)
- {
- in0 = in[0];
- in1 = in[stride*(mm-1)];
- res0 = val0 * in0;
- res1 = val1 * in0;
- res0 += val1 * in1;
- res1 += val0 * in1;
- for (int ind=1; ind<mid; ++ind)
- {
- if (dof_to_quad == true)
- {
- val0 = shape_values[ind*n_q_points_1d+col];
- val1 = shape_values[ind*n_q_points_1d+nn-1-col];
- }
- else
- {
- val0 = shape_values[col*n_q_points_1d+ind];
- val1 = shape_values[(col+1)*n_q_points_1d-1-ind];
- }
- in0 = in[stride*ind];
- in1 = in[stride*(mm-1-ind)];
- res0 += val0 * in0;
- res1 += val1 * in0;
- res0 += val1 * in1;
- res1 += val0 * in1;
- }
- }
- else
- res0 = res1 = VectorizedArray<Number>();
- if (dof_to_quad == true)
- {
- if (mm % 2 == 1)
- {
- val0 = shape_values[mid*n_q_points_1d+col];
- val1 = val0 * in[stride*mid];
- res0 += val1;
- res1 += val1;
- }
- }
- else
- {
- if (mm % 2 == 1 && nn % 2 == 0)
- {
- val0 = shape_values[col*n_q_points_1d+mid];
- val1 = val0 * in[stride*mid];
- res0 += val1;
- res1 += val1;
- }
- }
- if (add == false)
- {
- out[stride*col] = res0;
- out[stride*(nn-1-col)] = res1;
- }
- else
- {
- out[stride*col] += res0;
- out[stride*(nn-1-col)] += res1;
- }
+ VectorizedArray<Number> val0, val1, in0, in1, res0, res1;
+ if (dof_to_quad == true)
+ {
+ val0 = shape_values[col];
+ val1 = shape_values[nn-1-col];
+ }
+ else
+ {
+ val0 = shape_values[col*n_q_points_1d];
+ val1 = shape_values[(col+1)*n_q_points_1d-1];
+ }
+ if (mid > 0)
+ {
+ in0 = in[0];
+ in1 = in[stride*(mm-1)];
+ res0 = val0 * in0;
+ res1 = val1 * in0;
+ res0 += val1 * in1;
+ res1 += val0 * in1;
+ for (int ind=1; ind<mid; ++ind)
+ {
+ if (dof_to_quad == true)
+ {
+ val0 = shape_values[ind*n_q_points_1d+col];
+ val1 = shape_values[ind*n_q_points_1d+nn-1-col];
+ }
+ else
+ {
+ val0 = shape_values[col*n_q_points_1d+ind];
+ val1 = shape_values[(col+1)*n_q_points_1d-1-ind];
+ }
+ in0 = in[stride*ind];
+ in1 = in[stride*(mm-1-ind)];
+ res0 += val0 * in0;
+ res1 += val1 * in0;
+ res0 += val1 * in1;
+ res1 += val0 * in1;
+ }
+ }
+ else
+ res0 = res1 = VectorizedArray<Number>();
+ if (dof_to_quad == true)
+ {
+ if (mm % 2 == 1)
+ {
+ val0 = shape_values[mid*n_q_points_1d+col];
+ val1 = val0 * in[stride*mid];
+ res0 += val1;
+ res1 += val1;
+ }
+ }
+ else
+ {
+ if (mm % 2 == 1 && nn % 2 == 0)
+ {
+ val0 = shape_values[col*n_q_points_1d+mid];
+ val1 = val0 * in[stride*mid];
+ res0 += val1;
+ res1 += val1;
+ }
+ }
+ if (add == false)
+ {
+ out[stride*col] = res0;
+ out[stride*(nn-1-col)] = res1;
+ }
+ else
+ {
+ out[stride*col] += res0;
+ out[stride*(nn-1-col)] += res1;
+ }
}
if ( dof_to_quad == true && nn%2==1 && mm%2==1 )
{
- if (add==false)
- out[stride*n_cols] = in[stride*mid];
- else
- out[stride*n_cols] += in[stride*mid];
+ if (add==false)
+ out[stride*n_cols] = in[stride*mid];
+ else
+ out[stride*n_cols] += in[stride*mid];
}
else if (dof_to_quad == true && nn%2==1)
{
- VectorizedArray<Number> res0;
- VectorizedArray<Number> val0 = shape_values[n_cols];
- if (mid > 0)
- {
- res0 = in[0] + in[stride*(mm-1)];
- res0 *= val0;
- for (int ind=1; ind<mid; ++ind)
- {
- val0 = shape_values[ind*n_q_points_1d+n_cols];
- VectorizedArray<Number> val1 = in[stride*ind] + in[stride*(mm-1-ind)];
- val1 *= val0;
- res0 += val1;
- }
- }
- else
- res0 = VectorizedArray<Number>();
- if (add == false)
- out[stride*n_cols] = res0;
- else
- out[stride*n_cols] += res0;
+ VectorizedArray<Number> res0;
+ VectorizedArray<Number> val0 = shape_values[n_cols];
+ if (mid > 0)
+ {
+ res0 = in[0] + in[stride*(mm-1)];
+ res0 *= val0;
+ for (int ind=1; ind<mid; ++ind)
+ {
+ val0 = shape_values[ind*n_q_points_1d+n_cols];
+ VectorizedArray<Number> val1 = in[stride*ind] + in[stride*(mm-1-ind)];
+ val1 *= val0;
+ res0 += val1;
+ }
+ }
+ else
+ res0 = VectorizedArray<Number>();
+ if (add == false)
+ out[stride*n_cols] = res0;
+ else
+ out[stride*n_cols] += res0;
}
else if (dof_to_quad == false && nn%2 == 1)
{
- VectorizedArray<Number> res0;
- if (mid > 0)
- {
- VectorizedArray<Number> val0 = shape_values[n_cols*n_q_points_1d];
- res0 = in[0] + in[stride*(mm-1)];
- res0 *= val0;
- for (int ind=1; ind<mid; ++ind)
- {
- val0 = shape_values[n_cols*n_q_points_1d+ind];
- VectorizedArray<Number> val1 = in[stride*ind] + in[stride*(mm-1-ind)];
- val1 *= val0;
- res0 += val1;
- }
- if (mm % 2)
- res0 += in[stride*mid];
- }
- else
- res0 = in[0];
- if (add == false)
- out[stride*n_cols] = res0;
- else
- out[stride*n_cols] += res0;
+ VectorizedArray<Number> res0;
+ if (mid > 0)
+ {
+ VectorizedArray<Number> val0 = shape_values[n_cols*n_q_points_1d];
+ res0 = in[0] + in[stride*(mm-1)];
+ res0 *= val0;
+ for (int ind=1; ind<mid; ++ind)
+ {
+ val0 = shape_values[n_cols*n_q_points_1d+ind];
+ VectorizedArray<Number> val1 = in[stride*ind] + in[stride*(mm-1-ind)];
+ val1 *= val0;
+ res0 += val1;
+ }
+ if (mm % 2)
+ res0 += in[stride*mid];
+ }
+ else
+ res0 = in[0];
+ if (add == false)
+ out[stride*n_cols] = res0;
+ else
+ out[stride*n_cols] += res0;
}
// increment: in regular case, just go to the next point in
switch (direction)
{
case 0:
- in += mm;
- out += nn;
- break;
+ in += mm;
+ out += nn;
+ break;
case 1:
case 2:
- ++in;
- ++out;
- break;
+ ++in;
+ ++out;
+ break;
default:
- Assert (false, ExcNotImplemented());
+ Assert (false, ExcNotImplemented());
}
}
if (direction == 1)
inline
void
apply_tensor_product_gradients (const VectorizedArray<Number> *shape_gradients,
- const VectorizedArray<Number> in [],
- VectorizedArray<Number> out [])
+ const VectorizedArray<Number> in [],
+ VectorizedArray<Number> out [])
{
AssertIndexRange (direction, dim);
const int mm = dof_to_quad ? (fe_degree+1) : n_q_points_1d,
- nn = dof_to_quad ? n_q_points_1d : (fe_degree+1);
+ nn = dof_to_quad ? n_q_points_1d : (fe_degree+1);
const int n_cols = nn / 2;
const int mid = mm / 2;
inline
void
apply_tensor_product_hessians (const VectorizedArray<Number> *shape_hessians,
- const VectorizedArray<Number> in [],
- VectorizedArray<Number> out [])
+ const VectorizedArray<Number> in [],
+ VectorizedArray<Number> out [])
{
AssertIndexRange (direction, dim);
const int mm = dof_to_quad ? (fe_degree+1) : n_q_points_1d,
- nn = dof_to_quad ? n_q_points_1d : (fe_degree+1);
+ nn = dof_to_quad ? n_q_points_1d : (fe_degree+1);
const int n_cols = nn / 2;
const int mid = mm / 2;
inline
void
apply_tensor_product_gradients_gl (const VectorizedArray<Number> *shape_gradients,
- const VectorizedArray<Number> in [],
- VectorizedArray<Number> out [])
+ const VectorizedArray<Number> in [],
+ VectorizedArray<Number> out [])
{
AssertIndexRange (direction, dim);
const int mm = fe_degree+1;
dynamic_cast<const FE_Poly<TensorProductPolynomials<dim>,dim,dim>*>
(&fe.base_element(0));
const FE_Poly<TensorProductPolynomials<dim,Polynomials::
- PiecewisePolynomial<double> >,dim,dim> *fe_poly_piece =
+ PiecewisePolynomial<double> >,dim,dim> *fe_poly_piece =
dynamic_cast<const FE_Poly<TensorProductPolynomials<dim,
Polynomials::PiecewisePolynomial<double> >,dim,dim>*>
(&fe.base_element(0));
const FE_Poly<TensorProductPolynomials<dim>,dim,dim> *fe_poly =
dynamic_cast<const FE_Poly<TensorProductPolynomials<dim>,dim,dim>*>(&fe);
const FE_Poly<TensorProductPolynomials<dim,Polynomials::
- PiecewisePolynomial<double> >,dim,dim> *fe_poly_piece =
+ PiecewisePolynomial<double> >,dim,dim> *fe_poly_piece =
dynamic_cast<const FE_Poly<TensorProductPolynomials<dim,
Polynomials::PiecewisePolynomial<double> >,dim,dim>*> (&fe);
Assert (fe_poly != 0 || fe_poly_piece, ExcNotImplemented());
lexicographic = fe_poly != 0 ?
- fe_poly->get_poly_space_numbering_inverse() :
- fe_poly_piece->get_poly_space_numbering_inverse();
+ fe_poly->get_poly_space_numbering_inverse() :
+ fe_poly_piece->get_poly_space_numbering_inverse();
// to evaluate 1D polynomials, evaluate along the line where y=z=0,
// assuming that shape_value(0,Point<dim>()) == 1; otherwise, one would need
* pointer empty, but setting
* the #aux_local_indices.
*/
- DoFInfo (const DoFHandler<dim, spacedim>& dof_handler);
+ DoFInfo (const DoFHandler<dim, spacedim> &dof_handler);
/**
* Set the current cell and
template <class DHFaceIterator>
void set_face (const DHFaceIterator &f,
const unsigned int face_no);
-
+
/**
* Switch to a new subface of the same cell. Does not change @p
* indices and does not reset data in LocalResults.
* is to be used in the loop. Defaults to <tt>true</tt>.
*/
bool use_face;
-
+
/**
* This error is thrown if one of the virtual functions cell(),
* boundary(), or face() is called without being overloaded in a
* @note This function is usually only called by the assembler.
*/
void initialize_numbers(const unsigned int n);
-
+
/**
* Initialize the vector with vector values.
*
* @note This function is usually only called by the assembler.
*/
void initialize_vectors(const unsigned int n);
-
+
/**
* Allocate @p n local matrices. Additionally, set their block row
* and column coordinates to zero. The matrices themselves are
*/
SmartPointer<const ConstraintMatrix,ResidualSimple<VECTOR> > constraints;
};
-
-
+
+
/**
* Assemble local matrices into a single global matrix. If this global
* matrix has a block structure, this structure is not used, but
inline void
ResidualSimple<MATRIX>::initialize_local_blocks(const BlockIndices &)
{}
-
-
+
+
template <class VECTOR>
template <class DOFINFO>
inline void
}
else
{
- if (info.indices_by_block.size() == 0)
- constraints->distribute_local_to_global(info.vector(k).block(0), info.indices, (*residuals(k)));
- else
- for (unsigned int i=0;i != info.vector(k).n_blocks();++i)
- constraints->distribute_local_to_global(info.vector(k).block(i), info.indices_by_block[i], (*residuals(k)));
- }
- }
+ if (info.indices_by_block.size() == 0)
+ constraints->distribute_local_to_global(info.vector(k).block(0), info.indices, (*residuals(k)));
+ else
+ for (unsigned int i=0; i != info.vector(k).n_blocks(); ++i)
+ constraints->distribute_local_to_global(info.vector(k).block(i), info.indices_by_block[i], (*residuals(k)));
+ }
+ }
}
template <class VECTOR>
}
else
{
- if (info1.indices_by_block.size() == 0 && info2.indices_by_block.size() == 0)
- {
- constraints->distribute_local_to_global
- (info1.vector(k).block(0), info1.indices, (*residuals(k)));
- constraints->distribute_local_to_global
- (info2.vector(k).block(0), info2.indices, (*residuals(k)));
- }
- else if (info1.indices_by_block.size() != 0 && info2.indices_by_block.size() != 0)
- {
- for (unsigned int i=0;i<info1.vector(k).n_blocks();++i)
- {
- constraints->distribute_local_to_global
- (info1.vector(k).block(i), info1.indices_by_block[i], (*residuals(k)));
- constraints->distribute_local_to_global
- (info2.vector(k).block(i), info2.indices_by_block[i], (*residuals(k)));
- }
- }
- else
- {
- Assert(false, ExcNotImplemented());
- }
+ if (info1.indices_by_block.size() == 0 && info2.indices_by_block.size() == 0)
+ {
+ constraints->distribute_local_to_global
+ (info1.vector(k).block(0), info1.indices, (*residuals(k)));
+ constraints->distribute_local_to_global
+ (info2.vector(k).block(0), info2.indices, (*residuals(k)));
+ }
+ else if (info1.indices_by_block.size() != 0 && info2.indices_by_block.size() != 0)
+ {
+ for (unsigned int i=0; i<info1.vector(k).n_blocks(); ++i)
+ {
+ constraints->distribute_local_to_global
+ (info1.vector(k).block(i), info1.indices_by_block[i], (*residuals(k)));
+ constraints->distribute_local_to_global
+ (info2.vector(k).block(i), info2.indices_by_block[i], (*residuals(k)));
+ }
+ }
+ else
+ {
+ Assert(false, ExcNotImplemented());
+ }
}
}
}
info2.indices_by_block[row], info1.indices_by_block[column]);
}
else
- {
- Assert(false, ExcNotImplemented());
- }
+ {
+ Assert(false, ExcNotImplemented());
+ }
}
for (unsigned int j=0; j<i1.size(); ++j)
for (unsigned int k=0; k<i2.size(); ++k)
if (std::fabs(M(j,k)) >= threshold)
- // Enter values into matrix only if j corresponds to a
- // degree of freedom on the refinemenent edge, k does
- // not, and both are not on the boundary. This is part
- // the difference between the complete matrix with no
- // boundary condition at the refinement edge and and
- // the matrix assembled above by assemble().
-
- // Thus the logic is: enter the row if it is
- // constrained by hanging node constraints (actually,
- // the whole refinement edge), but not if it is
- // constrained by a boundary constraint.
+ // Enter values into the matrix only if j corresponds to a
+ // degree of freedom on the refinement edge, k does
+ // not, and both are not on the boundary. This is part
+ // of the difference between the complete matrix with no
+ // boundary condition at the refinement edge and
+ // the matrix assembled above by assemble().
+
+ // Thus the logic is: enter the row if it is
+ // constrained by hanging node constraints (actually,
+ // the whole refinement edge), but not if it is
+ // constrained by a boundary constraint.
if (mg_constrained_dofs->at_refinement_edge(level, i1[j]) &&
!mg_constrained_dofs->at_refinement_edge(level, i2[k]))
{
template<typename number, class VECTOR>
void
MGCoarseGridHouseholder<number, VECTOR>::operator() (
- const unsigned int /*level*/,
- VECTOR &dst,
- const VECTOR &src) const
+ const unsigned int /*level*/,
+ VECTOR &dst,
+ const VECTOR &src) const
{
householder.least_squares(dst, src);
}
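// Editorial sketch of the surrounding setup (names assumed): the
// householder member wraps a QR factorization of the coarse-level matrix,
// so the coarse-grid solve above amounts to a dense least-squares solve.
//   FullMatrix<double> coarse_matrix;                        // assembled elsewhere
//   MGCoarseGridHouseholder<double, Vector<double> > coarse_solver;
//   coarse_solver.initialize (coarse_matrix);                // computes the QR factors
//   // the multigrid cycle then invokes operator(), which forwards to
//   // householder.least_squares (dst, src).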
template<typename number, class VECTOR>
void
MGCoarseGridSVD<number, VECTOR>::operator() (
- const unsigned int /*level*/,
- VECTOR &dst,
- const VECTOR &src) const
+ const unsigned int /*level*/,
+ VECTOR &dst,
+ const VECTOR &src) const
{
matrix.vmult(dst, src);
}
class MGConstrainedDoFs : public Subscriptor
{
public:
-
+
typedef std::vector<std::set<types::global_dof_index> >::size_type size_dof;
/**
* Fill the internal data
{
typedef ::dealii::SparsityPattern Sparsity;
typedef ::dealii::SparseMatrix<typename VECTOR::value_type> Matrix;
-
+
template <class CSP, class DH>
- static void reinit(Matrix& matrix, Sparsity& sparsity, int level, const CSP& csp, const DH&)
+ static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const CSP &csp, const DH &)
{
sparsity.copy_from (csp);
matrix.reinit (sparsity);
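// Editorial note on the typical call path into this specialization
// (variable names assumed): csp is a compressed sparsity pattern filled
// per level, e.g. by MGTools::make_sparsity_pattern; copy_from() then
// compresses it into static CRS storage, and reinit() allocates the
// matrix entries accordingly.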
{
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
-
+
template <class CSP, class DH>
- static void reinit(Matrix& matrix, Sparsity& sparsity, int level, const CSP& csp, DH& dh)
+ static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const CSP &csp, DH &dh)
{
matrix.reinit(dh.locally_owned_mg_dofs(level+1),
- dh.locally_owned_mg_dofs(level),
- csp, MPI_COMM_WORLD, true);
+ dh.locally_owned_mg_dofs(level),
+ csp, MPI_COMM_WORLD, true);
}
-
+
};
template <>
{
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
-
+
template <class CSP, class DH>
- static void reinit(Matrix& matrix, Sparsity& sparsity, int level, const CSP& csp, DH& dh)
+ static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const CSP &csp, DH &dh)
{
}
};
* Memory used by this object.
*/
std::size_t memory_consumption () const;
-
+
/**
* Print all the matrices for debugging purposes.
*/
- void print_matrices(std::ostream& os) const;
+ void print_matrices(std::ostream &os) const;
/**
* Print the copy index fields for debugging purposes.
*/
- void print_indices(std::ostream& os) const;
-
+ void print_indices(std::ostream &os) const;
+
private:
/**
/**
* Mapping for the copy_to_mg() and copy_from_mg() functions. Here only
* index pairs locally owned are stored.
- *
+ *
* The data is organized as follows: one vector per level. Each
* element of these vectors contains first the global index, then
* the level index.
*/
std::vector<std::vector<std::pair<types::global_dof_index, unsigned int> > >
copy_indices_from_me;
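// Editorial sketch of the layout documented above: one vector per level,
// each entry a (global index, level index) pair, so a copy loop reads
//   for (unsigned int l=0; l<copy_indices_from_me.size(); ++l)
//     for (std::size_t k=0; k<copy_indices_from_me[l].size(); ++k)
//       dst(copy_indices_from_me[l][k].first) =
//         src[l](copy_indices_from_me[l][k].second);
// which is the pattern the copy_from_mg() loops further down follow.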
-
+
/**
* The vector that stores what
// First copy all indices local to this process
if (constraints==0)
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- dst(i->first) = src[level](i->second);
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ dst(i->first) = src[level](i->second);
else
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
-
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+
// Do the same for the indices where the level index is local,
// but the global index is not
if (constraints==0)
- for (IT i= copy_indices_from_me[level].begin();
- i != copy_indices_from_me[level].end(); ++i)
- dst(i->first) = src[level](i->second);
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ dst(i->first) = src[level](i->second);
else
- for (IT i= copy_indices_from_me[level].begin();
- i != copy_indices_from_me[level].end(); ++i)
- constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
}
}
{
typedef std::vector<std::pair<types::global_dof_index, unsigned int> >::const_iterator IT;
if (constraints==0)
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- dst(i->first) += src[level](i->second);
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ dst(i->first) += src[level](i->second);
else
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
-
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+
// Do the same for the indices where the level index is local,
// but the global index is not
if (constraints==0)
- for (IT i= copy_indices_from_me[level].begin();
- i != copy_indices_from_me[level].end(); ++i)
- dst(i->first) += src[level](i->second);
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ dst(i->first) += src[level](i->second);
else
- for (IT i= copy_indices_from_me[level].begin();
- i != copy_indices_from_me[level].end(); ++i)
- constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+ for (IT i= copy_indices_from_me[level].begin();
+ i != copy_indices_from_me[level].end(); ++i)
+ constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
}
}
typename DH::active_cell_iterator cell = cell_hint.get();
if (cell == dh->end())
cell = dh->begin_active();
-
+
boost::optional<Point<dim> >
qp = get_reference_coordinates (cell, p);
if (!qp)
{
const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, p);
- AssertThrow (my_pair.first->is_locally_owned(),
- ExcPointNotAvailableHere());
-
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
}
{
const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, p);
- AssertThrow (my_pair.first->is_locally_owned(),
- ExcPointNotAvailableHere());
-
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
}
{
const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, p);
- AssertThrow (my_pair.first->is_locally_owned(),
- ExcPointNotAvailableHere());
-
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
}
const std::pair<typename DH::active_cell_iterator, Point<dim> >
my_pair = GridTools::find_active_cell_around_point
(mapping, *dh, points[0]);
- AssertThrow (my_pair.first->is_locally_owned(),
- ExcPointNotAvailableHere());
-
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
point_flags[0] = true;
{
const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, points[first_outside]);
- AssertThrow (my_pair.first->is_locally_owned(),
- ExcPointNotAvailableHere());
-
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cells.push_back(my_pair.first);
qpoints.push_back(std::vector<Point<dim> >(1, my_pair.second));
maps.push_back(std::vector<unsigned int>(1, first_outside));
// interpolate the boundary values and then condense the matrix and vector
if (constraints_are_compatible)
{
- const Function<spacedim>* dummy = 0;
+ const Function<spacedim> *dummy = 0;
MatrixCreator::create_mass_matrix (mapping, dof, quadrature,
mass_matrix, function, tmp,
dummy, constraints);
subtract_mean_value(VECTOR &v,
const std::vector<bool> &p_select)
{
- if(p_select.size() == 0)
+ if (p_select.size() == 0)
{
// In case of an empty boolean mask operate on the whole vector:
v.add( - v.mean_value() );
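// Editorial example (values assumed): for v = (1, 2, 3) the mean value is
// 2, so after
//   VectorTools::subtract_mean_value (v, std::vector<bool>());
// the vector holds (-1, 0, 1) and v.mean_value() == 0.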
// This function is not implemented for distributed vectors, so
// if v is not a boring Vector or BlockVector:
Assert( dynamic_cast<Vector<double> *>(& v)
- || dynamic_cast<Vector<float> *>(& v)
- || dynamic_cast<Vector<long double> *>(& v)
- || dynamic_cast<BlockVector<double> *>(& v)
- || dynamic_cast<BlockVector<float> *>(& v)
- || dynamic_cast<BlockVector<long double> *>(& v),
- ExcNotImplemented());
+ || dynamic_cast<Vector<float> *>(& v)
+ || dynamic_cast<Vector<long double> *>(& v)
+ || dynamic_cast<BlockVector<double> *>(& v)
+ || dynamic_cast<BlockVector<float> *>(& v)
+ || dynamic_cast<BlockVector<long double> *>(& v),
+ ExcNotImplemented());
const unsigned int n = v.size();
Point<dim> q1, q2, q3, q4;
Vector<double>
v1(this->n_components), v2(this->n_components),
- v3(this->n_components), v4(this->n_components);
+ v3(this->n_components), v4(this->n_components);
const double h_inv_12=1./(12*h);
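// Editorial note: 1/(12*h) is the weight of the standard fourth-order
// central difference,
//   f'(x) ~= ( f(x-2h) - 8 f(x-h) + 8 f(x+h) - f(x+2h) ) / (12 h),
// which the four sample points q1..q4 presumably realize once per
// coordinate direction in the loop below.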
for (unsigned int i=0; i<dim; ++i)
{
// Calculate all missing rate values:
for (unsigned int i = no_rate_entries; i<n; ++i)
{
- if(i == 0)
+ if (i == 0)
{
// no value available for the first row
add_value(rate_key, std::string("-"));
// Calculate all missing rate values:
for (unsigned int i = no_rate_entries; i<n; ++i)
{
- if(i == 0)
+ if (i == 0)
{
// no value available for the first row
add_value(rate_key, std::string("-"));
}
// only add rate_key to the supercolumn once
- if(no_rate_entries == 0)
+ if (no_rate_entries == 0)
{
add_column_to_supercolumn(rate_key, superkey);
}
// Calculate all missing rate values:
for (unsigned int i = no_rate_entries; i<n; ++i)
{
- if(i == 0)
+ if (i == 0)
{
// no value available for the first row
add_value(rate_key, std::string("-"));
// Calculate all missing rate values:
for (unsigned int i = no_rate_entries; i<n; ++i)
{
- if(i == 0)
+ if (i == 0)
{
// no value available for the first row
add_value(rate_key, std::string("-"));
}
// only add rate_key to the supercolumn once
- if(no_rate_entries == 0)
+ if (no_rate_entries == 0)
{
add_column_to_supercolumn(rate_key, superkey);
}
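// Editorial note on the rate columns built above (the column key "L2" is
// an assumption): for ConvergenceTable::reduction_rate_log2 the entry in
// row i is
//   rate_i = log2( value_{i-1} / value_i ),
// i.e. the observed convergence order under uniform refinement; the first
// row has no predecessor, which is exactly the "-" case handled above.
//   ConvergenceTable table;
//   table.add_value ("L2", err);   // once per refinement step
//   table.evaluate_convergence_rates ("L2", ConvergenceTable::reduction_rate_log2);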
DataOutBase::SvgFlags::SvgFlags (const unsigned int height_vector,
- const int azimuth_angle,
+ const int azimuth_angle,
const int polar_angle,
const unsigned int line_thickness,
const bool margin,
const bool draw_colorbar) :
-height_vector(height_vector),
-azimuth_angle(azimuth_angle),
-polar_angle(polar_angle),
-line_thickness(line_thickness),
-margin(margin),
-draw_colorbar(draw_colorbar)
+ height_vector(height_vector),
+ azimuth_angle(azimuth_angle),
+ polar_angle(polar_angle),
+ line_thickness(line_thickness),
+ margin(margin),
+ draw_colorbar(draw_colorbar)
{}
int i, j;
for (i = 0; i < 2; ++i)
- {
- for (j = 0; j < 2-i; ++j)
{
- if (points[j][2] > points[j + 1][2])
- {
- Point<3> temp = points[j];
- points[j] = points[j+1];
- points[j+1] = temp;
- }
+ for (j = 0; j < 2-i; ++j)
+ {
+ if (points[j][2] > points[j + 1][2])
+ {
+ Point<3> temp = points[j];
+ points[j] = points[j+1];
+ points[j+1] = temp;
+ }
+ }
}
- }
// save the related three-dimensional vectors v_min, v_inter, and v_max
v_min = points[0];
bool col_change = false;
if (A[0][0] == 0)
- {
- col_change = true;
+ {
+ col_change = true;
- A[0][0] = A[0][1];
- A[0][1] = 0;
+ A[0][0] = A[0][1];
+ A[0][1] = 0;
- double temp = A[1][0];
- A[1][0] = A[1][1];
- A[1][1] = temp;
- }
+ double temp = A[1][0];
+ A[1][0] = A[1][1];
+ A[1][1] = temp;
+ }
- for (unsigned int k = 0; k < 1; k++)
- {
- for (unsigned int i = k+1; i < 2; i++)
+ for (unsigned int k = 0; k < 1; k++)
{
- x = A[i][k] / A[k][k];
+ for (unsigned int i = k+1; i < 2; i++)
+ {
+ x = A[i][k] / A[k][k];
- for (unsigned int j = k+1; j < 2; j++) A[i][j] = A[i][j] - A[k][j] * x;
+ for (unsigned int j = k+1; j < 2; j++) A[i][j] = A[i][j] - A[k][j] * x;
- b[i] = b[i] - b[k]*x;
+ b[i] = b[i] - b[k]*x;
+ }
}
- }
b[1] = b[1] / A[1][1];
- for (int i = 0; i >= 0; i--)
- {
- sum = b[i];
+ for (int i = 0; i >= 0; i--)
+ {
+ sum = b[i];
- for (unsigned int j = i+1; j < 2; j++) sum = sum - A[i][j] * b[j];
+ for (unsigned int j = i+1; j < 2; j++) sum = sum - A[i][j] * b[j];
- b[i] = sum / A[i][i];
- }
+ b[i] = sum / A[i][i];
+ }
if (col_change)
- {
- double temp = b[0];
- b[0] = b[1];
- b[1] = temp;
- }
+ {
+ double temp = b[0];
+ b[0] = b[1];
+ b[1] = temp;
+ }
double c = b[0] * (v_max[2] - v_min[2]) + b[1] * (v_inter[2] - v_min[2]) + v_min[2];
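// Editorial note on the block above: it solves the 2x2 system A*b = rhs
// by Gaussian elimination. If A[0][0] == 0, the two columns of A are
// swapped first so that the pivot is nonzero; since swapping columns of A
// permutes the unknowns, the same swap is applied to the solution
// components at the end (the col_change branch).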
col_change = false;
if (A[0][0] == 0)
- {
- col_change = true;
+ {
+ col_change = true;
- A[0][0] = A[0][1];
- A[0][1] = 0;
+ A[0][0] = A[0][1];
+ A[0][1] = 0;
- double temp = A[1][0];
- A[1][0] = A[1][1];
- A[1][1] = temp;
- }
+ double temp = A[1][0];
+ A[1][0] = A[1][1];
+ A[1][1] = temp;
+ }
- for (unsigned int k = 0; k < 1; k++)
- {
- for (unsigned int i = k+1; i < 2; i++)
+ for (unsigned int k = 0; k < 1; k++)
{
- x = A[i][k] / A[k][k];
+ for (unsigned int i = k+1; i < 2; i++)
+ {
+ x = A[i][k] / A[k][k];
- for (unsigned int j = k+1; j < 2; j++) A[i][j] = A[i][j] - A[k][j] * x;
+ for (unsigned int j = k+1; j < 2; j++) A[i][j] = A[i][j] - A[k][j] * x;
- b[i] = b[i] - b[k] * x;
+ b[i] = b[i] - b[k] * x;
+ }
}
- }
b[1]=b[1] / A[1][1];
- for (int i = 0; i >= 0; i--)
- {
- sum = b[i];
+ for (int i = 0; i >= 0; i--)
+ {
+ sum = b[i];
- for (unsigned int j = i+1; j < 2; j++) sum = sum - A[i][j]*b[j];
+ for (unsigned int j = i+1; j < 2; j++) sum = sum - A[i][j]*b[j];
- b[i] = sum / A[i][i];
- }
+ b[i] = sum / A[i][i];
+ }
if (col_change)
- {
- double temp = b[0];
- b[0] = b[1];
- b[1] = temp;
- }
+ {
+ double temp = b[0];
+ b[0] = b[1];
+ b[1] = temp;
+ }
gradient[0] = b[0] * (v_max[2] - v_min[2]) + b[1] * (v_inter[2] - v_min[2]) - c + v_min[2];
col_change = false;
if (A[0][0] == 0)
- {
- col_change = true;
+ {
+ col_change = true;
- A[0][0] = A[0][1];
- A[0][1] = 0;
+ A[0][0] = A[0][1];
+ A[0][1] = 0;
- double temp = A[1][0];
- A[1][0] = A[1][1];
- A[1][1] = temp;
- }
+ double temp = A[1][0];
+ A[1][0] = A[1][1];
+ A[1][1] = temp;
+ }
- for (unsigned int k = 0; k < 1; k++)
- {
- for (unsigned int i = k+1; i < 2; i++)
+ for (unsigned int k = 0; k < 1; k++)
{
- x = A[i][k] / A[k][k];
+ for (unsigned int i = k+1; i < 2; i++)
+ {
+ x = A[i][k] / A[k][k];
- for (unsigned int j = k+1; j < 2; j++) A[i][j] = A[i][j] - A[k][j] * x;
+ for (unsigned int j = k+1; j < 2; j++) A[i][j] = A[i][j] - A[k][j] * x;
- b[i] = b[i] - b[k] * x;
+ b[i] = b[i] - b[k] * x;
+ }
}
- }
b[1] = b[1] / A[1][1];
- for (int i = 0; i >= 0; i--)
- {
- sum = b[i];
+ for (int i = 0; i >= 0; i--)
+ {
+ sum = b[i];
- for (unsigned int j = i+1; j < 2; j++) sum = sum - A[i][j] * b[j];
+ for (unsigned int j = i+1; j < 2; j++) sum = sum - A[i][j] * b[j];
- b[i] = sum / A[i][i];
- }
+ b[i] = sum / A[i][i];
+ }
if (col_change)
- {
- double temp = b[0];
- b[0] = b[1];
- b[1] = temp;
- }
+ {
+ double temp = b[0];
+ b[0] = b[1];
+ b[1] = temp;
+ }
gradient[1] = b[0] * (v_max[2] - v_min[2]) + b[1] * (v_inter[2] - v_min[2]) - c + v_min[2];
gradient_parameters[0] = v_min[0];
gradient_parameters[1] = v_min[1];
-
+
gradient_parameters[2] = v_min[0] + lambda * gradient[0];
gradient_parameters[3] = v_min[1] + lambda * gradient[1];
if (n_metadata > 0)
out << "</FieldData>\n";
-}
+ }
VtuStream vtu_out(out, flags);
// margin around the plotted area
unsigned int margin_in_percent = 0;
- if(flags.margin) margin_in_percent = 5;
+ if (flags.margin) margin_in_percent = 5;
// determine the bounding box in the model space
Point<2> projection_decomposition;
Point<2> projection_decompositions[4];
- compute_node(projected_point, &*patch, 0, 0, 0, n_subdivisions);
-
+ compute_node(projected_point, &*patch, 0, 0, 0, n_subdivisions);
+
Assert ((flags.height_vector < patch->data.n_rows()) ||
patch->data.n_rows() == 0,
ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
// iterate over the patches
for (; patch != patches.end(); ++patch)
- {
- n_subdivisions = patch->n_subdivisions;
- n = n_subdivisions + 1;
+ {
+ n_subdivisions = patch->n_subdivisions;
+ n = n_subdivisions + 1;
- for (unsigned int i2 = 0; i2 < n_subdivisions; ++i2)
- {
- for (unsigned int i1 = 0; i1 < n_subdivisions; ++i1)
- {
- compute_node(projected_points[0], &*patch, i1, i2, 0, n_subdivisions);
- compute_node(projected_points[1], &*patch, i1+1, i2, 0, n_subdivisions);
- compute_node(projected_points[2], &*patch, i1, i2+1, 0, n_subdivisions);
- compute_node(projected_points[3], &*patch, i1+1, i2+1, 0, n_subdivisions);
-
- x_min = std::min(x_min, (double)projected_points[0][0]);
- x_min = std::min(x_min, (double)projected_points[1][0]);
- x_min = std::min(x_min, (double)projected_points[2][0]);
- x_min = std::min(x_min, (double)projected_points[3][0]);
-
- x_max = std::max(x_max, (double)projected_points[0][0]);
- x_max = std::max(x_max, (double)projected_points[1][0]);
- x_max = std::max(x_max, (double)projected_points[2][0]);
- x_max = std::max(x_max, (double)projected_points[3][0]);
-
- y_min = std::min(y_min, (double)projected_points[0][1]);
- y_min = std::min(y_min, (double)projected_points[1][1]);
- y_min = std::min(y_min, (double)projected_points[2][1]);
- y_min = std::min(y_min, (double)projected_points[3][1]);
-
- y_max = std::max(y_max, (double)projected_points[0][1]);
- y_max = std::max(y_max, (double)projected_points[1][1]);
- y_max = std::max(y_max, (double)projected_points[2][1]);
- y_max = std::max(y_max, (double)projected_points[3][1]);
-
- Assert ((flags.height_vector < patch->data.n_rows()) ||
- patch->data.n_rows() == 0,
- ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
-
- z_min = std::min(z_min, (double)patch->data(flags.height_vector, i1*d1 + i2*d2));
- z_min = std::min(z_min, (double)patch->data(flags.height_vector, (i1+1)*d1 + i2*d2));
- z_min = std::min(z_min, (double)patch->data(flags.height_vector, i1*d1 + (i2+1)*d2));
- z_min = std::min(z_min, (double)patch->data(flags.height_vector, (i1+1)*d1 + (i2+1)*d2));
-
- z_max = std::max(z_max, (double)patch->data(flags.height_vector, i1*d1 + i2*d2));
- z_max = std::max(z_max, (double)patch->data(flags.height_vector, (i1+1)*d1 + i2*d2));
- z_max = std::max(z_max, (double)patch->data(flags.height_vector, i1*d1 + (i2+1)*d2));
- z_max = std::max(z_max, (double)patch->data(flags.height_vector, (i1+1)*d1 + (i2+1)*d2));
- }
+ for (unsigned int i2 = 0; i2 < n_subdivisions; ++i2)
+ {
+ for (unsigned int i1 = 0; i1 < n_subdivisions; ++i1)
+ {
+ compute_node(projected_points[0], &*patch, i1, i2, 0, n_subdivisions);
+ compute_node(projected_points[1], &*patch, i1+1, i2, 0, n_subdivisions);
+ compute_node(projected_points[2], &*patch, i1, i2+1, 0, n_subdivisions);
+ compute_node(projected_points[3], &*patch, i1+1, i2+1, 0, n_subdivisions);
+
+ x_min = std::min(x_min, (double)projected_points[0][0]);
+ x_min = std::min(x_min, (double)projected_points[1][0]);
+ x_min = std::min(x_min, (double)projected_points[2][0]);
+ x_min = std::min(x_min, (double)projected_points[3][0]);
+
+ x_max = std::max(x_max, (double)projected_points[0][0]);
+ x_max = std::max(x_max, (double)projected_points[1][0]);
+ x_max = std::max(x_max, (double)projected_points[2][0]);
+ x_max = std::max(x_max, (double)projected_points[3][0]);
+
+ y_min = std::min(y_min, (double)projected_points[0][1]);
+ y_min = std::min(y_min, (double)projected_points[1][1]);
+ y_min = std::min(y_min, (double)projected_points[2][1]);
+ y_min = std::min(y_min, (double)projected_points[3][1]);
+
+ y_max = std::max(y_max, (double)projected_points[0][1]);
+ y_max = std::max(y_max, (double)projected_points[1][1]);
+ y_max = std::max(y_max, (double)projected_points[2][1]);
+ y_max = std::max(y_max, (double)projected_points[3][1]);
+
+ Assert ((flags.height_vector < patch->data.n_rows()) ||
+ patch->data.n_rows() == 0,
+ ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
+
+ z_min = std::min(z_min, (double)patch->data(flags.height_vector, i1*d1 + i2*d2));
+ z_min = std::min(z_min, (double)patch->data(flags.height_vector, (i1+1)*d1 + i2*d2));
+ z_min = std::min(z_min, (double)patch->data(flags.height_vector, i1*d1 + (i2+1)*d2));
+ z_min = std::min(z_min, (double)patch->data(flags.height_vector, (i1+1)*d1 + (i2+1)*d2));
+
+ z_max = std::max(z_max, (double)patch->data(flags.height_vector, i1*d1 + i2*d2));
+ z_max = std::max(z_max, (double)patch->data(flags.height_vector, (i1+1)*d1 + i2*d2));
+ z_max = std::max(z_max, (double)patch->data(flags.height_vector, i1*d1 + (i2+1)*d2));
+ z_max = std::max(z_max, (double)patch->data(flags.height_vector, (i1+1)*d1 + (i2+1)*d2));
+ }
+ }
}
- }
x_dimension = x_max - x_min;
y_dimension = y_max - y_min;
camera_position[1] -= (z_min + 2. * z_dimension) * sin(angle_factor * flags.polar_angle) * cos(angle_factor * flags.azimuth_angle);
-// determine the bounding box on the projection plane
+// determine the bounding box on the projection plane
double x_min_perspective, y_min_perspective;
double x_max_perspective, y_max_perspective;
double x_dimension_perspective, y_dimension_perspective;
Point<3> point(true);
- compute_node(projected_point, &*patch, 0, 0, 0, n_subdivisions);
-
+ compute_node(projected_point, &*patch, 0, 0, 0, n_subdivisions);
+
Assert ((flags.height_vector < patch->data.n_rows()) ||
patch->data.n_rows() == 0,
ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
// iterate over the patches
for (; patch != patches.end(); ++patch)
- {
- n_subdivisions = patch->n_subdivisions;
- n = n_subdivisions + 1;
+ {
+ n_subdivisions = patch->n_subdivisions;
+ n = n_subdivisions + 1;
- for (unsigned int i2 = 0; i2 < n_subdivisions; ++i2)
- {
- for (unsigned int i1 = 0; i1 < n_subdivisions; ++i1)
- {
- Point<spacedim> projected_vertices[4];
- Point<3> vertices[4];
-
- compute_node(projected_vertices[0], &*patch, i1, i2, 0, n_subdivisions);
- compute_node(projected_vertices[1], &*patch, i1+1, i2, 0, n_subdivisions);
- compute_node(projected_vertices[2], &*patch, i1, i2+1, 0, n_subdivisions);
- compute_node(projected_vertices[3], &*patch, i1+1, i2+1, 0, n_subdivisions);
-
- Assert ((flags.height_vector < patch->data.n_rows()) ||
- patch->data.n_rows() == 0,
- ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
-
- vertices[0][0] = projected_vertices[0][0];
- vertices[0][1] = projected_vertices[0][1];
- vertices[0][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + i2*d2) : 0;
-
- vertices[1][0] = projected_vertices[1][0];
- vertices[1][1] = projected_vertices[1][1];
- vertices[1][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + i2*d2) : 0;
-
- vertices[2][0] = projected_vertices[2][0];
- vertices[2][1] = projected_vertices[2][1];
- vertices[2][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + (i2+1)*d2) : 0;
-
- vertices[3][0] = projected_vertices[3][0];
- vertices[3][1] = projected_vertices[3][1];
- vertices[3][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + (i2+1)*d2) : 0;
-
- projection_decompositions[0] = svg_project_point(vertices[0], camera_position, camera_direction, camera_horizontal, camera_focus);
- projection_decompositions[1] = svg_project_point(vertices[1], camera_position, camera_direction, camera_horizontal, camera_focus);
- projection_decompositions[2] = svg_project_point(vertices[2], camera_position, camera_direction, camera_horizontal, camera_focus);
- projection_decompositions[3] = svg_project_point(vertices[3], camera_position, camera_direction, camera_horizontal, camera_focus);
-
- x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[0][0]);
- x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[1][0]);
- x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[2][0]);
- x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[3][0]);
-
- x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[0][0]);
- x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[1][0]);
- x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[2][0]);
- x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[3][0]);
-
- y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[0][1]);
- y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[1][1]);
- y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[2][1]);
- y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[3][1]);
-
- y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[0][1]);
- y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[1][1]);
- y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[2][1]);
- y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[3][1]);
- }
+ for (unsigned int i2 = 0; i2 < n_subdivisions; ++i2)
+ {
+ for (unsigned int i1 = 0; i1 < n_subdivisions; ++i1)
+ {
+ Point<spacedim> projected_vertices[4];
+ Point<3> vertices[4];
+
+ compute_node(projected_vertices[0], &*patch, i1, i2, 0, n_subdivisions);
+ compute_node(projected_vertices[1], &*patch, i1+1, i2, 0, n_subdivisions);
+ compute_node(projected_vertices[2], &*patch, i1, i2+1, 0, n_subdivisions);
+ compute_node(projected_vertices[3], &*patch, i1+1, i2+1, 0, n_subdivisions);
+
+ Assert ((flags.height_vector < patch->data.n_rows()) ||
+ patch->data.n_rows() == 0,
+ ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
+
+ vertices[0][0] = projected_vertices[0][0];
+ vertices[0][1] = projected_vertices[0][1];
+ vertices[0][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + i2*d2) : 0;
+
+ vertices[1][0] = projected_vertices[1][0];
+ vertices[1][1] = projected_vertices[1][1];
+ vertices[1][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + i2*d2) : 0;
+
+ vertices[2][0] = projected_vertices[2][0];
+ vertices[2][1] = projected_vertices[2][1];
+ vertices[2][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + (i2+1)*d2) : 0;
+
+ vertices[3][0] = projected_vertices[3][0];
+ vertices[3][1] = projected_vertices[3][1];
+ vertices[3][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + (i2+1)*d2) : 0;
+
+ projection_decompositions[0] = svg_project_point(vertices[0], camera_position, camera_direction, camera_horizontal, camera_focus);
+ projection_decompositions[1] = svg_project_point(vertices[1], camera_position, camera_direction, camera_horizontal, camera_focus);
+ projection_decompositions[2] = svg_project_point(vertices[2], camera_position, camera_direction, camera_horizontal, camera_focus);
+ projection_decompositions[3] = svg_project_point(vertices[3], camera_position, camera_direction, camera_horizontal, camera_focus);
+
+ x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[0][0]);
+ x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[1][0]);
+ x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[2][0]);
+ x_min_perspective = std::min(x_min_perspective, (double)projection_decompositions[3][0]);
+
+ x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[0][0]);
+ x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[1][0]);
+ x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[2][0]);
+ x_max_perspective = std::max(x_max_perspective, (double)projection_decompositions[3][0]);
+
+ y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[0][1]);
+ y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[1][1]);
+ y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[2][1]);
+ y_min_perspective = std::min(y_min_perspective, (double)projection_decompositions[3][1]);
+
+ y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[0][1]);
+ y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[1][1]);
+ y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[2][1]);
+ y_max_perspective = std::max(y_max_perspective, (double)projection_decompositions[3][1]);
+ }
+ }
}
- }
x_dimension_perspective = x_max_perspective - x_min_perspective;
y_dimension_perspective = y_max_perspective - y_min_perspective;
// iterate over the patches
for (patch = patches.begin(); patch != patches.end(); ++patch)
- {
- n_subdivisions = patch->n_subdivisions;
- n = n_subdivisions + 1;
+ {
+ n_subdivisions = patch->n_subdivisions;
+ n = n_subdivisions + 1;
- for (unsigned int i2 = 0; i2 < n_subdivisions; ++i2)
- {
- for (unsigned int i1 = 0; i1 < n_subdivisions; ++i1)
- {
- Point<spacedim> projected_vertices[4];
- SvgCell cell;
-
- compute_node(projected_vertices[0], &*patch, i1, i2, 0, n_subdivisions);
- compute_node(projected_vertices[1], &*patch, i1+1, i2, 0, n_subdivisions);
- compute_node(projected_vertices[2], &*patch, i1, i2+1, 0, n_subdivisions);
- compute_node(projected_vertices[3], &*patch, i1+1, i2+1, 0, n_subdivisions);
-
- Assert ((flags.height_vector < patch->data.n_rows()) ||
- patch->data.n_rows() == 0,
- ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
-
- cell.vertices[0][0] = projected_vertices[0][0];
- cell.vertices[0][1] = projected_vertices[0][1];
- cell.vertices[0][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + i2*d2) : 0;
-
- cell.vertices[1][0] = projected_vertices[1][0];
- cell.vertices[1][1] = projected_vertices[1][1];
- cell.vertices[1][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + i2*d2) : 0;
-
- cell.vertices[2][0] = projected_vertices[2][0];
- cell.vertices[2][1] = projected_vertices[2][1];
- cell.vertices[2][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + (i2+1)*d2) : 0;
-
- cell.vertices[3][0] = projected_vertices[3][0];
- cell.vertices[3][1] = projected_vertices[3][1];
- cell.vertices[3][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + (i2+1)*d2) : 0;
-
- cell.projected_vertices[0] = svg_project_point(cell.vertices[0], camera_position, camera_direction, camera_horizontal, camera_focus);
- cell.projected_vertices[1] = svg_project_point(cell.vertices[1], camera_position, camera_direction, camera_horizontal, camera_focus);
- cell.projected_vertices[2] = svg_project_point(cell.vertices[2], camera_position, camera_direction, camera_horizontal, camera_focus);
- cell.projected_vertices[3] = svg_project_point(cell.vertices[3], camera_position, camera_direction, camera_horizontal, camera_focus);
-
- cell.center = .25 * (cell.vertices[0] + cell.vertices[1] + cell.vertices[2] + cell.vertices[3]);
- cell.projected_center = svg_project_point(cell.center, camera_position, camera_direction, camera_horizontal, camera_focus);
-
- cell.depth = cell.center.distance(camera_position);
-
- cells.insert(cell);
- }
+ for (unsigned int i2 = 0; i2 < n_subdivisions; ++i2)
+ {
+ for (unsigned int i1 = 0; i1 < n_subdivisions; ++i1)
+ {
+ Point<spacedim> projected_vertices[4];
+ SvgCell cell;
+
+ compute_node(projected_vertices[0], &*patch, i1, i2, 0, n_subdivisions);
+ compute_node(projected_vertices[1], &*patch, i1+1, i2, 0, n_subdivisions);
+ compute_node(projected_vertices[2], &*patch, i1, i2+1, 0, n_subdivisions);
+ compute_node(projected_vertices[3], &*patch, i1+1, i2+1, 0, n_subdivisions);
+
+ Assert ((flags.height_vector < patch->data.n_rows()) ||
+ patch->data.n_rows() == 0,
+ ExcIndexRange (flags.height_vector, 0, patch->data.n_rows()));
+
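+ // second pass: build an SvgCell for each quad; the z coordinate again comes from the first data component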
+ cell.vertices[0][0] = projected_vertices[0][0];
+ cell.vertices[0][1] = projected_vertices[0][1];
+ cell.vertices[0][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + i2*d2) : 0;
+
+ cell.vertices[1][0] = projected_vertices[1][0];
+ cell.vertices[1][1] = projected_vertices[1][1];
+ cell.vertices[1][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + i2*d2) : 0;
+
+ cell.vertices[2][0] = projected_vertices[2][0];
+ cell.vertices[2][1] = projected_vertices[2][1];
+ cell.vertices[2][2] = patch->data.n_rows() != 0 ? patch->data(0,i1*d1 + (i2+1)*d2) : 0;
+
+ cell.vertices[3][0] = projected_vertices[3][0];
+ cell.vertices[3][1] = projected_vertices[3][1];
+ cell.vertices[3][2] = patch->data.n_rows() != 0 ? patch->data(0,(i1+1)*d1 + (i2+1)*d2) : 0;
+
+ cell.projected_vertices[0] = svg_project_point(cell.vertices[0], camera_position, camera_direction, camera_horizontal, camera_focus);
+ cell.projected_vertices[1] = svg_project_point(cell.vertices[1], camera_position, camera_direction, camera_horizontal, camera_focus);
+ cell.projected_vertices[2] = svg_project_point(cell.vertices[2], camera_position, camera_direction, camera_horizontal, camera_focus);
+ cell.projected_vertices[3] = svg_project_point(cell.vertices[3], camera_position, camera_direction, camera_horizontal, camera_focus);
+
+ cell.center = .25 * (cell.vertices[0] + cell.vertices[1] + cell.vertices[2] + cell.vertices[3]);
+ cell.projected_center = svg_project_point(cell.center, camera_position, camera_direction, camera_horizontal, camera_focus);
+
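+ // the multiset keeps the cells ordered by their distance to the camera, so the drawing loop below can paint them back to front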
+ cell.depth = cell.center.distance(camera_position);
+
+ cells.insert(cell);
+ }
+ }
}
- }
// write the svg file
width = static_cast<unsigned int>(.5 + height * (x_dimension_perspective / y_dimension_perspective));
unsigned int additional_width = 0;
- if(flags.draw_colorbar) additional_width = static_cast<unsigned int>(.5 + height * .3); // additional width for colorbar
-
+ if (flags.draw_colorbar) additional_width = static_cast<unsigned int>(.5 + height * .3); // additional width for colorbar
+
// basic svg header and background rectangle
- out << "<svg width=\"" << width + additional_width << "\" height=\"" << height << "\" xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\">" << '\n'
+ out << "<svg width=\"" << width + additional_width << "\" height=\"" << height << "\" xmlns=\"http://www.w3.org/2000/svg\" version=\"1.1\">" << '\n'
<< " <rect width=\"" << width + additional_width << "\" height=\"" << height << "\" style=\"fill:white\"/>" << '\n' << '\n';
unsigned int triangle_counter = 0;
-
+
// write the cells in the correct order
for (typename std::multiset<SvgCell>::const_iterator cell = cells.begin(); cell != cells.end(); ++cell)
- {
- Point<3> points3d_triangle[3];
-
- for (unsigned int triangle_index = 0; triangle_index < 4; triangle_index++)
{
- switch (triangle_index)
- {
- case 0: points3d_triangle[0] = cell->vertices[0], points3d_triangle[1] = cell->vertices[1], points3d_triangle[2] = cell->center; break;
- case 1: points3d_triangle[0] = cell->vertices[1], points3d_triangle[1] = cell->vertices[3], points3d_triangle[2] = cell->center; break;
- case 2: points3d_triangle[0] = cell->vertices[3], points3d_triangle[1] = cell->vertices[2], points3d_triangle[2] = cell->center; break;
- case 3: points3d_triangle[0] = cell->vertices[2], points3d_triangle[1] = cell->vertices[0], points3d_triangle[2] = cell->center; break;
- default: break;
- }
+ Point<3> points3d_triangle[3];
- Point<6> gradient_param = svg_get_gradient_parameters(points3d_triangle);
+ for (unsigned int triangle_index = 0; triangle_index < 4; triangle_index++)
+ {
+ switch (triangle_index)
+ {
+ case 0:
+ points3d_triangle[0] = cell->vertices[0], points3d_triangle[1] = cell->vertices[1], points3d_triangle[2] = cell->center;
+ break;
+ case 1:
+ points3d_triangle[0] = cell->vertices[1], points3d_triangle[1] = cell->vertices[3], points3d_triangle[2] = cell->center;
+ break;
+ case 2:
+ points3d_triangle[0] = cell->vertices[3], points3d_triangle[1] = cell->vertices[2], points3d_triangle[2] = cell->center;
+ break;
+ case 3:
+ points3d_triangle[0] = cell->vertices[2], points3d_triangle[1] = cell->vertices[0], points3d_triangle[2] = cell->center;
+ break;
+ default:
+ break;
+ }
- double start_h = .667 - ((gradient_param[4] - z_min) / z_dimension) * .667;
- double stop_h = .667 - ((gradient_param[5] - z_min) / z_dimension) * .667;
+ Point<6> gradient_param = svg_get_gradient_parameters(points3d_triangle);
- unsigned int start_r = 0;
- unsigned int start_g = 0;
- unsigned int start_b = 0;
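+ // map the gradient endpoint values linearly to hues between 0.667 (blue, at z_min) and 0 (red, at z_max)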
+ double start_h = .667 - ((gradient_param[4] - z_min) / z_dimension) * .667;
+ double stop_h = .667 - ((gradient_param[5] - z_min) / z_dimension) * .667;
- unsigned int stop_r = 0;
- unsigned int stop_g = 0;
- unsigned int stop_b = 0;
+ unsigned int start_r = 0;
+ unsigned int start_g = 0;
+ unsigned int start_b = 0;
- unsigned int start_i = static_cast<unsigned int>(start_h * 6.);
- unsigned int stop_i = static_cast<unsigned int>(stop_h * 6.);
+ unsigned int stop_r = 0;
+ unsigned int stop_g = 0;
+ unsigned int stop_b = 0;
- double start_f = start_h * 6. - start_i;
- double start_q = 1. - start_f;
+ unsigned int start_i = static_cast<unsigned int>(start_h * 6.);
+ unsigned int stop_i = static_cast<unsigned int>(stop_h * 6.);
- double stop_f = stop_h * 6. - stop_i;
- double stop_q = 1. - stop_f;
+ double start_f = start_h * 6. - start_i;
+ double start_q = 1. - start_f;
- switch (start_i % 6)
- {
- case 0: start_r = 255, start_g = static_cast<unsigned int>(.5 + 255. * start_f); break;
- case 1: start_r = static_cast<unsigned int>(.5 + 255. * start_q), start_g = 255; break;
- case 2: start_g = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_f); break;
- case 3: start_g = static_cast<unsigned int>(.5 + 255. * start_q), start_b = 255; break;
- case 4: start_r = static_cast<unsigned int>(.5 + 255. * start_f), start_b = 255; break;
- case 5: start_r = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_q); break;
- default: break;
- }
+ double stop_f = stop_h * 6. - stop_i;
+ double stop_q = 1. - stop_f;
- switch (stop_i % 6)
- {
- case 0: stop_r = 255, stop_g = static_cast<unsigned int>(.5 + 255. * stop_f); break;
- case 1: stop_r = static_cast<unsigned int>(.5 + 255. * stop_q), stop_g = 255; break;
- case 2: stop_g = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_f); break;
- case 3: stop_g = static_cast<unsigned int>(.5 + 255. * stop_q), stop_b = 255; break;
- case 4: stop_r = static_cast<unsigned int>(.5 + 255. * stop_f), stop_b = 255; break;
- case 5: stop_r = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_q); break;
- default: break;
- }
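+ // convert hue to RGB assuming full saturation and value; the integer part of h*6 selects the color sextant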
+ switch (start_i % 6)
+ {
+ case 0:
+ start_r = 255, start_g = static_cast<unsigned int>(.5 + 255. * start_f);
+ break;
+ case 1:
+ start_r = static_cast<unsigned int>(.5 + 255. * start_q), start_g = 255;
+ break;
+ case 2:
+ start_g = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_f);
+ break;
+ case 3:
+ start_g = static_cast<unsigned int>(.5 + 255. * start_q), start_b = 255;
+ break;
+ case 4:
+ start_r = static_cast<unsigned int>(.5 + 255. * start_f), start_b = 255;
+ break;
+ case 5:
+ start_r = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_q);
+ break;
+ default:
+ break;
+ }
- Point<3> gradient_start_point_3d, gradient_stop_point_3d;
-
- gradient_start_point_3d[0] = gradient_param[0];
- gradient_start_point_3d[1] = gradient_param[1];
- gradient_start_point_3d[2] = gradient_param[4];
-
- gradient_stop_point_3d[0] = gradient_param[2];
- gradient_stop_point_3d[1] = gradient_param[3];
- gradient_stop_point_3d[2] = gradient_param[5];
-
- Point<2> gradient_start_point = svg_project_point(gradient_start_point_3d, camera_position, camera_direction, camera_horizontal, camera_focus);
- Point<2> gradient_stop_point = svg_project_point(gradient_stop_point_3d, camera_position, camera_direction, camera_horizontal, camera_focus);
-
- // define linear gradient
- out << " <linearGradient id=\"" << triangle_counter << "\" gradientUnits=\"userSpaceOnUse\" "
- << "x1=\""
- << static_cast<unsigned int>(.5 + ((gradient_start_point[0] - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
- << "\" "
- << "y1=\""
- << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((gradient_start_point[1] - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
- << "\" "
- << "x2=\""
- << static_cast<unsigned int>(.5 + ((gradient_stop_point[0] - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
- << "\" "
- << "y2=\""
- << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((gradient_stop_point[1] - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
- << "\""
- << ">" << '\n'
- << " <stop offset=\"0\" style=\"stop-color:rgb(" << start_r << "," << start_g << "," << start_b << ")\"/>" << '\n'
- << " <stop offset=\"1\" style=\"stop-color:rgb(" << stop_r << "," << stop_g << "," << stop_b << ")\"/>" << '\n'
- << " </linearGradient>" << '\n';
-
- // draw current triangle
- double x1 = 0, y1 = 0, x2 = 0, y2 = 0;
- double x3 = cell->projected_center[0];
- double y3 = cell->projected_center[1];
-
- switch (triangle_index)
- {
- case 0: x1 = cell->projected_vertices[0][0], y1 = cell->projected_vertices[0][1], x2 = cell->projected_vertices[1][0], y2 = cell->projected_vertices[1][1]; break;
- case 1: x1 = cell->projected_vertices[1][0], y1 = cell->projected_vertices[1][1], x2 = cell->projected_vertices[3][0], y2 = cell->projected_vertices[3][1]; break;
- case 2: x1 = cell->projected_vertices[3][0], y1 = cell->projected_vertices[3][1], x2 = cell->projected_vertices[2][0], y2 = cell->projected_vertices[2][1]; break;
- case 3: x1 = cell->projected_vertices[2][0], y1 = cell->projected_vertices[2][1], x2 = cell->projected_vertices[0][0], y2 = cell->projected_vertices[0][1]; break;
- default: break;
- }
+ switch (stop_i % 6)
+ {
+ case 0:
+ stop_r = 255, stop_g = static_cast<unsigned int>(.5 + 255. * stop_f);
+ break;
+ case 1:
+ stop_r = static_cast<unsigned int>(.5 + 255. * stop_q), stop_g = 255;
+ break;
+ case 2:
+ stop_g = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_f);
+ break;
+ case 3:
+ stop_g = static_cast<unsigned int>(.5 + 255. * stop_q), stop_b = 255;
+ break;
+ case 4:
+ stop_r = static_cast<unsigned int>(.5 + 255. * stop_f), stop_b = 255;
+ break;
+ case 5:
+ stop_r = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_q);
+ break;
+ default:
+ break;
+ }
- out << " <path d=\"M "
- << static_cast<unsigned int>(.5 + ((x1 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
- << ' '
- << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y1 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
- << " L "
- << static_cast<unsigned int>(.5 + ((x2 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
- << ' '
- << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y2 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
- << " L "
- << static_cast<unsigned int>(.5 + ((x3 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
- << ' '
- << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y3 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
- << " L "
- << static_cast<unsigned int>(.5 + ((x1 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
- << ' '
- << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y1 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
- << "\" style=\"stroke:black; fill:url(#" << triangle_counter << "); stroke-width:" << flags.line_thickness << "\"/>" << '\n';
-
- triangle_counter++;
+ Point<3> gradient_start_point_3d, gradient_stop_point_3d;
+
+ gradient_start_point_3d[0] = gradient_param[0];
+ gradient_start_point_3d[1] = gradient_param[1];
+ gradient_start_point_3d[2] = gradient_param[4];
+
+ gradient_stop_point_3d[0] = gradient_param[2];
+ gradient_stop_point_3d[1] = gradient_param[3];
+ gradient_stop_point_3d[2] = gradient_param[5];
+
+ Point<2> gradient_start_point = svg_project_point(gradient_start_point_3d, camera_position, camera_direction, camera_horizontal, camera_focus);
+ Point<2> gradient_stop_point = svg_project_point(gradient_stop_point_3d, camera_position, camera_direction, camera_horizontal, camera_focus);
+
+ // define linear gradient
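+ // the endpoint coordinates are mapped from perspective space to SVG pixels: scaled to the drawing area, offset by the margin, with the y axis flipped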
+ out << " <linearGradient id=\"" << triangle_counter << "\" gradientUnits=\"userSpaceOnUse\" "
+ << "x1=\""
+ << static_cast<unsigned int>(.5 + ((gradient_start_point[0] - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
+ << "\" "
+ << "y1=\""
+ << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((gradient_start_point[1] - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
+ << "\" "
+ << "x2=\""
+ << static_cast<unsigned int>(.5 + ((gradient_stop_point[0] - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
+ << "\" "
+ << "y2=\""
+ << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((gradient_stop_point[1] - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
+ << "\""
+ << ">" << '\n'
+ << " <stop offset=\"0\" style=\"stop-color:rgb(" << start_r << "," << start_g << "," << start_b << ")\"/>" << '\n'
+ << " <stop offset=\"1\" style=\"stop-color:rgb(" << stop_r << "," << stop_g << "," << stop_b << ")\"/>" << '\n'
+ << " </linearGradient>" << '\n';
+
+ // draw current triangle
+ double x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+ double x3 = cell->projected_center[0];
+ double y3 = cell->projected_center[1];
+
+ switch (triangle_index)
+ {
+ case 0:
+ x1 = cell->projected_vertices[0][0], y1 = cell->projected_vertices[0][1], x2 = cell->projected_vertices[1][0], y2 = cell->projected_vertices[1][1];
+ break;
+ case 1:
+ x1 = cell->projected_vertices[1][0], y1 = cell->projected_vertices[1][1], x2 = cell->projected_vertices[3][0], y2 = cell->projected_vertices[3][1];
+ break;
+ case 2:
+ x1 = cell->projected_vertices[3][0], y1 = cell->projected_vertices[3][1], x2 = cell->projected_vertices[2][0], y2 = cell->projected_vertices[2][1];
+ break;
+ case 3:
+ x1 = cell->projected_vertices[2][0], y1 = cell->projected_vertices[2][1], x2 = cell->projected_vertices[0][0], y2 = cell->projected_vertices[0][1];
+ break;
+ default:
+ break;
+ }
+
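+ // emit the triangle as a closed path, applying the same perspective-to-pixel mapping to each vertex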
+ out << " <path d=\"M "
+ << static_cast<unsigned int>(.5 + ((x1 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
+ << ' '
+ << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y1 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
+ << " L "
+ << static_cast<unsigned int>(.5 + ((x2 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
+ << ' '
+ << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y2 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
+ << " L "
+ << static_cast<unsigned int>(.5 + ((x3 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
+ << ' '
+ << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y3 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
+ << " L "
+ << static_cast<unsigned int>(.5 + ((x1 - x_min_perspective) / x_dimension_perspective) * (width - (width/100.) * 2. * margin_in_percent) + ((width/100.) * margin_in_percent))
+ << ' '
+ << static_cast<unsigned int>(.5 + height - (height/100.) * margin_in_percent - ((y1 - y_min_perspective) / y_dimension_perspective) * (height - (height/100.) * 2. * margin_in_percent))
+ << "\" style=\"stroke:black; fill:url(#" << triangle_counter << "); stroke-width:" << flags.line_thickness << "\"/>" << '\n';
+
+ triangle_counter++;
+ }
}
- }
// draw the colorbar
if (flags.draw_colorbar)
- {
- out << '\n' << " <!-- colorbar -->" << '\n';
+ {
+ out << '\n' << " <!-- colorbar -->" << '\n';
- unsigned int element_height = static_cast<unsigned int>(((height/100.) * (71. - 2.*margin_in_percent)) / 4);
- unsigned int element_width = static_cast<unsigned int>(.5 + (height/100.) * 2.5);
+ unsigned int element_height = static_cast<unsigned int>(((height/100.) * (71. - 2.*margin_in_percent)) / 4);
+ unsigned int element_width = static_cast<unsigned int>(.5 + (height/100.) * 2.5);
- additional_width = 0;
- if (!flags.margin) additional_width = static_cast<unsigned int>(.5 + (height/100.) * 2.5);
+ additional_width = 0;
+ if (!flags.margin) additional_width = static_cast<unsigned int>(.5 + (height/100.) * 2.5);
- for (unsigned int index = 0; index < 4; index++)
- {
- double start_h = .667 - ((index+1) / 4.) * .667;
- double stop_h = .667 - (index / 4.) * .667;
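+ // the colorbar consists of four stacked boxes, each filled with a linear gradient covering one quarter of the hue range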
+ for (unsigned int index = 0; index < 4; index++)
+ {
+ double start_h = .667 - ((index+1) / 4.) * .667;
+ double stop_h = .667 - (index / 4.) * .667;
- unsigned int start_r = 0;
- unsigned int start_g = 0;
- unsigned int start_b = 0;
+ unsigned int start_r = 0;
+ unsigned int start_g = 0;
+ unsigned int start_b = 0;
- unsigned int stop_r = 0;
- unsigned int stop_g = 0;
- unsigned int stop_b = 0;
+ unsigned int stop_r = 0;
+ unsigned int stop_g = 0;
+ unsigned int stop_b = 0;
- unsigned int start_i = static_cast<unsigned int>(start_h * 6.);
- unsigned int stop_i = static_cast<unsigned int>(stop_h * 6.);
+ unsigned int start_i = static_cast<unsigned int>(start_h * 6.);
+ unsigned int stop_i = static_cast<unsigned int>(stop_h * 6.);
- double start_f = start_h * 6. - start_i;
- double start_q = 1. - start_f;
+ double start_f = start_h * 6. - start_i;
+ double start_q = 1. - start_f;
- double stop_f = stop_h * 6. - stop_i;
- double stop_q = 1. - stop_f;
+ double stop_f = stop_h * 6. - stop_i;
+ double stop_q = 1. - stop_f;
- switch (start_i % 6)
- {
- case 0: start_r = 255, start_g = static_cast<unsigned int>(.5 + 255. * start_f); break;
- case 1: start_r = static_cast<unsigned int>(.5 + 255. * start_q), start_g = 255; break;
- case 2: start_g = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_f); break;
- case 3: start_g = static_cast<unsigned int>(.5 + 255. * start_q), start_b = 255; break;
- case 4: start_r = static_cast<unsigned int>(.5 + 255. * start_f), start_b = 255; break;
- case 5: start_r = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_q); break;
- default: break;
- }
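+ // same hue-to-RGB conversion as for the cell gradients above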
+ switch (start_i % 6)
+ {
+ case 0:
+ start_r = 255, start_g = static_cast<unsigned int>(.5 + 255. * start_f);
+ break;
+ case 1:
+ start_r = static_cast<unsigned int>(.5 + 255. * start_q), start_g = 255;
+ break;
+ case 2:
+ start_g = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_f);
+ break;
+ case 3:
+ start_g = static_cast<unsigned int>(.5 + 255. * start_q), start_b = 255;
+ break;
+ case 4:
+ start_r = static_cast<unsigned int>(.5 + 255. * start_f), start_b = 255;
+ break;
+ case 5:
+ start_r = 255, start_b = static_cast<unsigned int>(.5 + 255. * start_q);
+ break;
+ default:
+ break;
+ }
- switch (stop_i % 6)
- {
- case 0: stop_r = 255, stop_g = static_cast<unsigned int>(.5 + 255. * stop_f); break;
- case 1: stop_r = static_cast<unsigned int>(.5 + 255. * stop_q), stop_g = 255; break;
- case 2: stop_g = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_f); break;
- case 3: stop_g = static_cast<unsigned int>(.5 + 255. * stop_q), stop_b = 255; break;
- case 4: stop_r = static_cast<unsigned int>(.5 + 255. * stop_f), stop_b = 255; break;
- case 5: stop_r = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_q); break;
- default: break;
- }
+ switch (stop_i % 6)
+ {
+ case 0:
+ stop_r = 255, stop_g = static_cast<unsigned int>(.5 + 255. * stop_f);
+ break;
+ case 1:
+ stop_r = static_cast<unsigned int>(.5 + 255. * stop_q), stop_g = 255;
+ break;
+ case 2:
+ stop_g = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_f);
+ break;
+ case 3:
+ stop_g = static_cast<unsigned int>(.5 + 255. * stop_q), stop_b = 255;
+ break;
+ case 4:
+ stop_r = static_cast<unsigned int>(.5 + 255. * stop_f), stop_b = 255;
+ break;
+ case 5:
+ stop_r = 255, stop_b = static_cast<unsigned int>(.5 + 255. * stop_q);
+ break;
+ default:
+ break;
+ }
- // define gradient
- out << " <linearGradient id=\"colorbar_" << index << "\" gradientUnits=\"userSpaceOnUse\" "
- << "x1=\"" << width + additional_width << "\" "
- << "y1=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29)) + (3-index) * element_height << "\" "
- << "x2=\"" << width + additional_width << "\" "
- << "y2=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29)) + (4-index) * element_height << "\""
- << ">" << '\n'
- << " <stop offset=\"0\" style=\"stop-color:rgb(" << start_r << "," << start_g << "," << start_b << ")\"/>" << '\n'
- << " <stop offset=\"1\" style=\"stop-color:rgb(" << stop_r << "," << stop_g << "," << stop_b << ")\"/>" << '\n'
- << " </linearGradient>" << '\n';
-
- // draw box corresponding to the gradient above
- out << " <rect"
- << " x=\"" << width + additional_width
- << "\" y=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29)) + (3-index) * element_height
- << "\" width=\"" << element_width
- << "\" height=\"" << element_height
- << "\" style=\"stroke:black; stroke-width:2; fill:url(#colorbar_" << index << ")\"/>" << '\n';
- }
-
- for (unsigned int index = 0; index < 5; index++)
- {
- out << " <text x=\"" << width + additional_width + static_cast<unsigned int>(1.5 * element_width)
- << "\" y=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29) + (4.-index) * element_height + 30.) << "\""
- << " style=\"text-anchor:start; font-size:80; font-family:Helvetica";
+ // define gradient
+ out << " <linearGradient id=\"colorbar_" << index << "\" gradientUnits=\"userSpaceOnUse\" "
+ << "x1=\"" << width + additional_width << "\" "
+ << "y1=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29)) + (3-index) * element_height << "\" "
+ << "x2=\"" << width + additional_width << "\" "
+ << "y2=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29)) + (4-index) * element_height << "\""
+ << ">" << '\n'
+ << " <stop offset=\"0\" style=\"stop-color:rgb(" << start_r << "," << start_g << "," << start_b << ")\"/>" << '\n'
+ << " <stop offset=\"1\" style=\"stop-color:rgb(" << stop_r << "," << stop_g << "," << stop_b << ")\"/>" << '\n'
+ << " </linearGradient>" << '\n';
+
+ // draw box corresponding to the gradient above
+ out << " <rect"
+ << " x=\"" << width + additional_width
+ << "\" y=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29)) + (3-index) * element_height
+ << "\" width=\"" << element_width
+ << "\" height=\"" << element_height
+ << "\" style=\"stroke:black; stroke-width:2; fill:url(#colorbar_" << index << ")\"/>" << '\n';
+ }
+
+ for (unsigned int index = 0; index < 5; index++)
+ {
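+ // label five equidistant ticks; the values are truncated to four decimals, and the extrema are marked 'min' and 'max'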
+ out << " <text x=\"" << width + additional_width + static_cast<unsigned int>(1.5 * element_width)
+ << "\" y=\"" << static_cast<unsigned int>(.5 + (height/100.) * (margin_in_percent + 29) + (4.-index) * element_height + 30.) << "\""
+ << " style=\"text-anchor:start; font-size:80; font-family:Helvetica";
- if (index == 0 || index == 4) out << "; font-weight:bold";
+ if (index == 0 || index == 4) out << "; font-weight:bold";
- out << "\">" << (float)(((int)((z_min + index * (z_dimension / 4.))*10000))/10000.);
+ out << "\">" << (float)(((int)((z_min + index * (z_dimension / 4.))*10000))/10000.);
- if (index == 4) out << " max";
- if (index == 0) out << " min";
+ if (index == 4) out << " max";
+ if (index == 0) out << " min";
- out << "</text>" << '\n';
+ out << "</text>" << '\n';
+ }
}
- }
// finalize the svg file
out << '\n' << "</svg>";
if (at_newline)
print_line_head();
- if(p == p_flush)
+ if (p == p_flush)
at_newline = false;
- if(p == p_endl)
+ if (p == p_endl)
at_newline = true;
if (get_prefixes().size() <= std_depth)
// If this is a new locally stored stack, copy the "blessed" prefixes
// from the initial thread that created logstream.
- if(! exists)
+ if (! exists)
{
const tbb::enumerable_thread_specific<std::stack<std::string> > &impl
= prefixes.get_implementation();
MPI_InitFinalize::MPI_InitFinalize (int &argc,
- char ** &argv,
- unsigned int max_num_threads)
- :
- owns_mpi (true)
+ char ** &argv,
+ unsigned int max_num_threads)
+ :
+ owns_mpi (true)
{
do_init(argc, argv, max_num_threads);
}
void
MPI_InitFinalize::do_init(int &argc,
- char ** &argv,
- unsigned int max_num_threads)
+ char ** &argv,
+ unsigned int max_num_threads)
{
static bool constructor_has_already_run = false;
Assert (constructor_has_already_run == false,
// or just end PETSc.
PetscFinalize();
# endif
- }
-#endif
+ }
+#endif
// only MPI_Finalize if we are running with MPI and we are not using PETSc
void MultithreadInfo::set_thread_limit(const unsigned int max_threads)
{
unsigned int max_threads_env = numbers::invalid_unsigned_int;
- char* penv;
+ char *penv;
penv = getenv ("DEAL_II_NUM_THREADS");
if (penv!=NULL)
if (n_max_threads == numbers::invalid_unsigned_int)
n_max_threads = tbb::task_scheduler_init::default_num_threads();
else
- {
- static tbb::task_scheduler_init dummy (n_max_threads);
- }
+ {
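+ // the static object is constructed only once and keeps the TBB scheduler alive with the requested thread count for the lifetime of the program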
+ static tbb::task_scheduler_init dummy (n_max_threads);
+ }
}
bool MultithreadInfo::is_running_single_threaded()
for (unsigned int i=0; i<s.size(); ++i)
{
static const std::string allowed_characters
- ("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789");
+ ("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789");
if ((! mangle_whole_string)
- &&
- (allowed_characters.find (s[i]) != std::string::npos))
- u.push_back (s[i]);
+ &&
+ (allowed_characters.find (s[i]) != std::string::npos))
+ u.push_back (s[i]);
else
- {
- u.push_back ('_');
- static const char hex[16]
- = { '0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
- u.push_back (hex[static_cast<unsigned char>(s[i])/16]);
- u.push_back (hex[static_cast<unsigned char>(s[i])%16]);
- }
+ {
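+ // escape a disallowed character as '_' followed by its two-digit hex code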
+ u.push_back ('_');
+ static const char hex[16]
+ = { '0','1','2','3','4','5','6','7','8','9','a','b','c','d','e','f'};
+ u.push_back (hex[static_cast<unsigned char>(s[i])/16]);
+ u.push_back (hex[static_cast<unsigned char>(s[i])%16]);
+ }
}
return u;
ExcIndexRange(expanded_import_indices[i], local_range_data.first,
local_range_data.second));
types::global_dof_index new_index = (expanded_import_indices[i] -
- local_range_data.first);
+ local_range_data.first);
if (new_index == last_index+1)
compressed_import_indices.back().second++;
else
}
}
else
- {
+ {
const double offset = step * interval;
if (x<offset || x>offset+step)
{
{
if (std::uncaught_exception() == false)
{
- std::string infostring;
- for (map_iterator it = counter_map.begin(); it != counter_map.end(); ++it)
- {
- if (it->second > 0)
- infostring += std::string("\n from Subscriber ")
- + std::string(it->first);
- }
-
- if (infostring == "")
- infostring = "<none>";
-
+ std::string infostring;
+ for (map_iterator it = counter_map.begin(); it != counter_map.end(); ++it)
+ {
+ if (it->second > 0)
+ infostring += std::string("\n from Subscriber ")
+ + std::string(it->first);
+ }
+
+ if (infostring == "")
+ infostring = "<none>";
+
Assert (counter == 0,
ExcInUse (counter, object_info->name(), infostring));
}
void compute_tensor_index(const unsigned int,
const unsigned int,
const unsigned int,
- unsigned int (&)[dim])
+ unsigned int (&)[dim])
{
Assert(false, ExcNotImplemented());
}
TimerOutput::TimerOutput (std::ostream &stream,
const enum OutputFrequency output_frequency,
const enum OutputType output_type)
- :
- output_frequency (output_frequency),
- output_type (output_type),
- out_stream (stream, true),
- output_is_enabled (true)
+ :
+ output_frequency (output_frequency),
+ output_type (output_type),
+ out_stream (stream, true),
+ output_is_enabled (true)
#ifdef DEAL_II_WITH_MPI
- , mpi_communicator (MPI_COMM_SELF)
+ , mpi_communicator (MPI_COMM_SELF)
#endif
{}
TimerOutput::TimerOutput (ConditionalOStream &stream,
const enum OutputFrequency output_frequency,
const enum OutputType output_type)
- :
- output_frequency (output_frequency),
- output_type (output_type),
- out_stream (stream),
- output_is_enabled (true)
+ :
+ output_frequency (output_frequency),
+ output_type (output_type),
+ out_stream (stream),
+ output_is_enabled (true)
#ifdef DEAL_II_WITH_MPI
- , mpi_communicator (MPI_COMM_SELF)
+ , mpi_communicator (MPI_COMM_SELF)
#endif
{}
std::ostream &stream,
const enum OutputFrequency output_frequency,
const enum OutputType output_type)
- :
- output_frequency (output_frequency),
- output_type (output_type),
- out_stream (stream, true),
- output_is_enabled (true),
- mpi_communicator (mpi_communicator)
+ :
+ output_frequency (output_frequency),
+ output_type (output_type),
+ out_stream (stream, true),
+ output_is_enabled (true),
+ mpi_communicator (mpi_communicator)
{}
ConditionalOStream &stream,
const enum OutputFrequency output_frequency,
const enum OutputType output_type)
- :
- output_frequency (output_frequency),
- output_type (output_type),
- out_stream (stream),
- output_is_enabled (true),
- mpi_communicator (mpi_communicator)
+ :
+ output_frequency (output_frequency),
+ output_type (output_type),
+ out_stream (stream),
+ output_is_enabled (true),
+ mpi_communicator (mpi_communicator)
{}
#endif
leave_subsection();
if ( (output_frequency == summary || output_frequency == every_call_and_summary)
- && output_is_enabled == true)
+ && output_is_enabled == true)
print_summary();
}
void *ptr = static_cast<char *>(q->p.user_data) + offset;
typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus
status = * static_cast<
- typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus*
+ typename parallel::distributed::Triangulation<dim,spacedim>::CellStatus *
>(q->p.user_data);
switch (status)
{
// do not allow anisotropic refinement
#ifdef DEBUG
for (typename Triangulation<dim,spacedim>::active_cell_iterator
- cell = this->begin_active();
- cell != this->end(); ++cell)
- if (cell->is_locally_owned() && cell->refine_flag_set())
- Assert (cell->refine_flag_set() ==
- RefinementPossibilities<dim>::isotropic_refinement,
- ExcMessage ("This class does not support anisotropic refinement"));
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ if (cell->is_locally_owned() && cell->refine_flag_set())
+ Assert (cell->refine_flag_set() ==
+ RefinementPossibilities<dim>::isotropic_refinement,
+ ExcMessage ("This class does not support anisotropic refinement"));
#endif
if (this->n_levels()==dealii::internal::p4est::functions<dim>::max_level)
{
for (typename Triangulation<dim,spacedim>::active_cell_iterator
- cell = this->begin_active(dealii::internal::p4est::functions<dim>::max_level-1);
- cell != this->end(dealii::internal::p4est::functions<dim>::max_level-1); ++cell)
+ cell = this->begin_active(dealii::internal::p4est::functions<dim>::max_level-1);
+ cell != this->end(dealii::internal::p4est::functions<dim>::max_level-1); ++cell)
{
AssertThrow(!(cell->refine_flag_set()),
- ExcMessage("Fatal Error: maximum refinement level of p4est reached."));
+ ExcMessage("Fatal Error: maximum refinement level of p4est reached."));
}
}
try
{
dealii::Triangulation<dim,spacedim>::
- copy_triangulation (old_tria);
- }
+ copy_triangulation (old_tria);
+ }
catch (const typename dealii::Triangulation<dim,spacedim>::DistortedCellList &)
{
// the underlying
"if they are not refined!"));
if (const dealii::parallel::distributed::Triangulation<dim,spacedim> *
- old_tria_x = dynamic_cast<const dealii::parallel::distributed::Triangulation<dim,spacedim> *>(&old_tria))
+ old_tria_x = dynamic_cast<const dealii::parallel::distributed::Triangulation<dim,spacedim> *>(&old_tria))
{
Assert (!old_tria_x->refinement_in_progress,
ExcMessage ("Parallel distributed triangulations can only "
max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler)
{
return std::min(static_cast<types::global_dof_index>(3*dof_handler.selected_fe->dofs_per_vertex +
- 2*dof_handler.selected_fe->dofs_per_line),
+ 2*dof_handler.selected_fe->dofs_per_line),
dof_handler.n_dofs());
}
const bool check_validity)
{
for (typename std::vector<typename DoFHandler<1,spacedim>::MGVertexDoFs>::iterator
- i=dof_handler.mg_vertex_dofs.begin();
+ i=dof_handler.mg_vertex_dofs.begin();
i!=dof_handler.mg_vertex_dofs.end();
- ++i)
+ ++i)
// if the present vertex lives on
// the current level
if ((i->get_coarsest_level() <= level) &&
for (std::vector<types::global_dof_index>::iterator
- i=dof_handler.mg_levels[level]->dof_object.dofs.begin();
+ i=dof_handler.mg_levels[level]->dof_object.dofs.begin();
i!=dof_handler.mg_levels[level]->dof_object.dofs.end();
- ++i)
+ ++i)
{
if (*i != DoFHandler<1>::invalid_dof_index)
{
static
void
renumber_mg_dofs (const std::vector<dealii::types::global_dof_index> &,
- const IndexSet &,
- DoFHandler<3,spacedim> &,
+ const IndexSet &,
+ DoFHandler<3,spacedim> &,
const unsigned int ,
const bool )
{
const typename dealii::internal::p4est::types<dim>::quadrant &p4est_cell,
const typename DoFHandler<dim,spacedim>::level_cell_iterator &dealii_cell,
const typename dealii::internal::p4est::types<dim>::quadrant &quadrant,
- dealii::types::global_dof_index* dofs,
+ dealii::types::global_dof_index *dofs,
unsigned int level)
{
if (internal::p4est::quadrant_is_equal<dim>(p4est_cell, quadrant))
=reinterpret_cast<typename dealii::internal::p4est::types<dim>::quadrant *>(ptr);
ptr+=cells*sizeof(typename dealii::internal::p4est::types<dim>::quadrant);
dealii::types::global_dof_index *dofs
- = reinterpret_cast<dealii::types::global_dof_index*>(ptr);
+ = reinterpret_cast<dealii::types::global_dof_index *>(ptr);
- // the dofs pointer contains for each cell the number of dofs
- // on that cell (dofs[0]) followed by the dof indices itself.
+ // the dofs pointer contains for each cell the number of dofs
+ // on that cell (dofs[0]) followed by the dof indices themselves.
for (unsigned int c=0; c<cells; ++c, dofs+=1+dofs[0])
{
typename DoFHandler<dim,spacedim>::level_cell_iterator
=reinterpret_cast<typename dealii::internal::p4est::types<dim>::quadrant *>(ptr);
ptr+=cells*sizeof(typename dealii::internal::p4est::types<dim>::quadrant);
dealii::types::global_dof_index *dofs
- = reinterpret_cast<dealii::types::global_dof_index*>(ptr);
+ = reinterpret_cast<dealii::types::global_dof_index *>(ptr);
- // the dofs pointer contains for each cell the number of dofs
- // on that cell (dofs[0]) followed by the dof indices itself.
+ // the dofs pointer contains for each cell the number of dofs
+ // on that cell (dofs[0]) followed by the dof indices themselves.
for (unsigned int c=0; c<cells; ++c, dofs+=1+dofs[0])
{
typename DoFHandler<dim,spacedim>::level_cell_iterator
{
//TODO: Merge with previous function
std::vector<types::global_dof_index> renumbering (dof_handler.n_dofs(),
- hp::DoFHandler<dim>::invalid_dof_index);
+ hp::DoFHandler<dim>::invalid_dof_index);
typename hp::DoFHandler<dim>::active_cell_iterator
start = dof_handler.begin_active();
ExcNotInitialized());
std::vector<types::global_dof_index> renumbering (dof_handler.n_dofs(level),
- DH::invalid_dof_index);
+ DH::invalid_dof_index);
typename DH::level_cell_iterator start =dof_handler.begin(level);
typename DH::level_cell_iterator end = dof_handler.end(level);
Utilities::MPI::n_mpi_processes (tria->get_communicator()));
MPI_Allgather ( &local_dof_count[0],
- n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- &all_dof_counts[0],
n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- tria->get_communicator());
+ &all_dof_counts[0],
+ n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+ tria->get_communicator());
for (unsigned int i=0; i<n_buckets; ++i)
Assert (all_dof_counts[n_buckets*tria->locally_owned_subdomain()+i]
block_wise (DoFHandler<dim,spacedim> &dof_handler)
{
std::vector<types::global_dof_index> renumbering (dof_handler.n_locally_owned_dofs(),
- DoFHandler<dim>::invalid_dof_index);
+ DoFHandler<dim>::invalid_dof_index);
typename DoFHandler<dim,spacedim>::active_cell_iterator
start = dof_handler.begin_active();
ExcNotInitialized());
std::vector<types::global_dof_index> renumbering (dof_handler.n_dofs(level),
- DoFHandler<dim>::invalid_dof_index);
+ DoFHandler<dim>::invalid_dof_index);
typename DoFHandler<dim>::level_cell_iterator
start =dof_handler.begin(level);
all_dof_counts(fe_collection.n_components() *
Utilities::MPI::n_mpi_processes (tria->get_communicator()));
- Assert (sizeof(types::global_dof_index) == sizeof(unsigned int),
- ExcNotImplemented());
+ Assert (sizeof(types::global_dof_index) == sizeof(unsigned int),
+ ExcNotImplemented());
MPI_Allgather ( &local_dof_count[0],
- n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- &all_dof_counts[0],
n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
- tria->get_communicator());
+ &all_dof_counts[0],
+ n_buckets, DEAL_II_DOF_INDEX_MPI_TYPE,
+ tria->get_communicator());
for (unsigned int i=0; i<n_buckets; ++i)
Assert (all_dof_counts[n_buckets*tria->locally_owned_subdomain()+i]
hierarchical (DoFHandler<dim> &dof_handler)
{
std::vector<types::global_dof_index> renumbering (dof_handler.n_locally_owned_dofs(),
- DoFHandler<dim>::invalid_dof_index);
+ DoFHandler<dim>::invalid_dof_index);
typename DoFHandler<dim>::level_cell_iterator cell;
ExcNotInitialized());
std::vector<types::global_dof_index> renumbering(dof_handler.n_dofs(level),
- DH::invalid_dof_index);
+ DH::invalid_dof_index);
compute_sort_selected_dofs_back(renumbering, dof_handler, selected_dofs, level);
dof_handler.renumber_dofs(level, renumbering);
if (!cell->neighbor(face)->active()
||
(cell->neighbor(face)->subdomain_id() !=
- cell->subdomain_id()))
+ cell->subdomain_id()))
constraints.add_entries_local_to_global
(dofs_on_other_cell, dofs_on_this_cell,
sparsity, keep_constrained_dofs);
for (unsigned int i=0; i<dofs_per_face; ++i)
if (!constraint_matrix.is_constrained(dofs_2[i]))
if ((component_mask.n_selected_components(fe.n_components())
- == fe.n_components())
- ||
- component_mask[fe.face_system_to_component_index(i).first])
+ == fe.n_components())
+ ||
+ component_mask[fe.face_system_to_component_index(i).first])
{
constraint_matrix.add_line(dofs_2[i]);
for (unsigned int jj=0; jj<dofs_per_face; ++jj)
const unsigned int n_components = dof_handler.get_fe().n_components();
std::fill (dofs_per_component.begin(), dofs_per_component.end(),
- types::global_dof_index(0));
+ types::global_dof_index(0));
// If the empty vector was given as default argument, set up this
// vector as identity.
||
(std::accumulate (dofs_per_component.begin(),
dofs_per_component.end(),
- types::global_dof_index(0))
+ types::global_dof_index(0))
== dof_handler.n_locally_owned_dofs()),
ExcInternalError());
MPI_Allreduce ( &local_dof_count[0], &dofs_per_component[0], n_target_components,
DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM, tria->get_communicator());
+ MPI_SUM, tria->get_communicator());
}
#endif
}
{
const FiniteElement<DH::dimension,DH::space_dimension> &fe = fe_collection[this_fe];
std::fill (dofs_per_block.begin(), dofs_per_block.end(),
- types::global_dof_index(0));
+ types::global_dof_index(0));
// If the empty vector was given as default argument, set up this
// vector as identity.
{
std::vector<types::global_dof_index> local_dof_count = dofs_per_block;
MPI_Allreduce ( &local_dof_count[0], &dofs_per_block[0],
- n_target_blocks,
+ n_target_blocks,
DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM, tria->get_communicator());
+ MPI_SUM, tria->get_communicator());
}
#endif
#endif
if (global_parameter_representation(i) != 0)
{
const types::global_dof_index wi = parameter_dof_indices[local_dof],
- wj = weight_mapping[i];
+ wj = weight_mapping[i];
weights[wi][wj] = global_parameter_representation(i);
};
}
// global numbers of dofs
const types::global_dof_index n_coarse_dofs = coarse_grid.n_dofs(),
- n_fine_dofs = fine_grid.n_dofs();
+ n_fine_dofs = fine_grid.n_dofs();
// local numbers of dofs
const unsigned int fine_dofs_per_cell = fine_fe.dofs_per_cell;
// global numbers of dofs
const types::global_dof_index n_coarse_dofs = coarse_grid.n_dofs(),
- n_fine_dofs = fine_grid.n_dofs();
+ n_fine_dofs = fine_grid.n_dofs();
// get an array in which we store which dof on the coarse grid is a
j = weights[row].find(col);
if ((j != weights[row].end()) && (j->second != 0))
constraint_line.push_back (std::pair<types::global_dof_index,double>(representants[row],
- j->second));
+ j->second));
};
constraints.add_entries (global_dof, constraint_line);
// first construct the inverse mapping of weight_mapping
std::vector<types::global_dof_index> inverse_weight_mapping (n_global_parm_dofs,
- DoFHandler<dim,spacedim>::invalid_dof_index);
+ DoFHandler<dim,spacedim>::invalid_dof_index);
for (types::global_dof_index i=0; i<weight_mapping.size(); ++i)
{
const types::global_dof_index parameter_dof = weight_mapping[i];
// Initialize quadratures to obtain
// quadrature points later on.
const QGauss<dim - 1> reference_edge_quadrature (degree + 1);
- const unsigned int&
+ const unsigned int &
n_edge_points = reference_edge_quadrature.size ();
const unsigned int n_boundary_points
= GeometryInfo<dim>::lines_per_cell * n_edge_points;
const QGauss<1> edge_quadrature (2 * this->degree);
const std::vector<Point<1> > &edge_quadrature_points
= edge_quadrature.get_points ();
- const unsigned int&
+ const unsigned int &
n_edge_quadrature_points = edge_quadrature.size ();
const unsigned int
index = RefinementCase<dim>::isotropic_refinement - 1;
{
const QGauss<1>
reference_edge_quadrature (this->degree);
- const unsigned int&
+ const unsigned int &
n_edge_points = reference_edge_quadrature.size ();
// Let us begin with the
// the interior shape
// functions.
const QGauss<dim> reference_quadrature (this->degree);
- const unsigned int&
+ const unsigned int &
n_interior_points = reference_quadrature.size ();
// We create the
// Let us begin with the
// interpolation part.
const QGauss<dim - 1> reference_edge_quadrature (this->degree);
- const unsigned int&
+ const unsigned int &
n_edge_points = reference_edge_quadrature.size ();
for (unsigned int i = 0; i < 2; ++i)
// vertical, interior
// shape functions.
const QGauss<dim> reference_quadrature (this->degree);
- const unsigned int&
+ const unsigned int &
n_interior_points = reference_quadrature.size ();
const std::vector<Polynomials::Polynomial<double> > &
legendre_polynomials
// Let us begin with the
// interpolation part.
const QGauss<1> reference_edge_quadrature (this->degree);
- const unsigned int&
+ const unsigned int &
n_edge_points = reference_edge_quadrature.size ();
for (unsigned int q_point = 0; q_point < n_edge_points; ++q_point)
void
- get_face_sign_change_rt (const Triangulation<3>::cell_iterator & cell,
+ get_face_sign_change_rt (const Triangulation<3>::cell_iterator &cell,
const unsigned int dofs_per_face,
std::vector<double> &face_sign)
{
std::fill (face_sign.begin (), face_sign.end (), 1.0);
//TODO: think about what it would take here
}
-
+
void
get_face_sign_change_nedelec (const Triangulation<1>::cell_iterator &,
const unsigned int ,
void
- get_face_sign_change_nedelec (const Triangulation<3>::cell_iterator & cell,
+ get_face_sign_change_nedelec (const Triangulation<3>::cell_iterator &cell,
const unsigned int dofs_per_face,
std::vector<double> &face_sign)
{
// Compute eventual sign changes depending on the neighborhood
// between two faces.
std::vector<double> sign_change (this->dofs_per_cell, 1.0);
-
+
if (mapping_type == mapping_raviart_thomas)
get_face_sign_change_rt (cell, this->dofs_per_face, sign_change);
-
- else
- if (mapping_type == mapping_nedelec)
- get_face_sign_change_nedelec (cell, this->dofs_per_face, sign_change);
+
+ else if (mapping_type == mapping_nedelec)
+ get_face_sign_change_nedelec (cell, this->dofs_per_face, sign_change);
// for Piola mapping, the similarity
// concept cannot be used because of
// possible sign changes from one cell to
// the next.
if ( (mapping_type == mapping_piola) || (mapping_type == mapping_raviart_thomas)
- || (mapping_type == mapping_nedelec))
+ || (mapping_type == mapping_nedelec))
if (cell_similarity == CellSimilarity::translation)
cell_similarity = CellSimilarity::none;
// Compute eventual sign changes depending
// on the neighborhood between two faces.
std::vector<double> sign_change (this->dofs_per_cell, 1.0);
-
+
if (mapping_type == mapping_raviart_thomas)
get_face_sign_change_rt (cell, this->dofs_per_face, sign_change);
-
- else
- if (mapping_type == mapping_nedelec)
- get_face_sign_change_nedelec (cell, this->dofs_per_face, sign_change);
+
+ else if (mapping_type == mapping_nedelec)
+ get_face_sign_change_nedelec (cell, this->dofs_per_face, sign_change);
for (unsigned int i=0; i<this->dofs_per_cell; ++i)
{
for (unsigned int k = 0; k < n_q_points; ++k)
for (unsigned int d = 0; d < dim; ++d)
data.shape_gradients[first + d][k] = sign_change[i] * shape_grads1[k][d];
-
+
break;
}
// Compute eventual sign changes depending
// on the neighborhood between two faces.
std::vector<double> sign_change (this->dofs_per_cell, 1.0);
-
+
if (mapping_type == mapping_raviart_thomas)
get_face_sign_change_rt (cell, this->dofs_per_face, sign_change);
-
- else
- if (mapping_type == mapping_nedelec)
- get_face_sign_change_nedelec (cell, this->dofs_per_face, sign_change);
+
+ else if (mapping_type == mapping_nedelec)
+ get_face_sign_change_nedelec (cell, this->dofs_per_face, sign_change);
for (unsigned int i=0; i<this->dofs_per_cell; ++i)
{
template <class POLY, int dim, int spacedim>
-const FullMatrix<double>&
+const FullMatrix<double> &
FE_Q_Base<POLY,dim,spacedim>
::get_prolongation_matrix (const unsigned int child,
const RefinementCase<dim> &refinement_case) const
// almost negligible also for high order elements
const unsigned int dofs1d = this->degree+1;
std::vector<Table<2,double> >
- subcell_evaluations (dim, Table<2,double>(dofs1d, dofs1d));
+ subcell_evaluations (dim, Table<2,double>(dofs1d, dofs1d));
const std::vector<unsigned int> &index_map_inverse =
this->poly_space.get_numbering_inverse();
template <class POLY, int dim, int spacedim>
-const FullMatrix<double>&
+const FullMatrix<double> &
FE_Q_Base<POLY,dim,spacedim>
::get_restriction_matrix (const unsigned int child,
const RefinementCase<dim> &refinement_case) const
x_source_fe.dofs_per_cell));
this->FE_Q_Base<TensorProductPolynomialsConst<dim>,dim,spacedim>::
- get_interpolation_matrix(x_source_fe, interpolation_matrix);
+ get_interpolation_matrix(x_source_fe, interpolation_matrix);
}
:
FE_Q_Base<TensorProductPolynomials<dim,Polynomials::PiecewisePolynomial<double> >, dim, spacedim> (
TensorProductPolynomials<dim,Polynomials::PiecewisePolynomial<double> >
- (Polynomials::generate_complete_Lagrange_basis_on_subdivisions(subdivisions, 1)),
+ (Polynomials::generate_complete_Lagrange_basis_on_subdivisions(subdivisions, 1)),
FiniteElementData<dim>(this->get_dpo_vector(subdivisions),
1, subdivisions,
FiniteElementData<dim>::H1),
template <int dim, int spacedim>
-const FullMatrix<double>&
+const FullMatrix<double> &
FESystem<dim,spacedim>
::get_restriction_matrix (const unsigned int child,
const RefinementCase<dim> &refinement_case) const
// shortcut for accessing local restrictions further down
std::vector<const FullMatrix<double> *>
- base_matrices(this->n_base_elements());
+ base_matrices(this->n_base_elements());
for (unsigned int i=0; i<this->n_base_elements(); ++i)
{
// so get the common base element and the indices therein:
const unsigned int
- base = this->system_to_base_table[i].first.first;
+ base = this->system_to_base_table[i].first.first;
const unsigned int
- base_index_i = this->system_to_base_table[i].second,
- base_index_j = this->system_to_base_table[j].second;
+ base_index_i = this->system_to_base_table[i].second,
+ base_index_j = this->system_to_base_table[j].second;
// if we are sure that DoFs i and j may couple, then copy
// entries of the matrices:
template <int dim, int spacedim>
-const FullMatrix<double>&
+const FullMatrix<double> &
FESystem<dim,spacedim>
::get_prolongation_matrix (const unsigned int child,
const RefinementCase<dim> &refinement_case) const
bool do_prolongation = true;
std::vector<const FullMatrix<double> *>
- base_matrices(this->n_base_elements());
+ base_matrices(this->n_base_elements());
for (unsigned int i=0; i<this->n_base_elements(); ++i)
{
base_matrices[i] =
this->system_to_base_table[j].first)
continue;
const unsigned int
- base = this->system_to_base_table[i].first.first;
+ base = this->system_to_base_table[i].first.first;
const unsigned int
- base_index_i = this->system_to_base_table[i].second,
- base_index_j = this->system_to_base_table[j].second;
+ base_index_i = this->system_to_base_table[i].second,
+ base_index_j = this->system_to_base_table[j].second;
prolongate(i,j) = (*base_matrices[base])(base_index_i,base_index_j);
}
prolongate.swap(const_cast<FullMatrix<double> &>
unsigned int index = 0;
for (unsigned int index=0; index<fes.size(); ++index)
if (multiplicities[index]>0)
- {
- total_conformity = fes[index]->conforming_space;
- break;
- }
+ {
+ total_conformity = fes[index]->conforming_space;
+ break;
+ }
for (; index<fes.size(); ++index)
if (multiplicities[index]>0)
- total_conformity =
- typename FiniteElementData<dim>::Conformity(total_conformity
- &
- fes[index]->conforming_space);
+ total_conformity =
+ typename FiniteElementData<dim>::Conformity(total_conformity
+ &
+ fes[index]->conforming_space);
}
std::vector<unsigned int> dpo;
if (dim>2) dpo.push_back(multiplied_dofs_per_hex);
return FiniteElementData<dim> (dpo,
- multiplied_n_components,
- degree,
- total_conformity,
- summed_multiplicities);
+ multiplied_n_components,
+ degree,
+ total_conformity,
+ summed_multiplicities);
}
DoFTools::extract_locally_relevant_dofs (dof2,
dof2_locally_relevant_dofs);
- parallel::distributed::Vector<Number>
- u2 (dof2_locally_owned_dofs,
- dof2_locally_relevant_dofs,
- u1.get_mpi_communicator());
+ parallel::distributed::Vector<Number>
+ u2 (dof2_locally_owned_dofs,
+ dof2_locally_relevant_dofs,
+ u1.get_mpi_communicator());
interpolate(dof1, u1, dof2, constraints2, u2);
u2.update_ghost_values ();
template <int dim, int spacedim>
Tensor<2, dim, spacedim>::
Tensor(const FEValuesBase<dim, spacedim> &fe_values,
- const unsigned int first_tensor_component)
+ const unsigned int first_tensor_component)
:
fe_values(fe_values),
first_tensor_component(first_tensor_component),
const double *shape_value_ptr =
&shape_values(shape_function_data[shape_function].row_index, 0);
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- values[q_point] += value * *shape_value_ptr++;
+ values[q_point] += value * (*shape_value_ptr++);
}
}
const dealii::Tensor<order,spacedim> *shape_derivative_ptr =
&shape_derivatives[shape_function_data[shape_function].row_index][0];
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- derivatives[q_point] += value * *shape_derivative_ptr++;
+ derivatives[q_point] += value * (*shape_derivative_ptr++);
}
}
shape_function_data[shape_function].single_nonzero_component_index;
const double *shape_value_ptr = &shape_values(snc,0);
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- values[q_point][comp] += value * *shape_value_ptr++;
+ values[q_point][comp] += value * (*shape_value_ptr++);
}
else
for (unsigned int d=0; d<spacedim; ++d)
const double *shape_value_ptr =
&shape_values(shape_function_data[shape_function].row_index[d],0);
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- values[q_point][d] += value * *shape_value_ptr++;
+ values[q_point][d] += value * (*shape_value_ptr++);
}
}
}
const dealii::Tensor<order,spacedim> *shape_derivative_ptr =
&shape_derivatives[snc][0];
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- derivatives[q_point][comp] += value * *shape_derivative_ptr++;
+ derivatives[q_point][comp] += value * (*shape_derivative_ptr++);
}
else
for (unsigned int d=0; d<spacedim; ++d)
&shape_derivatives[shape_function_data[shape_function].
row_index[d]][0];
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- derivatives[q_point][d] += value * *shape_derivative_ptr++;
+ derivatives[q_point][d] += value * (*shape_derivative_ptr++);
}
}
}
AssertDimension (laplacians.size(), n_quadrature_points);
std::fill (laplacians.begin(), laplacians.end(),
- dealii::Tensor<1,spacedim>());
+ dealii::Tensor<1,spacedim>());
for (unsigned int shape_function=0;
shape_function<dofs_per_cell; ++shape_function)
(shape_function_data[shape_function].single_nonzero_component_index);
const double *shape_value_ptr = &shape_values(snc,0);
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- values[q_point][comp] += value * *shape_value_ptr++;
+ values[q_point][comp] += value * (*shape_value_ptr++);
}
else
for (unsigned int d=0;
const double *shape_value_ptr =
&shape_values(shape_function_data[shape_function].row_index[d],0);
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- values[q_point][comp] += value * *shape_value_ptr++;
+ values[q_point][comp] += value * (*shape_value_ptr++);
}
}
}
AssertDimension (divergences.size(), n_quadrature_points);
std::fill (divergences.begin(), divergences.end(),
- dealii::Tensor<1,spacedim>());
+ dealii::Tensor<1,spacedim>());
for (unsigned int shape_function=0;
shape_function<dofs_per_cell; ++shape_function)
// ---------------------- non-symmetric tensor part ------------------------
- template <int dim, int spacedim>
- void
- do_function_values (const ::dealii::Vector<double> &dof_values,
- const Table<2,double> &shape_values,
- const std::vector<typename Tensor<2,dim,spacedim>::ShapeFunctionData> &shape_function_data,
- std::vector<dealii::Tensor<2,spacedim> > &values)
+ template <int dim, int spacedim>
+ void
+ do_function_values (const ::dealii::Vector<double> &dof_values,
+ const Table<2,double> &shape_values,
+ const std::vector<typename Tensor<2,dim,spacedim>::ShapeFunctionData> &shape_function_data,
+ std::vector<dealii::Tensor<2,spacedim> > &values)
+ {
+ const unsigned int dofs_per_cell = dof_values.size();
+ const unsigned int n_quadrature_points = dofs_per_cell > 0 ?
+ shape_values.n_cols() : values.size();
+ AssertDimension (values.size(), n_quadrature_points);
+
+ std::fill (values.begin(), values.end(),
+ dealii::Tensor<2,spacedim>());
+
+ for (unsigned int shape_function=0;
+ shape_function<dofs_per_cell; ++shape_function)
{
- const unsigned int dofs_per_cell = dof_values.size();
- const unsigned int n_quadrature_points = dofs_per_cell > 0 ?
- shape_values.n_cols() : values.size();
- AssertDimension (values.size(), n_quadrature_points);
+ const int snc = shape_function_data[shape_function].single_nonzero_component;
- std::fill (values.begin(), values.end(),
- dealii::Tensor<2,spacedim>());
+ if (snc == -2)
+ // shape function is zero for the selected components
+ continue;
- for (unsigned int shape_function=0;
- shape_function<dofs_per_cell; ++shape_function)
- {
- const int snc = shape_function_data[shape_function].single_nonzero_component;
+ const double value = dof_values(shape_function);
+ if (value == 0.)
+ continue;
- if (snc == -2)
- // shape function is zero for the selected components
- continue;
+ if (snc != -1)
+ {
+ const unsigned int comp =
+ shape_function_data[shape_function].single_nonzero_component_index;
- const double value = dof_values(shape_function);
- if (value == 0.)
- continue;
+ const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(comp);
- if (snc != -1)
+ const double *shape_value_ptr = &shape_values(snc,0);
+ for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
+ values[q_point][indices] += value * (*shape_value_ptr++);
+ }
+ else
+ for (unsigned int d=0;
+ d<dim*dim; ++d)
+ if (shape_function_data[shape_function].is_nonzero_shape_function_component[d])
{
- const unsigned int comp =
- shape_function_data[shape_function].single_nonzero_component_index;
+ const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(d);
- const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(comp);
-
- const double *shape_value_ptr = &shape_values(snc,0);
+ const double *shape_value_ptr =
+ &shape_values(shape_function_data[shape_function].row_index[d],0);
for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- values[q_point][indices] += value * *shape_value_ptr++;
+ values[q_point][indices] += value * (*shape_value_ptr++);
}
- else
- for (unsigned int d=0;
- d<dim*dim; ++d)
- if (shape_function_data[shape_function].is_nonzero_shape_function_component[d])
- {
- const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(d);
-
- const double *shape_value_ptr =
- &shape_values(shape_function_data[shape_function].row_index[d],0);
- for (unsigned int q_point=0; q_point<n_quadrature_points; ++q_point)
- values[q_point][indices] += value * *shape_value_ptr++;
- }
- }
}
+ }
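// Net effect of do_function_values above: for every quadrature
// point q it accumulates
//   values[q] = sum_k U_k phi_k(q),
// where U_k are the local dof values and phi_k the tensor-valued
// shape functions, skipping dofs whose selected components vanish.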
- template <int dim, int spacedim>
- void
- do_function_divergences (const ::dealii::Vector<double> &dof_values,
- const std::vector<std::vector<dealii::Tensor<1,spacedim> > > &shape_gradients,
- const std::vector<typename Tensor<2,dim,spacedim>::ShapeFunctionData> &shape_function_data,
- std::vector<dealii::Tensor<1,spacedim> > &divergences)
- {
- const unsigned int dofs_per_cell = dof_values.size();
- const unsigned int n_quadrature_points = dofs_per_cell > 0 ?
- shape_gradients[0].size() : divergences.size();
- AssertDimension (divergences.size(), n_quadrature_points);
+ template <int dim, int spacedim>
+ void
+ do_function_divergences (const ::dealii::Vector<double> &dof_values,
+ const std::vector<std::vector<dealii::Tensor<1,spacedim> > > &shape_gradients,
+ const std::vector<typename Tensor<2,dim,spacedim>::ShapeFunctionData> &shape_function_data,
+ std::vector<dealii::Tensor<1,spacedim> > &divergences)
+ {
+ const unsigned int dofs_per_cell = dof_values.size();
+ const unsigned int n_quadrature_points = dofs_per_cell > 0 ?
+ shape_gradients[0].size() : divergences.size();
+ AssertDimension (divergences.size(), n_quadrature_points);
- std::fill (divergences.begin(), divergences.end(),
- dealii::Tensor<1,spacedim>());
+ std::fill (divergences.begin(), divergences.end(),
+ dealii::Tensor<1,spacedim>());
- for (unsigned int shape_function=0;
- shape_function<dofs_per_cell; ++shape_function)
- {
- const int snc = shape_function_data[shape_function].single_nonzero_component;
+ for (unsigned int shape_function=0;
+ shape_function<dofs_per_cell; ++shape_function)
+ {
+ const int snc = shape_function_data[shape_function].single_nonzero_component;
- if (snc == -2)
- // shape function is zero for the selected components
- continue;
+ if (snc == -2)
+ // shape function is zero for the selected components
+ continue;
- const double value = dof_values(shape_function);
- if (value == 0.)
- continue;
+ const double value = dof_values(shape_function);
+ if (value == 0.)
+ continue;
- if (snc != -1)
- {
- const unsigned int comp =
- shape_function_data[shape_function].single_nonzero_component_index;
+ if (snc != -1)
+ {
+ const unsigned int comp =
+ shape_function_data[shape_function].single_nonzero_component_index;
- const dealii::Tensor < 1, spacedim> *shape_gradient_ptr =
- &shape_gradients[snc][0];
+ const dealii::Tensor < 1, spacedim> *shape_gradient_ptr =
+ &shape_gradients[snc][0];
- const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(comp);
- const unsigned int ii = indices[0];
- const unsigned int jj = indices[1];
+ const TableIndices<2> indices = dealii::Tensor<2,spacedim>::unrolled_to_component_indices(comp);
+ const unsigned int ii = indices[0];
+ const unsigned int jj = indices[1];
- for (unsigned int q_point = 0; q_point < n_quadrature_points;
- ++q_point, ++shape_gradient_ptr)
- {
- divergences[q_point][jj] += value * (*shape_gradient_ptr)[ii];
- }
- }
- else
+ for (unsigned int q_point = 0; q_point < n_quadrature_points;
+ ++q_point, ++shape_gradient_ptr)
{
- for (unsigned int d = 0;
- d < dim*dim; ++d)
- if (shape_function_data[shape_function].is_nonzero_shape_function_component[d])
- {
- Assert (false, ExcNotImplemented());
- }
+ divergences[q_point][jj] += value * (*shape_gradient_ptr)[ii];
}
}
+ else
+ {
+ for (unsigned int d = 0;
+ d < dim*dim; ++d)
+ if (shape_function_data[shape_function].is_nonzero_shape_function_component[d])
+ {
+ Assert (false, ExcNotImplemented());
+ }
+ }
}
+ }
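// In index notation, the divergence accumulated above is taken over
// the first index of the rank-2 tensor field T:
//   div(T)_j = sum_i d T_ij / d x_i,
// so a shape function whose single nonzero component is T_ij adds
// its x_i-derivative to component j of the result.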
} // end of namespace internal
#endif
new (&second_order_tensors[component])
dealii::FEValuesViews::Tensor<2, dim, spacedim > (fe_values,
- component);
+ component);
}
}
}
const double *shape_value_ptr = &shape_values(shape_func, 0);
for (unsigned int point=0; point<n_quadrature_points; ++point)
- values[point] += value * *shape_value_ptr++;
+ values[point] += value * (*shape_value_ptr++);
}
}
{
VectorType &values_comp = values[comp];
for (unsigned int point=0; point<n_quadrature_points; ++point)
- values_comp[point] += value * *shape_value_ptr++;
+ values_comp[point] += value * (*shape_value_ptr++);
}
else
for (unsigned int point=0; point<n_quadrature_points; ++point)
- values[point][comp] += value * *shape_value_ptr++;
+ values[point][comp] += value * (*shape_value_ptr++);
}
else
for (unsigned int c=0; c<n_components; ++c)
VectorType &values_comp = values[comp];
for (unsigned int point=0; point<n_quadrature_points;
++point)
- values_comp[point] += value * *shape_value_ptr++;
+ values_comp[point] += value * (*shape_value_ptr++);
}
else
for (unsigned int point=0; point<n_quadrature_points; ++point)
- values[point][comp] += value * *shape_value_ptr++;
+ values[point][comp] += value * (*shape_value_ptr++);
}
}
}
const Tensor<order,spacedim> *shape_derivative_ptr
= &shape_derivatives[shape_func][0];
for (unsigned int point=0; point<n_quadrature_points; ++point)
- derivatives[point] += value * *shape_derivative_ptr++;
+ derivatives[point] += value * (*shape_derivative_ptr++);
}
}
if (quadrature_points_fastest)
for (unsigned int point=0; point<n_quadrature_points; ++point)
- derivatives[comp][point] += value * *shape_derivative_ptr++;
+ derivatives[comp][point] += value * (*shape_derivative_ptr++);
else
for (unsigned int point=0; point<n_quadrature_points; ++point)
- derivatives[point][comp] += value * *shape_derivative_ptr++;
+ derivatives[point][comp] += value * (*shape_derivative_ptr++);
}
else
for (unsigned int c=0; c<n_components; ++c)
if (quadrature_points_fastest)
for (unsigned int point=0; point<n_quadrature_points; ++point)
- derivatives[comp][point] += value * *shape_derivative_ptr++;
+ derivatives[comp][point] += value * (*shape_derivative_ptr++);
else
for (unsigned int point=0; point<n_quadrature_points; ++point)
- derivatives[point][comp] += value * *shape_derivative_ptr++;
+ derivatives[point][comp] += value * (*shape_derivative_ptr++);
}
}
}
const double half_length)
{
// Determine number of cells and vertices
- const size_type
+ const size_type
n_cells = static_cast<size_type>(std::floor (half_length /
- std::max (radius_0,
- radius_1) +
- 0.5));
+ std::max (radius_0,
+ radius_1) +
+ 0.5));
const size_type n_vertices = 4 * (n_cells + 1);
std::vector<Point<3> > vertices_tmp(n_vertices);
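// A worked sketch of the cell count above, with illustrative values
// radius_0 = 1.0, radius_1 = 0.5, half_length = 3.2:
//   n_cells    = floor(3.2 / max(1.0, 0.5) + 0.5) = floor(3.7) = 3
//   n_vertices = 4 * (3 + 1) = 16
// i.e. the cone is subdivided along its axis into roughly unit-aspect
// cells, with one ring of four vertices per cross section.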
{
cell->face(i)->set_boundary_indicator(1);
for (unsigned int j=0; j<GeometryInfo<3>::lines_per_face; ++j)
- {
- const Point<3> vertices[2]
- = { cell->face(i)->line(j)->vertex(0),
- cell->face(i)->line(j)->vertex(1) };
- if ((std::fabs(vertices[0].distance(center)-radius) >
- 1e-5*radius)
- ||
- (std::fabs(vertices[1].distance(center)-radius) >
- 1e-5*radius))
- cell->face(i)->line(j)->set_boundary_indicator(1);
- }
+ {
+ const Point<3> vertices[2]
+ = { cell->face(i)->line(j)->vertex(0),
+ cell->face(i)->line(j)->vertex(1)
+ };
+ if ((std::fabs(vertices[0].distance(center)-radius) >
+ 1e-5*radius)
+ ||
+ (std::fabs(vertices[1].distance(center)-radius) >
+ 1e-5*radius))
+ cell->face(i)->line(j)->set_boundary_indicator(1);
+ }
}
}
++cell;
{
quad.boundary_id = cell->face(f)->boundary_indicator();
bid = std::max(bid, quad.boundary_id);
- for (size_type slice=0; slice<n_slices-1; ++slice)
- {
- quad.vertices[0] = cell->face(f)->vertex_index(0)+slice*input.n_vertices();
- quad.vertices[1] = cell->face(f)->vertex_index(1)+slice*input.n_vertices();
- quad.vertices[2] = cell->face(f)->vertex_index(0)+(slice+1)*input.n_vertices();
- quad.vertices[3] = cell->face(f)->vertex_index(1)+(slice+1)*input.n_vertices();
- s.boundary_quads.push_back(quad);
- }
+ for (size_type slice=0; slice<n_slices-1; ++slice)
+ {
+ quad.vertices[0] = cell->face(f)->vertex_index(0)+slice*input.n_vertices();
+ quad.vertices[1] = cell->face(f)->vertex_index(1)+slice*input.n_vertices();
+ quad.vertices[2] = cell->face(f)->vertex_index(0)+(slice+1)*input.n_vertices();
+ quad.vertices[3] = cell->face(f)->vertex_index(1)+(slice+1)*input.n_vertices();
+ s.boundary_quads.push_back(quad);
+ }
}
}
// auxiliary array for the level subdomains being used
int level_subdomains[256];
- for(int level_subdomain_index = 0; level_subdomain_index < 256; level_subdomain_index++)
+ for (int level_subdomain_index = 0; level_subdomain_index < 256; level_subdomain_index++)
level_subdomains[level_subdomain_index] = 0;
// We use an active cell iterator to determine the
materials[(unsigned int)cell->material_id()] = 1;
levels[(unsigned int)cell->level()] = 1;
if (cell->active())
- subdomains[cell->subdomain_id()+2] = 1;
+ subdomains[cell->subdomain_id()+2] = 1;
level_subdomains[cell->level_subdomain_id()+2] = 1;
}
}
// count the level subdomains being used
- for(int level_subdomain_index = 0; level_subdomain_index < 256; level_subdomain_index++)
- {
- if(level_subdomains[level_subdomain_index]) n_level_subdomains++;
- }
+ for (int level_subdomain_index = 0; level_subdomain_index < 256; level_subdomain_index++)
+ {
+ if (level_subdomains[level_subdomain_index]) n_level_subdomains++;
+ }
switch (svg_flags.coloring)
{
- case GridOutFlags::Svg::material_id:
+ case GridOutFlags::Svg::material_id:
n = n_materials;
break;
case GridOutFlags::Svg::level_number:
case GridOutFlags::Svg::subdomain_id:
n = n_subdomains;
break;
- case GridOutFlags::Svg::level_subdomain_id: n = n_level_subdomains;
+ case GridOutFlags::Svg::level_subdomain_id:
+ n = n_level_subdomains;
break;
default:
break;
case GridOutFlags::Svg::subdomain_id:
while (!subdomains[labeling_index]) labeling_index++;
break;
- case GridOutFlags::Svg::level_subdomain_id:
- while(!level_subdomains[labeling_index]) labeling_index++;
- break;
+ case GridOutFlags::Svg::level_subdomain_id:
+ while (!level_subdomains[labeling_index]) labeling_index++;
+ break;
default:
break;
}
out << (unsigned int)cell->level();
break;
case GridOutFlags::Svg::subdomain_id:
- if (cell->active())
- out << cell->subdomain_id() + 2;
- else
- out << 'X';
+ if (cell->active())
+ out << cell->subdomain_id() + 2;
+ else
+ out << 'X';
+ break;
+ case GridOutFlags::Svg::level_subdomain_id:
+ out << cell->level_subdomain_id() + 2;
break;
- case GridOutFlags::Svg::level_subdomain_id:
- out << cell->level_subdomain_id() + 2;
- break;
default:
break;
}
if (svg_flags.label_subdomain_id)
{
if (svg_flags.label_level_number
- || svg_flags.label_cell_index
- || svg_flags.label_material_id)
- out << ',';
- if (cell->active())
- out << static_cast<int>(cell->subdomain_id());
- else
- out << 'X';
+ || svg_flags.label_cell_index
+ || svg_flags.label_material_id)
+ out << ',';
+ if (cell->active())
+ out << static_cast<int>(cell->subdomain_id());
+ else
+ out << 'X';
}
- if(svg_flags.label_level_subdomain_id)
- {
- if(svg_flags.label_level_number
- || svg_flags.label_cell_index
- || svg_flags.label_material_id
- || svg_flags.label_subdomain_id)
- out << ',';
- out << static_cast<int>(cell->level_subdomain_id());
- }
+ if (svg_flags.label_level_subdomain_id)
+ {
+ if (svg_flags.label_level_number
+ || svg_flags.label_cell_index
+ || svg_flags.label_material_id
+ || svg_flags.label_subdomain_id)
+ out << ',';
+ out << static_cast<int>(cell->level_subdomain_id());
+ }
out << "</text>" << '\n';
}
<< "\">"
<< "subdomain_id";
- if(svg_flags.label_level_subdomain_id)
- out << ',';
+ if (svg_flags.label_level_subdomain_id)
+ out << ',';
out << "</text>" << '\n';
}
- if(svg_flags.label_level_subdomain_id)
- {
- out << " <text x= \"" << width + additional_width + static_cast<unsigned int>(.5 + (height/100.) * 2.)
- << "\" y=\"" << static_cast<unsigned int>(.5 + (height/100.) * margin_in_percent + (++line_offset) * 1.5 * font_size )
- << "\" style=\"text-anchor:start; font-style:oblique; font-size:" << font_size
- << "\">"
- << "level_subdomain_id"
- << "</text>" << '\n';
- }
+ if (svg_flags.label_level_subdomain_id)
+ {
+ out << " <text x= \"" << width + additional_width + static_cast<unsigned int>(.5 + (height/100.) * 2.)
+ << "\" y=\"" << static_cast<unsigned int>(.5 + (height/100.) * margin_in_percent + (++line_offset) * 1.5 * font_size )
+ << "\" style=\"text-anchor:start; font-style:oblique; font-size:" << font_size
+ << "\">"
+ << "level_subdomain_id"
+ << "</text>" << '\n';
+ }
}
// show azimuth angle and polar angle as text below the explanation of the cell labeling
case 3:
out << "subdomain_id";
break;
- case 4: out << "level_subdomain_id";
- break;
+ case 4:
+ out << "level_subdomain_id";
+ break;
default:
break;
}
case GridOutFlags::Svg::subdomain_id:
while (!subdomains[labeling_index]) labeling_index++;
break;
- case GridOutFlags::Svg::level_subdomain_id: while(!level_subdomains[labeling_index]) labeling_index++;
- break;
+ case GridOutFlags::Svg::level_subdomain_id:
+ while (!level_subdomains[labeling_index]) labeling_index++;
+ break;
default:
break;
}
{
case 2:
out << "\nlist f 0 1 2 3"
- << "\n";
+ << "\n";
break;
case 3:
out << "\nlist f 0 2 4 6 | 1 3 5 7 | 0 4 1 5 | 2 6 3 7 | 0 1 2 3 | 4 5 6 7"
- << "\n";
+ << "\n";
break;
default:
Assert (false, ExcNotImplemented ());
{
// get the last used cell
cell_iterator cell = last();
-
+
if (cell != end())
{
// then move to the last active one
if (cell->active()==true)
- return cell;
+ return cell;
while ((--cell).state() == IteratorState::valid)
- if (cell->active()==true)
- return cell;
+ if (cell->active()==true)
+ return cell;
}
return cell;
}
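// A minimal usage sketch of last_active(), assuming an already
// refined Triangulation<2> named tria (illustrative only):
//
//   // last_active() returns an iterator to the last active cell,
//   // or a past-the-end iterator on an empty triangulation:
//   if (tria.last_active() != tria.end())
//     deallog << "level of last active cell: "
//             << tria.last_active()->level() << std::endl;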
// get the last used cell on this level
cell_iterator cell = last(level);
- if (cell != end(level))
- {
- // then move to the last active one
- if (cell->active()==true)
- return cell;
- while ((--cell).state() == IteratorState::valid)
- if (cell->active()==true)
- return cell;
- }
+ if (cell != end(level))
+ {
+ // then move to the last active one
+ if (cell->active()==true)
+ return cell;
+ while ((--cell).state() == IteratorState::valid)
+ if (cell->active()==true)
+ return cell;
+ }
return cell;
}
{
case 1:
Assert (level<n_global_levels() || level<levels.size(), ExcInvalidLevel(level));
-
+
if (level >= levels.size() || levels[level]->cells.cells.size() == 0)
return end_line();
case 3:
{
Assert (level<n_global_levels() || level<levels.size(), ExcInvalidLevel(level));
-
+
if (level >= levels.size() || levels[level]->cells.cells.size() == 0)
return end_hex();
-
+
return raw_hex_iterator (const_cast<Triangulation<dim,spacedim>*>(this),
level,
0);
if ((line_center(0) == this->center(0))
&&
((std::fabs(vertices[0].distance(this->center)-this->radius) >
- 1e-5*this->radius)
+ 1e-5*this->radius)
||
(std::fabs(vertices[1].distance(this->center)-this->radius) >
- 1e-5*this->radius)))
+ 1e-5*this->radius)))
return line_center;
else
return HyperBallBoundary<dim>::get_new_point_on_line (line);
// non-invalid value later on
dof_handler.faces->lines.dof_offsets
= std::vector<types::global_dof_index> (dof_handler.tria->n_raw_lines(),
- DoFHandler<dim,spacedim>::invalid_dof_index);
+ DoFHandler<dim,spacedim>::invalid_dof_index);
dof_handler.faces->lines.dofs
= std::vector<types::global_dof_index> (n_line_slots,
DoFHandler<dim,spacedim>::invalid_dof_index);
{
dof_handler.levels[level]->dof_object.dof_offsets
= std::vector<types::global_dof_index> (dof_handler.tria->n_raw_hexs(level),
- DoFHandler<dim,spacedim>::invalid_dof_index);
+ DoFHandler<dim,spacedim>::invalid_dof_index);
types::global_dof_index next_free_dof = 0;
for (typename DoFHandler<dim,spacedim>::active_cell_iterator
{
dof_handler.faces->quads.dof_offsets
= std::vector<types::global_dof_index> (dof_handler.tria->n_raw_quads(),
- DoFHandler<dim,spacedim>::invalid_dof_index);
+ DoFHandler<dim,spacedim>::invalid_dof_index);
dof_handler.faces->quads.dofs
= std::vector<types::global_dof_index> (n_quad_slots,
DoFHandler<dim,spacedim>::invalid_dof_index);
max_couplings_between_dofs (const DoFHandler<1,spacedim> &dof_handler)
{
return std::min(static_cast<types::global_dof_index> (3*
- dof_handler.finite_elements->max_dofs_per_vertex() +
- 2*dof_handler.finite_elements->max_dofs_per_line()),
- dof_handler.n_dofs());
+ dof_handler.finite_elements->max_dofs_per_vertex() +
+ 2*dof_handler.finite_elements->max_dofs_per_line()),
+ dof_handler.n_dofs());
}
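// The bound above reflects the 1d coupling stencil. A sketch for a
// continuous Q2 element (1 dof per vertex, 1 per line; illustrative
// numbers):
//   3 * 1 + 2 * 1 = 5
// i.e. a dof couples with at most the dofs of its own cell and of
// the two neighboring cells, and never with more dofs than exist
// globally, hence the std::min against dof_handler.n_dofs().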
ExcMessage ("Global number of degrees of freedom is too large."));
number_cache.n_locally_owned_dofs_per_processor
= std::vector<types::global_dof_index> (1,
- (types::global_dof_index) number_cache.n_global_dofs);
+ (types::global_dof_index) number_cache.n_global_dofs);
number_cache.locally_owned_dofs_per_processor
= std::vector<IndexSet> (1,
void
ChunkSparsityPattern::reinit (
- const size_type m,
- const size_type n,
- const VectorSlice<const std::vector<size_type> > &row_lengths,
- const size_type chunk_size)
+ const size_type m,
+ const size_type n,
+ const VectorSlice<const std::vector<size_type> > &row_lengths,
+ const size_type chunk_size)
{
Assert (row_lengths.size() == m, ExcInvalidNumber (m));
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
for (size_type row=0; row<matrix.m(); ++row)
{
for (size_type col=0; col<matrix.n(); ++col)
- if (matrix(row,col) != 0)
- ++entries_per_row[row];
+ if (matrix(row,col) != 0)
+ ++entries_per_row[row];
if ((matrix.m() == matrix.n())
- &&
- (matrix(row,row) == 0))
- ++entries_per_row[row];
+ &&
+ (matrix(row,row) == 0))
+ ++entries_per_row[row];
}
reinit (matrix.m(), matrix.n(),
-ChunkSparsityPattern::size_type
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::max_entries_per_row () const
{
return sparsity_pattern.max_entries_per_row() * chunk_size;
-ChunkSparsityPattern::size_type
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::row_length (const size_type i) const
{
Assert (i<rows, ExcIndexRange(i,0,rows));
-ChunkSparsityPattern::size_type
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::n_nonzero_elements () const
{
if ((n_rows() % chunk_size == 0)
else
{
- // if columns don't align, then just iterate over all chunks and see
- // what this leads to. follow the advice in the documentation of the
- // sparsity pattern iterators to do the loop over individual rows,
- // rather than all elements
- size_type n = 0;
-
- for (size_type row = 0; row < sparsity_pattern.n_rows(); ++row)
- {
- SparsityPattern::const_iterator p = sparsity_pattern.begin(row);
- for (; p!=sparsity_pattern.end(row); ++p)
- if ((row != sparsity_pattern.n_rows() - 1)
- &&
- (p->column() != sparsity_pattern.n_cols() - 1))
- n += chunk_size * chunk_size;
- else if ((row == sparsity_pattern.n_rows() - 1)
- &&
- (p->column() != sparsity_pattern.n_cols() - 1))
- // last chunk row, but not last chunk column. only a smaller
- // number (n_rows % chunk_size) of rows actually exist
- n += (n_rows() % chunk_size) * chunk_size;
- else if ((row != sparsity_pattern.n_rows() - 1)
- &&
- (p->column() == sparsity_pattern.n_cols() - 1))
- // last chunk column, but not row
- n += (n_cols() % chunk_size) * chunk_size;
- else
- // bottom right chunk
- n += (n_cols() % chunk_size) *
- (n_rows() % chunk_size);
- }
-
- return n;
+ // if columns don't align, then just iterate over all chunks and see
+ // what this leads to. follow the advice in the documentation of the
+ // sparsity pattern iterators to do the loop over individual rows,
+ // rather than all elements
+ size_type n = 0;
+
+ for (size_type row = 0; row < sparsity_pattern.n_rows(); ++row)
+ {
+ SparsityPattern::const_iterator p = sparsity_pattern.begin(row);
+ for (; p!=sparsity_pattern.end(row); ++p)
+ if ((row != sparsity_pattern.n_rows() - 1)
+ &&
+ (p->column() != sparsity_pattern.n_cols() - 1))
+ n += chunk_size * chunk_size;
+ else if ((row == sparsity_pattern.n_rows() - 1)
+ &&
+ (p->column() != sparsity_pattern.n_cols() - 1))
+ // last chunk row, but not last chunk column. only a smaller
+ // number (n_rows % chunk_size) of rows actually exist
+ n += (n_rows() % chunk_size) * chunk_size;
+ else if ((row != sparsity_pattern.n_rows() - 1)
+ &&
+ (p->column() == sparsity_pattern.n_cols() - 1))
+ // last chunk column, but not row
+ n += (n_cols() % chunk_size) * chunk_size;
+ else
+ // bottom right chunk
+ n += (n_cols() % chunk_size) *
+ (n_rows() % chunk_size);
+ }
+
+ return n;
}
}
}
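// A worked sketch of the four branches above, for an illustrative
// pattern with n_rows()=5, n_cols()=5 and chunk_size=2:
//   - a fully interior chunk covers 2*2 = 4 matrix entries,
//   - a chunk in the last chunk row only covers (5%2)*2 = 2,
//   - a chunk in the last chunk column only covers 2*(5%2) = 2,
//   - the bottom right chunk covers (5%2)*(5%2) = 1,
// and summing these per allocated chunk yields the exact nonzero
// count despite the ragged boundary.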
-ChunkSparsityPattern::size_type
+ChunkSparsityPattern::size_type
ChunkSparsityPattern::bandwidth () const
{
// calculate the bandwidth from that of the underlying sparsity
-CompressedSetSparsityPattern::size_type
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::max_entries_per_row () const
{
size_type m = 0;
-CompressedSetSparsityPattern::size_type
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::bandwidth () const
{
size_type b=0;
-CompressedSetSparsityPattern::size_type
+CompressedSetSparsityPattern::size_type
CompressedSetSparsityPattern::n_nonzero_elements () const
{
size_type n=0;
-CompressedSimpleSparsityPattern::size_type
+CompressedSimpleSparsityPattern::size_type
CompressedSimpleSparsityPattern::max_entries_per_row () const
{
size_type m = 0;
-CompressedSimpleSparsityPattern::size_type
+CompressedSimpleSparsityPattern::size_type
CompressedSimpleSparsityPattern::bandwidth () const
{
size_type b=0;
-CompressedSimpleSparsityPattern::size_type
+CompressedSimpleSparsityPattern::size_type
CompressedSimpleSparsityPattern::n_nonzero_elements () const
{
size_type n=0;
-CompressedSparsityPattern::size_type
+CompressedSparsityPattern::size_type
CompressedSparsityPattern::max_entries_per_row () const
{
size_type m = 0;
-CompressedSparsityPattern::size_type
+CompressedSparsityPattern::size_type
CompressedSparsityPattern::bandwidth () const
{
size_type b=0;
-CompressedSparsityPattern::size_type
+CompressedSparsityPattern::size_type
CompressedSparsityPattern::n_nonzero_elements () const
{
size_type n=0;
// won't modify the size any more after this point.
{
std::vector<size_type> new_lines (lines_cache.size(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
size_type counter = 0;
for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
line!=lines.end(); ++line, ++counter)
for (ConstraintLine::Entries::const_iterator j=other_line->begin();
j!=other_line->end(); ++j)
tmp.push_back (std::pair<size_type,double>(j->first,
- j->second*weight));
+ j->second*weight));
line->inhomogeneity += other_constraints.get_inhomogeneity(line->entries[i].first) *
weight;
q!=lines[distribute[column]].entries.size();
++q)
{
- const size_type
+ const size_type
new_col = lines[distribute[column]].entries[q].first;
sparsity.add (row, new_col);
q!=lines[distribute[column]].entries.size();
++q)
{
- const size_type
+ const size_type
new_col = lines[distribute[column]].entries[q].first;
sparsity.add (row, new_col);
// otherwise, the number states which line in the constraint matrix
// handles this index
std::vector<size_type> distribute(sparsity.n_rows(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
q!=lines[distribute[column]].entries.size();
++q)
{
- const size_type
+ const size_type
new_col = lines[distribute[column]].entries[q].first;
sparsity.add (row, new_col);
// otherwise, the number states which line in the constraint matrix
// handles this index
std::vector<size_type> distribute (sparsity.n_rows(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = c;
// otherwise, the number states which line in the constraint matrix
// handles this index
std::vector<size_type> distribute (sparsity.n_rows(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = static_cast<signed int>(c);
// otherwise, the number states which line in the constraint matrix
// handles this index
std::vector<size_type> distribute (sparsity.n_rows(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = static_cast<signed int>(c);
// otherwise, the number states which line in the constraint matrix
// handles this index
std::vector<size_type> distribute (sparsity.n_rows(),
- numbers::invalid_size_type);
+ numbers::invalid_size_type);
for (size_type c=0; c<lines.size(); ++c)
distribute[lines[c].line] = static_cast<signed int>(c);
if (distribute[global_col] == numbers::invalid_size_type)
// distribute entry at irregular row @p{row} and
// regular column global_col.
- { for (size_type q=0;
+ {
+ for (size_type q=0;
q!=lines[distribute[row]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[q].first,
global_col);
else
// distribute entry at irregular row @p{row} and
// irregular column @p{global_col}
- { for (size_type p=0;
+ {
+ for (size_type p=0;
p!=lines[distribute[row]].entries.size(); ++p)
for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
sparsity.add (lines[distribute[row]].entries[p].first,
-ConstraintMatrix::size_type
+ConstraintMatrix::size_type
ConstraintMatrix::max_constraint_indirections () const
{
size_type return_value = 0;
- MatrixBase::size_type
+ MatrixBase::size_type
MatrixBase::n () const
{
PetscInt n_rows, n_cols;
- MatrixBase::size_type
+ MatrixBase::size_type
MatrixBase::local_size () const
{
PetscInt n_rows, n_cols;
- MatrixBase::size_type
+ MatrixBase::size_type
MatrixBase::n_nonzero_elements () const
{
MatInfo mat_info;
- MatrixBase::size_type
+ MatrixBase::size_type
MatrixBase::
row_length (const size_type row) const
{
MatrixBase &
MatrixBase::add (const MatrixBase &other,
- const PetscScalar factor)
+ const PetscScalar factor)
{
const int ierr = MatAXPY (matrix, factor,
- other, DIFFERENT_NONZERO_PATTERN);
+ other, DIFFERENT_NONZERO_PATTERN);
Assert (ierr == 0, ExcPETScError(ierr));
return *this;
}
-
+
void
MatrixBase::vmult (VectorBase &dst,
void
MatrixBase::print (std::ostream &out,
- const bool alternative_output) const
+ const bool alternative_output) const
{
std::pair<MatrixBase::size_type, MatrixBase::size_type>
loc_range = local_range();
void
BlockSparseMatrix::
reinit(const std::vector<IndexSet> &rows,
- const std::vector<IndexSet> &cols,
- const BlockCompressedSimpleSparsityPattern &bcsp,
- const MPI_Comm &com)
+ const std::vector<IndexSet> &cols,
+ const BlockCompressedSimpleSparsityPattern &bcsp,
+ const MPI_Comm &com)
{
Assert(rows.size() == bcsp.n_block_rows(), ExcMessage("invalid size"));
Assert(cols.size() == bcsp.n_block_cols(), ExcMessage("invalid size"));
clear();
this->sub_objects.reinit (bcsp.n_block_rows(),
- bcsp.n_block_cols());
+ bcsp.n_block_cols());
std::vector<types::global_dof_index> row_sizes;
for (unsigned int r=0; r<bcsp.n_block_rows(); ++r)
- row_sizes.push_back( bcsp.block(r,0).n_rows() );
+ row_sizes.push_back( bcsp.block(r,0).n_rows() );
this->row_block_indices.reinit (row_sizes);
std::vector<types::global_dof_index> col_sizes;
for (unsigned int c=0; c<bcsp.n_block_cols(); ++c)
- col_sizes.push_back( bcsp.block(0,c).n_cols() );
+ col_sizes.push_back( bcsp.block(0,c).n_cols() );
this->column_block_indices.reinit (col_sizes);
for (unsigned int r=0; r<this->n_block_rows(); ++r)
- for (unsigned int c=0; c<this->n_block_cols(); ++c)
- {
- Assert(rows[r].size() == bcsp.block(r,c).n_rows(), ExcMessage("invalid size"));
- Assert(cols[c].size() == bcsp.block(r,c).n_cols(), ExcMessage("invalid size"));
-
- BlockType *p = new BlockType();
- p->reinit(rows[r],
- cols[c],
- bcsp.block(r,c),
- com);
- this->sub_objects[r][c] = p;
- }
+ for (unsigned int c=0; c<this->n_block_cols(); ++c)
+ {
+ Assert(rows[r].size() == bcsp.block(r,c).n_rows(), ExcMessage("invalid size"));
+ Assert(cols[c].size() == bcsp.block(r,c).n_cols(), ExcMessage("invalid size"));
+
+ BlockType *p = new BlockType();
+ p->reinit(rows[r],
+ cols[c],
+ bcsp.block(r,c),
+ com);
+ this->sub_objects[r][c] = p;
+ }
collect_sizes();
}
void
BlockSparseMatrix::
reinit(const std::vector<IndexSet> &sizes,
- const BlockCompressedSimpleSparsityPattern &bcsp,
- const MPI_Comm &com)
+ const BlockCompressedSimpleSparsityPattern &bcsp,
+ const MPI_Comm &com)
{
reinit(sizes, sizes, bcsp, com);
}
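// A minimal usage sketch of the one-argument variant above, assuming
// a 2x2 Stokes-like block system (all names illustrative; the
// pattern bcsp must already be set up consistently in parallel):
//
//   std::vector<IndexSet> partitioning (2);
//   partitioning[0] = locally_owned_velocity_dofs;
//   partitioning[1] = locally_owned_pressure_dofs;
//
//   PETScWrappers::MPI::BlockSparseMatrix system_matrix;
//   system_matrix.reinit (partitioning, bcsp, MPI_COMM_WORLD);
//   // collect_sizes() is called internally by reinit().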
template <typename SparsityType>
void
SparseMatrix::
- reinit (const IndexSet & local_rows,
- const IndexSet & local_columns,
- const SparsityType &sparsity_pattern,
- const MPI_Comm &communicator)
+ reinit (const IndexSet &local_rows,
+ const IndexSet &local_columns,
+ const SparsityType &sparsity_pattern,
+ const MPI_Comm &communicator)
{
this->communicator = communicator;
template <typename SparsityType>
void
SparseMatrix::
- do_reinit (const IndexSet & local_rows,
- const IndexSet & local_columns,
- const SparsityType &sparsity_pattern)
+ do_reinit (const IndexSet &local_rows,
+ const IndexSet &local_columns,
+ const SparsityType &sparsity_pattern)
{
Assert(sparsity_pattern.n_rows()==local_rows.size(),
- ExcMessage("SparsityPattern and IndexSet have different number of rows"));
+ ExcMessage("SparsityPattern and IndexSet have different number of rows"));
Assert(sparsity_pattern.n_cols()==local_columns.size(),
- ExcMessage("SparsityPattern and IndexSet have different number of columns"));
+ ExcMessage("SparsityPattern and IndexSet have different number of columns"));
Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
- ExcMessage("PETSc only supports contiguous row/column ranges"));
+ ExcMessage("PETSc only supports contiguous row/column ranges"));
- // create the matrix. We do not set row length but set the
- // correct SparsityPattern later.
- int ierr;
+ // create the matrix. We do not set row length but set the
+ // correct SparsityPattern later.
+ int ierr;
+
+ ierr = MatCreate(communicator,&matrix);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = MatSetSizes(matrix,
+ local_rows.n_elements(),
+ local_columns.n_elements(),
+ sparsity_pattern.n_rows(),
+ sparsity_pattern.n_cols());
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
- ierr = MatCreate(communicator,&matrix);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
+ ierr = MatSetType(matrix,MATMPIAIJ);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+
+ // next preset the exact given matrix
+ // entries with zeros. this doesn't avoid any
+ // memory allocations, but it at least
+ // avoids some searches later on. the
+ // key here is that we can use the
+ // matrix set routines that set an
+ // entire row at once, not a single
+ // entry at a time
+ //
+ // for the usefulness of this option
+ // read the documentation of this
+ // class.
+ //if (preset_nonzero_locations == true)
+ if (local_rows.n_elements()>0)
+ {
+ Assert(local_columns.n_elements()>0, ExcInternalError());
+ // MatMPIAIJSetPreallocationCSR
+ // can be used to allocate the sparsity
+ // pattern of a matrix
- ierr = MatSetSizes(matrix,
- local_rows.n_elements(),
- local_columns.n_elements(),
- sparsity_pattern.n_rows(),
- sparsity_pattern.n_cols());
- AssertThrow (ierr == 0, ExcPETScError(ierr));
+ const PetscInt local_row_start = local_rows.nth_index_in_set(0);
+ const PetscInt
+ local_row_end = local_row_start + local_rows.n_elements();
- ierr = MatSetType(matrix,MATMPIAIJ);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
+ // first set up the column number
+ // array for the rows to be stored
+ // on the local processor. have one
+ // dummy entry at the end to make
+ // sure petsc doesn't read past the
+ // end
+ std::vector<PetscInt>
- // next preset the exact given matrix
- // entries with zeros. this doesn't avoid any
- // memory allocations, but it at least
- // avoids some searches later on. the
- // key here is that we can use the
- // matrix set routines that set an
- // entire row at once, not a single
- // entry at a time
- //
- // for the usefulness of this option
- // read the documentation of this
- // class.
- //if (preset_nonzero_locations == true)
- if (local_rows.n_elements()>0)
+ rowstart_in_window (local_row_end - local_row_start + 1, 0),
+ colnums_in_window;
+ {
+ unsigned int n_cols = 0;
+ for (PetscInt i=local_row_start; i<local_row_end; ++i)
{
- Assert(local_columns.n_elements()>0, ExcInternalError());
- // MatMPIAIJSetPreallocationCSR
- // can be used to allocate the sparsity
- // pattern of a matrix
-
- const PetscInt local_row_start = local_rows.nth_index_in_set(0);
- const PetscInt
- local_row_end = local_row_start + local_rows.n_elements();
-
-
- // first set up the column number
- // array for the rows to be stored
- // on the local processor. have one
- // dummy entry at the end to make
- // sure petsc doesn't read past the
- // end
- std::vector<PetscInt>
-
- rowstart_in_window (local_row_end - local_row_start + 1, 0),
- colnums_in_window;
- {
- unsigned int n_cols = 0;
- for (PetscInt i=local_row_start; i<local_row_end; ++i)
- {
- const PetscInt row_length = sparsity_pattern.row_length(i);
- rowstart_in_window[i+1-local_row_start]
- = rowstart_in_window[i-local_row_start] + row_length;
- n_cols += row_length;
- }
- colnums_in_window.resize (n_cols+1, -1);
- }
-
- // now copy over the information
- // from the sparsity pattern.
- {
- PetscInt* ptr = & colnums_in_window[0];
-
- for (PetscInt i=local_row_start; i<local_row_end; ++i)
- {
- typename SparsityType::row_iterator
- row_start = sparsity_pattern.row_begin(i),
- row_end = sparsity_pattern.row_end(i);
-
- std::copy(row_start, row_end, ptr);
- ptr += row_end - row_start;
- }
- }
-
-
- // then call the petsc function
- // that summarily allocates these
- // entries:
- MatMPIAIJSetPreallocationCSR (matrix,
- &rowstart_in_window[0],
- &colnums_in_window[0],
- 0);
+ const PetscInt row_length = sparsity_pattern.row_length(i);
+ rowstart_in_window[i+1-local_row_start]
+ = rowstart_in_window[i-local_row_start] + row_length;
+ n_cols += row_length;
}
- else
- {
- PetscInt i=0;
- MatMPIAIJSetPreallocationCSR (matrix,
- &i,
- &i,
- 0);
+ colnums_in_window.resize (n_cols+1, -1);
+ }
+ // now copy over the information
+ // from the sparsity pattern.
+ {
+ PetscInt *ptr = &colnums_in_window[0];
- }
- compress (dealii::VectorOperation::insert);
-
- {
-
- // Tell PETSc that we are not
- // planning on adding new entries
- // to the matrix. Generate errors
- // in debug mode.
- int ierr;
- #if DEAL_II_PETSC_VERSION_LT(3,0,0)
- #ifdef DEBUG
- ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- #else
- ierr = MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- #endif
- #else
- #ifdef DEBUG
- ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- #else
- ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- #endif
- #endif
-
- // Tell PETSc to keep the
- // SparsityPattern entries even if
- // we delete a row with
- // clear_rows() which calls
- // MatZeroRows(). Otherwise one can
- // not write into that row
- // afterwards.
- #if DEAL_II_PETSC_VERSION_LT(3,0,0)
- ierr = MatSetOption (matrix, MAT_KEEP_ZEROED_ROWS);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- #elif DEAL_II_PETSC_VERSION_LT(3,1,0)
- ierr = MatSetOption (matrix, MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- #else
- ierr = MatSetOption (matrix, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
- #endif
+ for (PetscInt i=local_row_start; i<local_row_end; ++i)
+ {
+ typename SparsityType::row_iterator
+ row_start = sparsity_pattern.row_begin(i),
+ row_end = sparsity_pattern.row_end(i);
+ std::copy(row_start, row_end, ptr);
+ ptr += row_end - row_start;
}
+ }
+
+
+ // then call the petsc function
+ // that summarily allocates these
+ // entries:
+ MatMPIAIJSetPreallocationCSR (matrix,
+ &rowstart_in_window[0],
+ &colnums_in_window[0],
+ 0);
+ }
+ else
+ {
+ PetscInt i=0;
+ MatMPIAIJSetPreallocationCSR (matrix,
+ &i,
+ &i,
+ 0);
+
+
+ }
+ compress (dealii::VectorOperation::insert);
+
+ {
+
+ // Tell PETSc that we are not
+ // planning on adding new entries
+ // to the matrix. Generate errors
+ // in debug mode.
+ int ierr;
+#if DEAL_II_PETSC_VERSION_LT(3,0,0)
+#ifdef DEBUG
+ ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+ ierr = MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+#else
+#ifdef DEBUG
+ ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+ ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+#endif
+
+ // Tell PETSc to keep the
+ // SparsityPattern entries even if
+ // we delete a row with
+ // clear_rows() which calls
+ // MatZeroRows(). Otherwise one can
+ // not write into that row
+ // afterwards.
+#if DEAL_II_PETSC_VERSION_LT(3,0,0)
+ ierr = MatSetOption (matrix, MAT_KEEP_ZEROED_ROWS);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#elif DEAL_II_PETSC_VERSION_LT(3,1,0)
+ ierr = MatSetOption (matrix, MAT_KEEP_ZEROED_ROWS, PETSC_TRUE);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+ ierr = MatSetOption (matrix, MAT_KEEP_NONZERO_PATTERN, PETSC_TRUE);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+
+ }
}
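// A condensed sketch of the CSR preallocation performed above, for
// an illustrative 3x3 tridiagonal pattern owned entirely by one
// rank (error checking omitted; `matrix' as in the enclosing code):
//
//   const PetscInt rowstart[4] = {0, 2, 5, 7};       // row offsets
//   const PetscInt colnums[7]  = {0,1, 0,1,2, 1,2};  // column ids
//   MatMPIAIJSetPreallocationCSR (matrix, rowstart, colnums, 0);
//
// PETSc then knows the complete nonzero layout up front, before any
// entries are actually written into the matrix.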
local_row_start += local_rows_per_process[p];
local_col_start += local_columns_per_process[p];
}
- const size_type
+ const size_type
local_row_end = local_row_start + local_rows_per_process[this_process];
#if DEAL_II_PETSC_VERSION_LT(2,3,3)
//at least starting from 2.3.3 (tested,
//see below)
- const size_type
+ const size_type
local_col_end = local_col_start + local_columns_per_process[this_process];
// then count the elements in- and
// now copy over the information
// from the sparsity pattern.
{
- PetscInt* ptr = & colnums_in_window[0];
+ PetscInt *ptr = &colnums_in_window[0];
for (size_type i=local_row_start; i<local_row_end; ++i)
{
template void
SparseMatrix::
- reinit (const IndexSet &,
+ reinit (const IndexSet &,
const IndexSet &,
const CompressedSimpleSparsityPattern &,
- const MPI_Comm &);
+ const MPI_Comm &);
template void
SparseMatrix::do_reinit (const SparsityPattern &,
const bool);
template void
- SparseMatrix::
- do_reinit (const IndexSet &,
- const IndexSet &,
- const CompressedSimpleSparsityPattern &);
+ SparseMatrix::
+ do_reinit (const IndexSet &,
+ const IndexSet &,
+ const CompressedSimpleSparsityPattern &);
PetscScalar
Vector::Vector (const IndexSet &local,
- const MPI_Comm &communicator)
- :
- communicator (communicator)
+ const MPI_Comm &communicator)
+ :
+ communicator (communicator)
{
Assert(local.is_contiguous(), ExcNotImplemented());
Vector::create_vector(local.size(), local.n_elements());
Vector::Vector (const MPI_Comm &communicator,
- const IndexSet &local)
- :
- communicator (communicator)
+ const IndexSet &local)
+ :
+ communicator (communicator)
{
Assert(local.is_contiguous(), ExcNotImplemented());
Vector::create_vector(local.size(), local.n_elements());
Vector::operator = (const PETScWrappers::Vector &v)
{
Assert(last_action==VectorOperation::unknown,
- ExcMessage("Call to compress() required before calling operator=."));
+ ExcMessage("Call to compress() required before calling operator=."));
//TODO [TH]: can not access v.last_action here. Implement is_compressed()?
//Assert(v.last_action==VectorOperation::unknown,
// ExcMessage("Call to compress() required before calling operator=."));
}
PetscErrorCode SparseDirectMUMPS::convergence_test (KSP /*ksp*/,
- const PetscInt iteration,
- const PetscReal residual_norm,
- KSPConvergedReason *reason,
- void *solver_control_x)
+ const PetscInt iteration,
+ const PetscReal residual_norm,
+ KSPConvergedReason *reason,
+ void *solver_control_x)
{
SolverControl &solver_control = *reinterpret_cast<SolverControl *>(solver_control_x);
// convert, unless we want to play dirty
// tricks with conversions of pointers
const std::vector<PetscInt>
- int_row_lengths (row_lengths.begin(), row_lengths.end());
+ int_row_lengths (row_lengths.begin(), row_lengths.end());
const int ierr
= MatCreateSeqAIJ(PETSC_COMM_SELF, m, n, 0,
attained_ownership(true)
{
Assert( multithread_info.is_running_single_threaded(),
- ExcMessage("PETSc does not support multi-threaded access, set "
- "the thread limit to 1 in MPI_InitFinalize()."));
+ ExcMessage("PETSc does not support multi-threaded access, set "
+ "the thread limit to 1 in MPI_InitFinalize()."));
}
attained_ownership(true)
{
Assert( multithread_info.is_running_single_threaded(),
- ExcMessage("PETSc does not support multi-threaded access, set "
- "the thread limit to 1 in MPI_InitFinalize()."));
+ ExcMessage("PETSc does not support multi-threaded access, set "
+ "the thread limit to 1 in MPI_InitFinalize()."));
int ierr = VecDuplicate (v.vector, &vector);
AssertThrow (ierr == 0, ExcPETScError(ierr));
attained_ownership(false)
{
Assert( multithread_info.is_running_single_threaded(),
- ExcMessage("PETSc does not support multi-threaded access, set "
- "the thread limit to 1 in MPI_InitFinalize()."));
+ ExcMessage("PETSc does not support multi-threaded access, set "
+ "the thread limit to 1 in MPI_InitFinalize()."));
}
- VectorBase::size_type
+ VectorBase::size_type
VectorBase::size () const
{
PetscInt sz;
- VectorBase::size_type
+ VectorBase::size_type
VectorBase::local_size () const
{
PetscInt sz;
return mean;
#else // PETSC_USE_COMPLEX
- Assert ((false),
- ExcMessage ("Your PETSc/SLEPc installation was configured with scalar-type complex "
- "but this function is not defined for complex types."));
+ Assert ((false),
+ ExcMessage ("Your PETSc/SLEPc installation was configured with scalar-type complex "
+ "but this function is not defined for complex types."));
- // Prevent compiler warning about no return value
- PetscScalar dummy;
- return dummy;
+ // Prevent compiler warning about no return value
+ PetscScalar dummy;
+ return dummy;
#endif
}
}
void
- SolverBase::solve (const size_type n_eigenpairs,
- size_type *n_converged)
+ SolverBase::solve (const size_type n_eigenpairs,
+ size_type *n_converged)
{
int ierr;
// set target eigenvalues to solve for
ierr = EPSSetTarget (solver_data->eps, target_eigenvalue);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
-
+
// set which portion of the eigenspectrum to solve for
ierr = EPSSetWhichEigenpairs (solver_data->eps, set_which);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
// get converged eigenpair
int ierr = EPSGetEigenpair (solver_data->eps, index,
- &eigenvalues, PETSC_NULL,
- eigenvectors, PETSC_NULL);
+ &eigenvalues, PETSC_NULL,
+ eigenvectors, PETSC_NULL);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
}
{
#ifndef PETSC_USE_COMPLEX
AssertThrow (solver_data.get() != 0, ExcSLEPcWrappersUsageError());
-
+
// get converged eigenpair
int ierr = EPSGetEigenpair (solver_data->eps, index,
- &real_eigenvalues, &imag_eigenvalues,
- real_eigenvectors, imag_eigenvectors);
+ &real_eigenvalues, &imag_eigenvalues,
+ real_eigenvectors, imag_eigenvectors);
AssertThrow (ierr == 0, ExcSLEPcError(ierr));
#else
Assert ((false),
/* ---------------------- LAPACK ------------------------- */
SolverLAPACK::SolverLAPACK (SolverControl &cn,
- const MPI_Comm &mpi_communicator,
- const AdditionalData &data)
+ const MPI_Comm &mpi_communicator,
+ const AdditionalData &data)
:
SolverBase (cn, mpi_communicator),
additional_data (data)
for (size_type row = 0; row < matrix.m(); ++row)
{
for (typename Matrix::const_iterator p=matrix.begin(row);
- p!=matrix.end(row); ++p)
+ p!=matrix.end(row); ++p)
{
// write entry into the first free one for this row
Ai[row_pointers[row]] = p->column();
for (size_type row = 0; row < matrix.m(); ++row)
{
for (typename Matrix::const_iterator ptr = matrix.begin (row);
- ptr != matrix.end (row); ++ptr)
+ ptr != matrix.end (row); ++ptr)
if (std::abs (ptr->value ()) > 0.0)
{
a[index] = ptr->value ();
std::size_t vec_len = 0;
for (size_type i=0; i<m; ++i)
vec_len += std::min(static_cast<size_type>(store_diagonal_first_in_row ?
- std::max(row_lengths[i], 1U) :
- row_lengths[i]),
+ std::max(row_lengths[i], 1U) :
+ row_lengths[i]),
n);
// sometimes, no entries are
max_row_length = (row_lengths.size() == 0 ?
0 :
std::min (static_cast<size_type>(*std::max_element(row_lengths.begin(),
- row_lengths.end())),
+ row_lengths.end())),
n));
if (store_diagonal_first_in_row && (max_row_length==0) && (m!=0))
rowstart[i] = rowstart[i-1] +
(store_diagonal_first_in_row ?
std::max(std::min(static_cast<size_type>(row_lengths[i-1]),n),
- static_cast<size_type> (1U)) :
+ static_cast<size_type> (1U)) :
std::min(static_cast<size_type>(row_lengths[i-1]),n));
Assert ((rowstart[rows]==vec_len)
||
-SparsityPattern::size_type
+SparsityPattern::size_type
SparsityPattern::max_entries_per_row () const
{
// if compress() has not yet been
-SparsityPattern::size_type
+SparsityPattern::size_type
SparsityPattern::operator () (const size_type i,
const size_type j) const
{
&colnums[rowstart[i]]);
const size_type *const p
= Utilities::lower_bound<const size_type *> (sorted_region_start,
- &colnums[rowstart[i+1]],
- j);
+ &colnums[rowstart[i+1]],
+ j);
if ((p != &colnums[rowstart[i+1]]) && (*p == j))
return (p - &colnums[0]);
else
-SparsityPattern::size_type
+SparsityPattern::size_type
SparsityPattern::row_position (const size_type i, const size_type j) const
{
Assert ((rowstart!=0) && (colnums!=0), ExcEmptyObject());
-SparsityPattern::size_type
+SparsityPattern::size_type
SparsityPattern::bandwidth () const
{
Assert ((rowstart!=0) && (colnums!=0), ExcEmptyObject());
for (size_type d=0; d<constant_modes_dimension; ++d)
for (size_type row=0; row<my_size; ++row)
{
- TrilinosWrappers::types::int_type global_row_id =
+ TrilinosWrappers::types::int_type global_row_id =
constant_modes_are_global ? gid(domain_map,row) : row;
distributed_constant_modes[d][row] =
additional_data.constant_modes[d][global_row_id];
// equidistributed map; avoid
// storing the nonzero
// elements.
- vector_distributor.reset (new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n_rows),
- 0, communicator));
+ vector_distributor.reset (new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n_rows),
+ 0, communicator));
if (trilinos_matrix.get() == 0)
trilinos_matrix.reset (new SparseMatrix());
SolverBase::SolverBase (const enum SolverBase::SolverName solver_name,
SolverControl &cn)
- :
- solver_name (solver_name),
- solver_control (cn)
+ :
+ solver_name (solver_name),
+ solver_control (cn)
{}
{
namespace
{
- // distinguish between compressed sparsity types that define row_begin()
- // and SparsityPattern that uses begin() as iterator type
+ // distinguish between compressed sparsity types that define row_begin()
+ // and SparsityPattern that uses begin() as iterator type
template <typename Sparsity>
void copy_row (const Sparsity &csp,
const size_type row,
column_space_map.reset (new Epetra_Map (input_col_map));
const size_type first_row = min_my_gid(input_row_map),
- last_row = max_my_gid(input_row_map)+1;
+ last_row = max_my_gid(input_row_map)+1;
std::vector<int> n_entries_per_row(last_row-first_row);
for (size_type row=first_row; row<last_row; ++row)
// check whether we got the number of columns right.
AssertDimension (sparsity_pattern.n_cols(),static_cast<size_type>(
- n_global_cols(*graph)));
+ n_global_cols(*graph)));
// And now finally generate the matrix.
matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
// Extract local indices in
// the matrix.
int trilinos_i = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
- trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
+ trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
TrilinosScalar value = 0.;
// If the data is not on the
// Extract local indices in
// the matrix.
int trilinos_i = matrix->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
- trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
+ trilinos_j = matrix->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
TrilinosScalar value = 0.;
// If the data is not on the
SparseMatrix::memory_consumption () const
{
size_type static_memory = sizeof(this) + sizeof (*matrix)
- + sizeof(*matrix->Graph().DataPtr());
+ + sizeof(*matrix->Graph().DataPtr());
return ((sizeof(TrilinosScalar)+sizeof(TrilinosWrappers::types::int_type))*
- matrix->NumMyNonzeros() + sizeof(int)*local_size() + static_memory);
+ matrix->NumMyNonzeros() + sizeof(int)*local_size() + static_memory);
}
}
return graph.NumGlobalRows();
}
- int n_global_cols(const Epetra_CrsGraph &graph)
+ int n_global_cols(const Epetra_CrsGraph &graph)
{
return graph.NumGlobalCols();
}
{
return graph.GRID(i);
}
- #else
+#else
long long int n_global_elements (const Epetra_BlockMap &map)
{
return map.NumGlobalElements64();
// would it point to?)
Assert (ncols != 0, ExcInternalError());
colnum_cache.reset (new std::vector<size_type> (colnums,
- colnums+ncols));
+ colnums+ncols));
}
}
compressed (true)
{
column_space_map.reset(new Epetra_Map (TrilinosWrappers::types::int_type(0),
- TrilinosWrappers::types::int_type(0),
- Utilities::Trilinos::comm_self()));
+ TrilinosWrappers::types::int_type(0),
+ Utilities::Trilinos::comm_self()));
graph.reset (new Epetra_FECrsGraph(View,
- *column_space_map,
- *column_space_map,
- 0));
+ *column_space_map,
+ *column_space_map,
+ 0));
graph->FillComplete();
}
:
Subscriptor(),
column_space_map (new Epetra_Map(TrilinosWrappers::types::int_type(0),
- TrilinosWrappers::types::int_type(0),
- Utilities::Trilinos::comm_self())),
+ TrilinosWrappers::types::int_type(0),
+ Utilities::Trilinos::comm_self())),
compressed (false),
graph (new Epetra_FECrsGraph(View,
- *column_space_map,
*column_space_map,
- 0))
+ *column_space_map,
+ 0))
{
Assert (input_sparsity.n_rows() == 0,
ExcMessage ("Copy constructor only works for empty sparsity patterns."));
const size_type n_entries_per_row)
{
const Epetra_Map rows (TrilinosWrappers::types::int_type(m), 0,
- Utilities::Trilinos::comm_self());
+ Utilities::Trilinos::comm_self());
const Epetra_Map columns (TrilinosWrappers::types::int_type(n), 0,
- Utilities::Trilinos::comm_self());
+ Utilities::Trilinos::comm_self());
reinit (rows, columns, n_entries_per_row);
}
const std::vector<size_type> &n_entries_per_row)
{
const Epetra_Map rows (TrilinosWrappers::types::int_type(m), 0,
- Utilities::Trilinos::comm_self());
+ Utilities::Trilinos::comm_self());
const Epetra_Map columns (TrilinosWrappers::types::int_type(n), 0,
- Utilities::Trilinos::comm_self());
+ Utilities::Trilinos::comm_self());
reinit (rows, columns, n_entries_per_row);
}
namespace
{
typedef dealii::types::global_dof_index size_type;
- // distinguish between compressed sparsity types that define row_begin()
- // and SparsityPattern that uses begin() as iterator type
+ // distinguish between compressed sparsity types that define row_begin()
+ // and SparsityPattern that uses begin() as iterator type
template <typename Sparsity>
void copy_row (const Sparsity &csp,
const size_type row,
ExcMessage ("This function is not efficient if the map is not contiguous."));
const size_type first_row = min_my_gid(input_row_map),
- last_row = max_my_gid(input_row_map)+1;
+ last_row = max_my_gid(input_row_map)+1;
std::vector<int> n_entries_per_row(last_row - first_row);
- // Trilinos wants the row length as an int
- // this is hopefully never going to be a problem.
+ // Trilinos wants the row length as an int
+ // this is hopefully never going to be a problem.
for (size_type row=first_row; row<last_row; ++row)
n_entries_per_row[row-first_row] = static_cast<int>(sp.row_length(row));
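The static_cast above narrows a 64-bit size_type row length to the int that Trilinos expects; the comment hopes the value always fits. A defensive variant would assert that it does before casting. A minimal sketch, with a made-up helper name:

#include <cassert>
#include <cstdint>
#include <limits>

// Narrow a 64-bit row length to int, asserting that it fits.
int checked_to_int (const std::uint64_t row_length)
{
  assert (row_length <=
          static_cast<std::uint64_t>(std::numeric_limits<int>::max()));
  return static_cast<int>(row_length);
}

int main () { return checked_to_int(42) == 42 ? 0 : 1; }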
SparsityPattern::copy_from (const SparsityType &sp)
{
const Epetra_Map rows (TrilinosWrappers::types::int_type(sp.n_rows()), 0,
- Utilities::Trilinos::comm_self());
+ Utilities::Trilinos::comm_self());
const Epetra_Map columns (TrilinosWrappers::types::int_type(sp.n_cols()), 0,
- Utilities::Trilinos::comm_self());
+ Utilities::Trilinos::comm_self());
reinit (rows, columns, sp);
}
// the pointer and generate an
// empty sparsity pattern.
column_space_map.reset (new Epetra_Map (TrilinosWrappers::types::int_type(0),
- TrilinosWrappers::types::int_type(0),
- Utilities::Trilinos::comm_self()));
+ TrilinosWrappers::types::int_type(0),
+ Utilities::Trilinos::comm_self()));
graph.reset (new Epetra_FECrsGraph(View, *column_space_map,
*column_space_map, 0));
graph->FillComplete();
// Extract local indices in
// the matrix.
int trilinos_i = graph->LRID(static_cast<TrilinosWrappers::types::int_type>(i)),
- trilinos_j = graph->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
+ trilinos_j = graph->LCID(static_cast<TrilinosWrappers::types::int_type>(j));
// If the data is not on the
// present processor, we throw
// Generate the view and make
// sure that we have not generated
// an error.
- // TODO: trilinos_i is the local row index -> it is an int but
+ // TODO: trilinos_i is the local row index -> it is an int but
// ExtractGlobalRowView requires trilinos_i to be the global row
// index and thus it should be a long long int
int ierr = graph->ExtractGlobalRowView(
- static_cast<TrilinosWrappers::types::int_type>(trilinos_i),
- nnz_extracted, col_indices);
+ static_cast<TrilinosWrappers::types::int_type>(trilinos_i),
+ nnz_extracted, col_indices);
Assert (ierr==0, ExcTrilinosError(ierr));
Assert (nnz_present == nnz_extracted,
ExcDimensionMismatch(nnz_present, nnz_extracted));
// Prepare pointers for extraction
// of a view of the row.
int nnz_present = graph->NumGlobalIndices(
- static_cast<TrilinosWrappers::types::int_type>(i));
+ static_cast<TrilinosWrappers::types::int_type>(i));
int nnz_extracted;
int *col_indices;
// sure that we have not generated
// an error.
int ierr = graph->ExtractMyRowView(trilinos_i,
- nnz_extracted, col_indices);
+ nnz_extracted, col_indices);
Assert (ierr==0, ExcTrilinosError(ierr));
Assert (nnz_present == nnz_extracted,
// Search the index
int *el_find = std::find(col_indices, col_indices + nnz_present,
- static_cast<int>(trilinos_j));
+ static_cast<int>(trilinos_j));
int local_col_index = (int)(el_find - col_indices);
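The extraction-and-search sequence above is the usual CRS row access pattern: take a view of the row's column indices, locate the wanted column with std::find, and use the pointer offset of the hit as the local position of the value within the row. The same pattern on plain arrays, independent of Epetra:

#include <algorithm>
#include <iostream>
#include <vector>

int main ()
{
  // One CRS row: its stored column indices, serving as the row "view".
  std::vector<int> col_indices = {2, 5, 7, 11};
  const int nnz = static_cast<int>(col_indices.size());

  const int  wanted_col = 7;
  const int *begin = col_indices.data();
  const int *hit   = std::find (begin, begin + nnz, wanted_col);

  if (hit != begin + nnz)
    // The pointer difference is the entry's position within the row.
    std::cout << "local column index: " << (hit - begin) << '\n';
  else
    std::cout << "column not stored in this row\n";
}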
// x-y, that is we have to exchange
// the order of output
out << indices[global_row_index(*graph,static_cast<int>(j))]
- << " " << -static_cast<signed int>(row) << std::endl;
+ << " " << -static_cast<signed int>(row) << std::endl;
}
AssertThrow (out, ExcIO());
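Writing the negated row index is the standard gnuplot trick the comment alludes to: matrix rows count downwards while plot y-coordinates grow upwards, so emitting (column, -row) pairs makes the plotted sparsity pattern look like the written matrix. A minimal sketch:

#include <iostream>
#include <utility>
#include <vector>

int main ()
{
  // A few (row, column) nonzero positions of some matrix.
  const std::vector<std::pair<int,int>> entries =
    {{0,0}, {0,1}, {1,1}, {2,0}, {2,2}};

  // Emit "column  -row" so that gnuplot draws the matrix the way it
  // is usually written, with row 0 at the top.
  for (const auto &e : entries)
    std::cout << e.second << ' ' << -e.first << '\n';
}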
// by calling either the 32- or 64-bit function necessary, and returns the
// result in the correct data type so that we can use it in calling other
// Epetra member functions that are overloaded by index type
- int* my_global_elements(const Epetra_BlockMap &map)
+ int *my_global_elements(const Epetra_BlockMap &map)
{
- return map.MyGlobalElements();
+ return map.MyGlobalElements();
}
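These helpers implement the 32-/64-bit dispatch the comment describes: the preprocessor selects one overload set, and the chosen return type then flows into the other Epetra calls that are overloaded on index type. A self-contained sketch of the pattern, with a stand-in Map type and a made-up USE_64BIT_INDICES macro playing the role of DEAL_II_USE_LARGE_INDEX_TYPE:

#include <iostream>
#include <vector>

// Stand-in for an Epetra-like map; the real class offers separate
// 32-bit and 64-bit element-count queries.
struct Map
{
  std::vector<long long> elements{0, 1, 2};
  int       n_elements32 () const { return static_cast<int>(elements.size()); }
  long long n_elements64 () const { return static_cast<long long>(elements.size()); }
};

// The made-up macro selects which overload set exists in this build.
#ifndef USE_64BIT_INDICES
int n_global_elements (const Map &map) { return map.n_elements32(); }
#else
long long n_global_elements (const Map &map) { return map.n_elements64(); }
#endif

int main ()
{
  Map map;
  // The call site is identical in both configurations; the helper's
  // return type silently widens in 64-bit builds.
  std::cout << n_global_elements(map) << std::endl;
}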
// define a helper function that queries the global vector length of an
- // Epetra_FEVector object by calling either the 32- or 64-bit
+ // Epetra_FEVector object by calling either the 32- or 64-bit
// function necessary.
int global_length(const Epetra_FEVector &vector)
{
// by calling either the 32- or 64-bit function necessary, and returns the
// result in the correct data type so that we can use it in calling other
// Epetra member functions that are overloaded by index type
- long long int* my_global_elements(const Epetra_BlockMap &map)
+ long long int *my_global_elements(const Epetra_BlockMap &map)
{
- return map.MyGlobalElements64();
+ return map.MyGlobalElements64();
}
// define a helper function that queries the global vector length of an
- // Epetra_FEVector object by calling either the 32- or 64-bit
+ // Epetra_FEVector object by calling either the 32- or 64-bit
// function necessary.
long long int global_length(const Epetra_FEVector &vector)
{
VectorBase()
{
AssertThrow (n_global_elements(input_map) == n_global_elements(v.vector->Map()),
- ExcDimensionMismatch (n_global_elements(input_map),
- n_global_elements(v.vector->Map())));
+ ExcDimensionMismatch (n_global_elements(input_map),
+ n_global_elements(v.vector->Map())));
last_action = Zero;
:
VectorBase()
{
- AssertThrow (parallel_partitioner.size() ==
+ AssertThrow (parallel_partitioner.size() ==
static_cast<size_type>(n_global_elements(v.vector->Map())),
ExcDimensionMismatch (parallel_partitioner.size(),
n_global_elements(v.vector->Map())));
{
#ifndef DEAL_II_USE_LARGE_INDEX_TYPE
// define a helper function that queries the global vector length of an
- // Epetra_FEVector object by calling either the 32- or 64-bit
+ // Epetra_FEVector object by calling either the 32- or 64-bit
// function necessary.
int global_length(const Epetra_FEVector &vector)
{
}
#else
// define a helper function that queries the global vector length of an
- // Epetra_FEVector object by calling either the 32- or 64-bit
+ // Epetra_FEVector object by calling either the 32- or 64-bit
// function necessary.
long long int global_length(const Epetra_FEVector &vector)
{
// we can use []. Note that we
// can only get local values.
- const TrilinosWrappers::types::int_type local_index =
+ const TrilinosWrappers::types::int_type local_index =
vector.vector->Map().LID(static_cast<TrilinosWrappers::types::int_type>(index));
Assert (local_index >= 0,
ExcAccessToNonLocalElement (index,
{
// Extract local indices in
// the vector.
- TrilinosWrappers::types::int_type trilinos_i =
+ TrilinosWrappers::types::int_type trilinos_i =
vector->Map().LID(static_cast<TrilinosWrappers::types::int_type>(index));
TrilinosScalar value = 0.;
{
// Extract local indices in
// the vector.
- TrilinosWrappers::types::int_type trilinos_i =
+ TrilinosWrappers::types::int_type trilinos_i =
vector->Map().LID(static_cast<TrilinosWrappers::types::int_type>(index));
TrilinosScalar value = 0.;
//entry.
return sizeof(*this)
+ this->local_size()*( sizeof(double)+
- sizeof(TrilinosWrappers::types::int_type) );
+ sizeof(TrilinosWrappers::types::int_type) );
}
} /* end of namespace TrilinosWrappers */
template <int dim, int spacedim, typename number>
void
LocalIntegrator<dim, spacedim, number>::cell (DoFInfo<dim, spacedim, number> &,
- IntegrationInfo<dim, spacedim> &) const
+ IntegrationInfo<dim, spacedim> &) const
{
Assert(false, ExcPureFunction());
}
template <int dim, int spacedim, typename number>
void
LocalIntegrator<dim, spacedim, number>::boundary (DoFInfo<dim, spacedim, number> &,
- IntegrationInfo<dim, spacedim> &) const
+ IntegrationInfo<dim, spacedim> &) const
{
Assert(false, ExcPureFunction());
}
template <int dim, int spacedim, typename number>
void
LocalIntegrator<dim, spacedim, number>::face (DoFInfo<dim, spacedim, number> &,
- DoFInfo<dim, spacedim, number> &,
- IntegrationInfo<dim, spacedim> &,
- IntegrationInfo<dim, spacedim> &) const
+ DoFInfo<dim, spacedim, number> &,
+ IntegrationInfo<dim, spacedim> &,
+ IntegrationInfo<dim, spacedim> &) const
{
Assert(false, ExcPureFunction());
}
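These default implementations make cell(), boundary() and face() "runtime pure": rather than declaring them pure virtual, which would force every integrator to override all three, each default body fires an assertion, so a derived class implements only the terms it actually integrates. A standalone sketch of the pattern, with an exception standing in for Assert/ExcPureFunction:

#include <iostream>
#include <stdexcept>

struct LocalIntegratorBase
{
  virtual ~LocalIntegratorBase () = default;
  // "Runtime pure": callable only if the derived class overrides it.
  virtual void cell () const     { throw std::logic_error("cell() not implemented"); }
  virtual void boundary () const { throw std::logic_error("boundary() not implemented"); }
  virtual void face () const     { throw std::logic_error("face() not implemented"); }
};

// An integrator with only cell terms overrides just cell().
struct CellOnlyIntegrator : LocalIntegratorBase
{
  void cell () const override { std::cout << "integrating cell term\n"; }
};

int main ()
{
  CellOnlyIntegrator integrator;
  integrator.cell();                 // fine
  try { integrator.face(); }         // would abort the assembly loop
  catch (const std::logic_error &e) { std::cout << e.what() << '\n'; }
}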
endc = dof.end(level);
for (; cell!=endc; ++cell)
{
- if (!cell->is_locally_owned_on_level()) continue;
-
+ if (!cell->is_locally_owned_on_level()) continue;
+
cell->get_mg_dof_indices (dofs_on_this_cell);
// make sparsity pattern for this cell
for (unsigned int i=0; i<dofs_per_cell; ++i)
endc = dof.end(level);
for (; cell!=endc; ++cell)
{
- if (!cell->is_locally_owned_on_level()) continue;
-
+ if (!cell->is_locally_owned_on_level()) continue;
+
cell->get_mg_dof_indices (dofs_on_this_cell);
// Loop over all interior neighbors
for (unsigned int face = 0;
for (; cell!=endc; ++cell)
{
- if (!cell->is_locally_owned_on_level()) continue;
-
- cell->get_mg_dof_indices (dofs_on_this_cell);
+ if (!cell->is_locally_owned_on_level()) continue;
+
+ cell->get_mg_dof_indices (dofs_on_this_cell);
// make sparsity pattern for this cell
for (unsigned int i=0; i<total_dofs; ++i)
for (unsigned int j=0; j<total_dofs; ++j)
for (; cell!=endc; ++cell)
{
- if (!cell->is_locally_owned_on_level()) continue;
-
+ if (!cell->is_locally_owned_on_level()) continue;
+
cell->get_mg_dof_indices (dofs_on_this_cell);
// Loop over all interior neighbors
for (unsigned int face = 0;
for (; cell!=endc; ++cell)
if (dof.get_tria().locally_owned_subdomain()==numbers::invalid_subdomain_id
|| cell->level_subdomain_id()==dof.get_tria().locally_owned_subdomain())
- for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
- ++face_no)
- {
- if (cell->at_boundary(face_no) == false)
- continue;
+ for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
+ ++face_no)
+ {
+ if (cell->at_boundary(face_no) == false)
+ continue;
- const FiniteElement<dim> &fe = cell->get_fe();
- const unsigned int level = cell->level();
-
- // we can presently deal only with
- // primitive elements for boundary
- // values. this does not preclude
- // us using non-primitive elements
- // in components that we aren't
- // interested in, however. make
- // sure that all shape functions
- // that are non-zero for the
- // components we are interested in,
- // are in fact primitive
- for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
- {
- const ComponentMask &nonzero_component_array
- = cell->get_fe().get_nonzero_components (i);
- for (unsigned int c=0; c<n_components; ++c)
- if ((nonzero_component_array[c] == true)
- &&
- (component_mask[c] == true))
- Assert (cell->get_fe().is_primitive (i),
- ExcMessage ("This function can only deal with requested boundary "
- "values that correspond to primitive (scalar) base "
- "elements"));
- }
+ const FiniteElement<dim> &fe = cell->get_fe();
+ const unsigned int level = cell->level();
+
+ // we can presently deal only with
+ // primitive elements for boundary
+ // values. this does not preclude
+ // us from using non-primitive elements
+ // in components that we aren't
+ // interested in, however. make
+ // sure that all shape functions
+ // that are non-zero for the
+ // components we are interested in,
+ // are in fact primitive
+ for (unsigned int i=0; i<cell->get_fe().dofs_per_cell; ++i)
+ {
+ const ComponentMask &nonzero_component_array
+ = cell->get_fe().get_nonzero_components (i);
+ for (unsigned int c=0; c<n_components; ++c)
+ if ((nonzero_component_array[c] == true)
+ &&
+ (component_mask[c] == true))
+ Assert (cell->get_fe().is_primitive (i),
+ ExcMessage ("This function can only deal with requested boundary "
+ "values that correspond to primitive (scalar) base "
+ "elements"));
+ }
- typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_no);
- const types::boundary_id boundary_component = face->boundary_indicator();
- if (function_map.find(boundary_component) != function_map.end())
- // face is of the right component
- {
- // get indices, physical location and
- // boundary values of dofs on this
- // face
- local_dofs.resize (fe.dofs_per_face);
- face->get_mg_dof_indices (level, local_dofs);
- if (fe_is_system)
- {
- // enter those dofs
- // into the list that
- // match the
- // component
- // signature. avoid
- // the usual
- // complication that
- // we can't just use
- // *_system_to_component_index
- // for non-primitive
- // FEs
- for (unsigned int i=0; i<local_dofs.size(); ++i)
- {
- unsigned int component;
- if (fe.is_primitive())
- component = fe.face_system_to_component_index(i).first;
- else
- {
- // non-primitive
- // case. make
- // sure that
- // this
- // particular
- // shape
- // function
- // _is_
- // primitive,
- // and get at
- // it's
- // component. use
- // usual
- // trick to
- // transfer
- // face dof
- // index to
- // cell dof
- // index
- const unsigned int cell_i
- = (dim == 1 ?
- i
- :
- (dim == 2 ?
- (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
- :
- (dim == 3 ?
- (i<4*fe.dofs_per_vertex ?
- i
+ typename DoFHandler<dim,spacedim>::face_iterator face = cell->face(face_no);
+ const types::boundary_id boundary_component = face->boundary_indicator();
+ if (function_map.find(boundary_component) != function_map.end())
+ // face is of the right component
+ {
+ // get indices, physical location and
+ // boundary values of dofs on this
+ // face
+ local_dofs.resize (fe.dofs_per_face);
+ face->get_mg_dof_indices (level, local_dofs);
+ if (fe_is_system)
+ {
+ // enter those dofs
+ // into the list that
+ // match the
+ // component
+ // signature. avoid
+ // the usual
+ // complication that
+ // we can't just use
+ // *_system_to_component_index
+ // for non-primitive
+ // FEs
+ for (unsigned int i=0; i<local_dofs.size(); ++i)
+ {
+ unsigned int component;
+ if (fe.is_primitive())
+ component = fe.face_system_to_component_index(i).first;
+ else
+ {
+ // non-primitive
+ // case. make
+ // sure that
+ // this
+ // particular
+ // shape
+ // function
+ // _is_
+ // primitive,
+ // and get at
+ // its
+ // component. use
+ // usual
+ // trick to
+ // transfer
+ // face dof
+ // index to
+ // cell dof
+ // index
+ const unsigned int cell_i
+ = (dim == 1 ?
+ i
+ :
+ (dim == 2 ?
+ (i<2*fe.dofs_per_vertex ? i : i+2*fe.dofs_per_vertex)
:
- (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
- i+4*fe.dofs_per_vertex
+ (dim == 3 ?
+ (i<4*fe.dofs_per_vertex ?
+ i
+ :
+ (i<4*fe.dofs_per_vertex+4*fe.dofs_per_line ?
+ i+4*fe.dofs_per_vertex
+ :
+ i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
:
- i+4*fe.dofs_per_vertex+8*fe.dofs_per_line))
- :
- numbers::invalid_unsigned_int)));
- Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
-
- // make sure
- // that if
- // this is
- // not a
- // primitive
- // shape function,
- // then all
- // the
- // corresponding
- // components
- // in the
- // mask are
- // not set
+ numbers::invalid_unsigned_int)));
+ Assert (cell_i < fe.dofs_per_cell, ExcInternalError());
+
+ // make sure
+ // that if
+ // this is
+ // not a
+ // primitive
+ // shape function,
+ // then all
+ // the
+ // corresponding
+ // components
+ // in the
+ // mask are
+ // not set
// if (!fe.is_primitive(cell_i))
// for (unsigned int c=0; c<n_components; ++c)
// if (fe.get_nonzero_components(cell_i)[c])
// components. if shape function is non-primitive, then we will ignore
// the result in the following anyway, otherwise there's only one
// non-zero component which we will use
- component = fe.get_nonzero_components(cell_i).first_selected_component();
- }
+ component = fe.get_nonzero_components(cell_i).first_selected_component();
+ }
- if (component_mask[component] == true)
- boundary_indices[level].insert(local_dofs[i]);
- }
- }
- else
- for (unsigned int i=0; i<local_dofs.size(); ++i)
- boundary_indices[level].insert(local_dofs[i]);
- }
- }
+ if (component_mask[component] == true)
+ boundary_indices[level].insert(local_dofs[i]);
+ }
+ }
+ else
+ for (unsigned int i=0; i<local_dofs.size(); ++i)
+ boundary_indices[level].insert(local_dofs[i]);
+ }
+ }
}
}
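Stripped of the finite-element bookkeeping, the loop above applies one filter: for every dof on a boundary face, determine its vector component, and insert its index into the per-level set only if the component mask selects that component. A reduced sketch with made-up dof data:

#include <iostream>
#include <set>
#include <vector>

int main ()
{
  // Made-up face dofs: global index and the vector component each belongs to.
  const std::vector<unsigned int> local_dofs = {17, 18, 19, 20};
  const std::vector<unsigned int> component  = { 0,  1,  0,  2};
  const std::vector<bool> component_mask     = {true, false, true}; // select x and z

  std::set<unsigned int> boundary_indices;
  for (unsigned int i = 0; i < local_dofs.size(); ++i)
    if (component_mask[component[i]])
      boundary_indices.insert (local_dofs[i]);

  for (const unsigned int idx : boundary_indices)
    std::cout << idx << '\n';        // prints 17, 19, 20
}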
cell != mg_dof.end(level); ++cell)
if (cell->has_children() &&
( mg_dof.get_tria().locally_owned_subdomain()==numbers::invalid_subdomain_id
- || cell->level_subdomain_id()==mg_dof.get_tria().locally_owned_subdomain()
- ))
+ || cell->level_subdomain_id()==mg_dof.get_tria().locally_owned_subdomain()
+ ))
{
cell->get_mg_dof_indices (dof_indices_parent);
}
}
}
-
+
internal::MatrixSelector<VECTOR>::reinit(*prolongation_matrices[level],
- *prolongation_sparsities[level],
- level,
- csp,
- mg_dof);
+ *prolongation_sparsities[level],
+ level,
+ csp,
+ mg_dof);
csp.reinit(0,0);
-
+
FullMatrix<double> prolongation;
-
+
// now actually build the matrices
for (typename DoFHandler<dim,spacedim>::cell_iterator cell=mg_dof.begin(level);
cell != mg_dof.end(level); ++cell)
if (cell->has_children() &&
(mg_dof.get_tria().locally_owned_subdomain()==numbers::invalid_subdomain_id
|| cell->level_subdomain_id()==mg_dof.get_tria().locally_owned_subdomain())
- )
+ )
{
cell->get_mg_dof_indices (dof_indices_parent);
prolongation
= mg_dof.get_fe().get_prolongation_matrix (child,
cell->refinement_case());
-
- if (mg_constrained_dofs != 0 && mg_constrained_dofs->set_boundary_values())
- for (unsigned int j=0;j<dofs_per_cell; ++j)
- if (mg_constrained_dofs->is_boundary_index(level, dof_indices_parent[j]))
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- prolongation(i,j) = 0.;
+
+ if (mg_constrained_dofs != 0 && mg_constrained_dofs->set_boundary_values())
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ if (mg_constrained_dofs->is_boundary_index(level, dof_indices_parent[j]))
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ prolongation(i,j) = 0.;
cell->child(child)->get_mg_dof_indices (dof_indices_child);
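The column clearing above keeps constrained boundary values from leaking across levels: if parent dof j is flagged as a boundary index, its whole column of the element prolongation matrix is zeroed, so that dof contributes nothing to the children. The same operation on a plain matrix, with a predicate array standing in for MGConstrainedDoFs::is_boundary_index():

#include <array>
#include <iostream>

int main ()
{
  constexpr unsigned int dofs_per_cell = 3;
  std::array<std::array<double, dofs_per_cell>, dofs_per_cell> prolongation =
    {{{1.0, 0.5, 0.0},
      {0.0, 0.5, 0.5},
      {0.0, 0.0, 1.0}}};

  // Stand-in: parent dof 1 is constrained on the boundary.
  const std::array<bool, dofs_per_cell> is_boundary = {false, true, false};

  for (unsigned int j = 0; j < dofs_per_cell; ++j)
    if (is_boundary[j])
      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        prolongation[i][j] = 0.;     // clear the whole column

  for (const auto &row : prolongation)
    {
      for (const double v : row)
        std::cout << v << ' ';
      std::cout << '\n';
    }
}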
// of the global and mgdof, so that we later not access non-local elements
// in copy_to/from_mg.
// We keep track in the bitfield dof_touched which global dof has
- // been processed already (on the current level). This is the same as
+ // been processed already (on the current level). This is the same as
// the multigrid running in serial.
- // Only entering on the finest level gives wrong results (why?)
+ // Only entering on the finest level gives wrong results (why?)
copy_indices.resize(n_levels);
copy_indices_from_me.resize(n_levels);
{
if (mg_dof.get_tria().locally_owned_subdomain()!=numbers::invalid_subdomain_id
&& (level_cell->level_subdomain_id()==numbers::artificial_subdomain_id
- || level_cell->subdomain_id()==numbers::artificial_subdomain_id)
- )
+ || level_cell->subdomain_id()==numbers::artificial_subdomain_id)
+ )
continue;
// get the dof numbers of this cell for the global and the level-wise
if (global_mine && level_mine)
copy_indices[level].push_back(
- std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
+ std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
else if (level_mine)
copy_indices_from_me[level].push_back(
- std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
+ std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
else if (global_mine)
copy_indices_to_me[level].push_back(
- std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
+ std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
else
continue;
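The if/else chain above files each dof into one of three transfer lists according to its ownership in the global versus the level numbering: owned in both means a plain local copy, owned only on the level means data this process must send, owned only globally means data it must receive. A self-contained sketch of that classification:

#include <iostream>
#include <utility>
#include <vector>

int main ()
{
  struct Dof { unsigned int global, level; bool global_mine, level_mine; };
  // Made-up dofs with their ownership in the two numberings.
  const std::vector<Dof> dofs = {{3, 7, true,  true},
                                 {4, 8, false, true},
                                 {5, 9, true,  false}};

  std::vector<std::pair<unsigned int, unsigned int>> copy_indices,
                                                     copy_indices_from_me,
                                                     copy_indices_to_me;
  for (const Dof &d : dofs)
    {
      if (d.global_mine && d.level_mine)
        copy_indices.emplace_back (d.global, d.level);          // local copy
      else if (d.level_mine)
        copy_indices_from_me.emplace_back (d.global, d.level);  // we send
      else if (d.global_mine)
        copy_indices_to_me.emplace_back (d.global, d.level);    // we receive
      // neither: some other process handles this dof entirely
    }

  std::cout << copy_indices.size() << ' '
            << copy_indices_from_me.size() << ' '
            << copy_indices_to_me.size() << '\n';               // 1 1 1
}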
// more reliable output for regression tests
#ifdef DEBUG
std::less<std::pair<types::global_dof_index, unsigned int> > compare;
- for (unsigned int level=0;level<copy_indices.size();++level)
+ for (unsigned int level=0; level<copy_indices.size(); ++level)
std::sort(copy_indices[level].begin(), copy_indices[level].end(), compare);
- for (unsigned int level=0;level<copy_indices_from_me.size();++level)
+ for (unsigned int level=0; level<copy_indices_from_me.size(); ++level)
std::sort(copy_indices_from_me[level].begin(), copy_indices_from_me[level].end(), compare);
- for (unsigned int level=0;level<copy_indices_to_me.size();++level)
+ for (unsigned int level=0; level<copy_indices_to_me.size(); ++level)
std::sort(copy_indices_to_me[level].begin(), copy_indices_to_me[level].end(), compare);
#endif
}
template <class VECTOR>
void
-MGTransferPrebuilt<VECTOR>::print_matrices (std::ostream& os) const
+MGTransferPrebuilt<VECTOR>::print_matrices (std::ostream &os) const
{
- for (unsigned int level = 0;level<prolongation_matrices.size();++level)
+ for (unsigned int level = 0; level<prolongation_matrices.size(); ++level)
{
os << "Level " << level << std::endl;
prolongation_matrices[level]->print(os);
template <class VECTOR>
void
-MGTransferPrebuilt<VECTOR>::print_indices (std::ostream& os) const
+MGTransferPrebuilt<VECTOR>::print_indices (std::ostream &os) const
{
- for (unsigned int level = 0;level<copy_indices.size();++level)
+ for (unsigned int level = 0; level<copy_indices.size(); ++level)
{
- for (unsigned int i=0;i<copy_indices[level].size();++i)
- os << "copy_indices[" << level
- << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
+ for (unsigned int i=0; i<copy_indices[level].size(); ++i)
+ os << "copy_indices[" << level
+ << "]\t" << copy_indices[level][i].first << '\t' << copy_indices[level][i].second << std::endl;
}
-
- for (unsigned int level = 0;level<copy_indices_from_me.size();++level)
+
+ for (unsigned int level = 0; level<copy_indices_from_me.size(); ++level)
{
- for (unsigned int i=0;i<copy_indices_from_me[level].size();++i)
- os << "copy_ifrom [" << level
- << "]\t" << copy_indices_from_me[level][i].first << '\t' << copy_indices_from_me[level][i].second << std::endl;
+ for (unsigned int i=0; i<copy_indices_from_me[level].size(); ++i)
+ os << "copy_ifrom [" << level
+ << "]\t" << copy_indices_from_me[level][i].first << '\t' << copy_indices_from_me[level][i].second << std::endl;
}
- for (unsigned int level = 0;level<copy_indices_to_me.size();++level)
+ for (unsigned int level = 0; level<copy_indices_to_me.size(); ++level)
{
- for (unsigned int i=0;i<copy_indices_to_me[level].size();++i)
- os << "copy_ito [" << level
- << "]\t" << copy_indices_to_me[level][i].first << '\t' << copy_indices_to_me[level][i].second << std::endl;
+ for (unsigned int i=0; i<copy_indices_to_me[level].size(); ++i)
+ os << "copy_ito [" << level
+ << "]\t" << copy_indices_to_me[level][i].first << '\t' << copy_indices_to_me[level][i].second << std::endl;
}
}
{
fe_face_values_neighbor.get_present_fe_values()
.get_function_gradients (*solutions[n],
- parallel_data.neighbor_psi[n]);
+ parallel_data.neighbor_psi[n]);
// compute the jump in the gradients
for (unsigned int component=0; component<n_components; ++component)
for (unsigned int s=0; s<n_solution_vectors; ++s)
fe_values.get_present_fe_values()
.get_function_gradients (*solutions[s],
- gradients_neighbor[s]);
+ gradients_neighbor[s]);
// extract the
// gradients of all the
std::map<types::global_dof_index,double>::const_iterator dof = boundary_values.begin(),
- endd = boundary_values.end();
+ endd = boundary_values.end();
for (; dof != endd; ++dof)
{
Assert (dof->first < n_dofs, ExcInternalError());
void (TimeDependent::*p) (const unsigned int, const unsigned int)
= &TimeDependent::end_sweep;
parallel::apply_to_subranges (0U, timesteps.size(),
- std_cxx1x::bind (p, this, std_cxx1x::_1, std_cxx1x::_2),
- 1);
+ std_cxx1x::bind (p, this, std_cxx1x::_1, std_cxx1x::_2),
+ 1);
}
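The apply_to_subranges call parallelizes end_sweep() by binding the member function to this and handing each worker a half-open slice of the timestep range. A reduced sketch of the same idea using std::thread in place of deal.II's parallel facilities (the two-way split is arbitrary):

#include <iostream>
#include <thread>
#include <vector>

struct TimeDependent
{
  std::vector<int> timesteps = std::vector<int>(10, 0);

  // Work on a half-open subrange of timesteps, as one task would.
  void end_sweep (const unsigned int begin, const unsigned int end)
  {
    for (unsigned int i = begin; i < end; ++i)
      timesteps[i] += 1;
  }

  void end_sweep_parallel ()
  {
    const unsigned int n   = static_cast<unsigned int>(timesteps.size());
    const unsigned int mid = n / 2;
    // Each worker gets a disjoint subrange, so no locking is needed.
    std::thread worker (&TimeDependent::end_sweep, this, 0u, mid);
    end_sweep (mid, n);
    worker.join();
  }
};

int main ()
{
  TimeDependent td;
  td.end_sweep_parallel();
  std::cout << td.timesteps[0] << td.timesteps[9] << '\n';  // prints 11
}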