// $Id$
// Version: $Name$
//
-// Copyright (C) 2006, 2008, 2009, 2010, 2011 by the deal.II authors
+// Copyright (C) 2006, 2008, 2009, 2010, 2011, 2012 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
const InputIterator1 &end_in_1,
const InputIterator2 &begin_in_2,
const OutputIterator &begin_out,
- FunctionObject &function)
+ FunctionObject &function)
{
InputIterator1 in_1 = begin_in_1;
InputIterator2 in_2 = begin_in_2;
* @code
parallel::transform (x.begin(), x.end(),
y.begin(),
- z.begin(),
- (boost::lambda::_1 + boost::lambda::_2),
- 1000);
+ z.begin(),
+ (boost::lambda::_1 + boost::lambda::_2),
+ 1000);
* @endcode
*
* In this example, we used the Boost lambda library to construct, on the
* fly, a function object that takes two arguments (one element from each
* of the two input ranges) and returns their sum.
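*
* If no lambda library is available, the same effect can be achieved with
* a hand-written function object; a minimal sketch (the helper struct
* <code>Sum</code> is purely illustrative, not part of deal.II):
* @code
struct Sum
{
  double operator() (const double a, const double b) const
  {
    return a + b;
  }
};

parallel::transform (x.begin(), x.end(),
                     y.begin(),
                     z.begin(),
                     Sum(),
                     1000);
* @endcode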
Vector::iterator dst_ptr = dst.begin();
for (unsigned int row=0; row<n_rows; ++row, ++dst_ptr)
- {
- double s = 0.;
- const double *const val_end_of_row = &values[rowstart[row+1]];
- while (val_ptr != val_end_of_row)
- s += *val_ptr++ * src(*colnum_ptr++);
- *dst_ptr = s;
- }
+ {
+ double s = 0.;
+ const double *const val_end_of_row = &values[rowstart[row+1]];
+ while (val_ptr != val_end_of_row)
+ s += *val_ptr++ * src(*colnum_ptr++);
+ *dst_ptr = s;
+ }
}
* @endcode
* Inside the for loop, we compute the dot product of a single row of the
* matrix with the vector <code>src</code> and write the result into the
* corresponding element of <code>dst</code>.
* @code
void SparseMatrix::vmult_one_row (const Vector &src,
Vector &dst,
- Vector::iterator &dst_row) const
+ Vector::iterator &dst_row) const
{
const unsigned int row = (dst_row - dst.begin());
double s = 0.;
const double *const val_end_of_row = &values[rowstart[row+1]];
while (val_ptr != val_end_of_row)
- s += *val_ptr++ * src(*colnum_ptr++);
+ s += *val_ptr++ * src(*colnum_ptr++);
*dst_row = s;
}
{
parallel::transform (dst.begin(), dst.end(),
std_cxx1x::bind (&SparseMatrix::vmult_one_row,
- this,
- std_cxx1x::cref(src),
- std_cxx1x::ref(dst),
- std_cxx1x::_1),
- 200);
+ this,
+ std_cxx1x::cref(src),
+ std_cxx1x::ref(dst),
+ std_cxx1x::_1),
+ 200);
}
* @endcode
* Note how we use <code>std_cxx1x::bind</code> to <i>bind</i> certain
* arguments to the <code>vmult_one_row</code> function, leaving one
* argument (the iterator into <code>dst</code>) open.
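*
* With a C++11 compiler, the same function object could also be written as
* a lambda function instead of a bind expression; a sketch under that
* assumption:
* @code
parallel::transform (dst.begin(), dst.end(),
                     [this, &src, &dst] (Vector::iterator dst_row)
                     {
                       this->vmult_one_row (src, dst, dst_row);
                     },
                     200);
* @endcode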
* @code
void
SparseMatrix::vmult_on_subrange (const unsigned int begin_row,
- const unsigned int end_row,
- const Vector &src,
- Vector &dst)
+ const unsigned int end_row,
+ const Vector &src,
+ Vector &dst)
{
const double *val_ptr = &values[rowstart[begin_row]];
const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]];
Vector::iterator dst_ptr = dst.begin() + begin_row;
for (unsigned int row=begin_row; row<end_row; ++row, ++dst_ptr)
- {
- double s = 0.;
- const double *const val_end_of_row = &values[rowstart[row+1]];
- while (val_ptr != val_end_of_row)
- s += *val_ptr++ * src(*colnum_ptr++);
- *dst_ptr = s;
- }
+ {
+ double s = 0.;
+ const double *const val_end_of_row = &values[rowstart[row+1]];
+ while (val_ptr != val_end_of_row)
+ s += *val_ptr++ * src(*colnum_ptr++);
+ *dst_ptr = s;
+ }
}
void SparseMatrix::vmult (const Vector &src,
Vector &dst) const
{
parallel::apply_to_subranges (0, n_rows(),
- std_cxx1x::bind (&SparseMatrix::vmult_on_subrange,
- this,
- std_cxx1x::_1, std_cxx1x::_2,
- std_cxx1x::cref(src),
- std_cxx1x::ref(dst)),
- 200);
+ std_cxx1x::bind (&SparseMatrix::vmult_on_subrange,
+ this,
+ std_cxx1x::_1, std_cxx1x::_2,
+ std_cxx1x::cref(src),
+ std_cxx1x::ref(dst)),
+ 200);
}
* @endcode
* Here, we call the <code>vmult_on_subrange</code> function on sub-ranges
* of at least 200 rows each, so that the overhead of spawning tasks is
* amortized over a reasonable amount of work.
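* Up to the parallel dispatch, the call above is therefore equivalent to
* the single invocation
* @code
vmult_on_subrange (0, n_rows(), src, dst);
* @endcode
* applied to the whole row range at once.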
double norm_sqr = 0;
for (unsigned int row=0; row<n_rows; ++row)
- {
- double s = 0.;
- const double *const val_end_of_row = &values[rowstart[row+1]];
- while (val_ptr != val_end_of_row)
- s += *val_ptr++ * x(*colnum_ptr++);
- norm_sqr += x(row) * s;
- }
+ {
+ double s = 0.;
+ const double *const val_end_of_row = &values[rowstart[row+1]];
+ while (val_ptr != val_end_of_row)
+ s += *val_ptr++ * x(*colnum_ptr++);
+ norm_sqr += x(row) * s;
+ }
return std::sqrt (norm_sqr);
}
* @code
double
SparseMatrix::mat_norm_sqr_on_subrange (const unsigned int begin_row,
- const unsigned int end_row,
- const Vector &x)
+ const unsigned int end_row,
+ const Vector &x)
{
const double *val_ptr = &values[rowstart[begin_row]];
const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]];
double norm_sqr = 0;
for (unsigned int row=begin_row; row<end_row; ++row)
- {
- double s = 0.;
- const double *const val_end_of_row = &values[rowstart[row+1]];
- while (val_ptr != val_end_of_row)
- s += *val_ptr++ * x(*colnum_ptr++);
- norm_sqr += x(row) * s;
- }
+ {
+ double s = 0.;
+ const double *const val_end_of_row = &values[rowstart[row+1]];
+ while (val_ptr != val_end_of_row)
+ s += *val_ptr++ * x(*colnum_ptr++);
+ norm_sqr += x(row) * s;
+ }
return norm_sqr;
}
{
return
std::sqrt
- (parallel::accumulate_from_subranges<double> (0, n_rows(),
- std_cxx1x::bind (&SparseMatrix::mat_norm_sqr_on_subrange,
- this,
- std_cxx1x::_1, std_cxx1x::_2,
- std_cxx1x::cref(x)),
- 200));
+ (parallel::accumulate_from_subranges<double> (0, n_rows(),
+ std_cxx1x::bind (&SparseMatrix::mat_norm_sqr_on_subrange,
+ this,
+ std_cxx1x::_1, std_cxx1x::_2,
+ std_cxx1x::cref(x)),
+ 200));
}
* @endcode
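* Because the contributions of disjoint row ranges to the matrix norm
* simply add up, this returns, up to the parallel splitting, the same
* value as the single call
* @code
std::sqrt (mat_norm_sqr_on_subrange (0, n_rows(), x));
* @endcode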
*
Threads::TaskGroup<void> task_group;
for (typename DoFHandler<dim>::active_cell_iterator
cell = dof_handler.begin_active();
- cell != dof_handler.end(); ++cell)
+ cell != dof_handler.end(); ++cell)
task_group += Threads::new_task (&MyClass<dim>::assemble_on_one_cell,
*this,
- cell);
+ cell);
task_group.join_all ();
}
* @endcode
void MyClass<dim>::assemble_system ()
{
WorkStream::run (dof_handler.begin_active(),
- dof_handler.end(),
- *this,
- &MyClass<dim>::assemble_on_one_cell);
+ dof_handler.end(),
+ *this,
+ &MyClass<dim>::assemble_on_one_cell);
}
* @endcode
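*
* Up to the order in which cells are visited (and the fact that several
* cells may be worked on concurrently), this is conceptually the same as
* the serial loop
* @code
for (typename DoFHandler<dim>::active_cell_iterator
       cell = dof_handler.begin_active();
     cell != dof_handler.end(); ++cell)
  assemble_on_one_cell (cell);
* @endcode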
*
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
for (unsigned int j=0; j<fe.dofs_per_cell; ++j)
for (unsigned int q=0; q<fe_values.n_quadrature_points; ++q)
- cell_matrix(i,j) += ...;
+ cell_matrix(i,j) += ...;
...same for cell_rhs...
// now copy results into global system
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
for (unsigned int j=0; j<fe.dofs_per_cell; ++j)
system_matrix.add (dof_indices[i], dof_indices[j],
- cell_matrix(i,j));
+ cell_matrix(i,j));
...same for rhs...
}
* @endcode
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
for (unsigned int j=0; j<fe.dofs_per_cell; ++j)
system_matrix.add (dof_indices[i], dof_indices[j],
- cell_matrix(i,j));
+ cell_matrix(i,j));
...same for rhs...
mutex.release ();
}
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
for (unsigned int j=0; j<fe.dofs_per_cell; ++j)
system_matrix.add (dof_indices[i], dof_indices[j],
- cell_matrix(i,j));
+ cell_matrix(i,j));
...same for rhs...
}
* @endcode
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
for (unsigned int j=0; j<fe.dofs_per_cell; ++j)
for (unsigned int q=0; q<fe_values.n_quadrature_points; ++q)
- data.cell_matrix(i,j) += ...;
+ data.cell_matrix(i,j) += ...;
...same for cell_rhs...
cell->get_dof_indices (data.dof_indices);
for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
for (unsigned int j=0; j<fe.dofs_per_cell; ++j)
system_matrix.add (data.dof_indices[i], data.dof_indices[j],
- data.cell_matrix(i,j));
+ data.cell_matrix(i,j));
...same for rhs...
}
...initialize members of per_task_data to the correct sizes...
WorkStream::run (dof_handler.begin_active(),
- dof_handler.end(),
- *this,
- &MyClass<dim>::assemble_on_one_cell,
- &MyClass<dim>::copy_local_to_global,
- per_task_data);
+ dof_handler.end(),
+ *this,
+ &MyClass<dim>::assemble_on_one_cell,
+ &MyClass<dim>::copy_local_to_global,
+ per_task_data);
}
* @endcode
*
std::vector<unsigned int> dof_indices;
PerTaskData (const FiniteElement<dim> &fe)
- :
- cell_matrix (fe.dofs_per_cell, fe.dofs_per_cell),
- cell_rhs (fe.dofs_per_cell),
- dof_indices (fe.dofs_per_cell)
+ :
+ cell_matrix (fe.dofs_per_cell, fe.dofs_per_cell),
+ cell_rhs (fe.dofs_per_cell),
+ dof_indices (fe.dofs_per_cell)
{}
};
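* With this constructor in place, the sample object that is later handed
* to WorkStream::run can be created in a single line (a sketch, assuming
* the finite element object is accessible as <code>fe</code>):
* @code
PerTaskData per_task_data (fe);
* @endcode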
ScratchData (const FiniteElement<dim> &fe,
const Quadrature<dim> &quadrature,
const UpdateFlags update_flags)
- :
- fe_values (fe, quadrature, update_flags)
+ :
+ fe_values (fe, quadrature, update_flags)
{}
-
+
ScratchData (const ScratchData &scratch)
- :
- fe_values (scratch.fe_values.get_fe(),
- scratch.fe_values.get_quadrature(),
- scratch.fe_values.get_update_flags())
+ :
+ fe_values (scratch.fe_values.get_fe(),
+ scratch.fe_values.get_quadrature(),
+ scratch.fe_values.get_update_flags())
{}
};
* @endcode
template <int dim>
void MyClass<dim>::assemble_on_one_cell (const typename DoFHandler<dim>::active_cell_iterator &cell,
ScratchData &scratch,
- PerTaskData &data)
+ PerTaskData &data)
{
scratch.fe_values.reinit (cell);
...
}
* @endcode
- * Just as for the <code>PerTaskData</code> structure, we will create a
+ * Just as for the <code>PerTaskData</code> structure, we will create a
* sample <code>ScratchData</code> object and pass it to the work stream
- * object, which will replicate it as many times as necessary. For this
- * to work, <code>ScratchData</code> structures need to be copyable. Since FEValues
- * objects are rather complex and cannot be copied implicitly, we provided
- * our own copy constructor for the <code>ScratchData</code> structure.
- *
+ * object, which will replicate it as many times as necessary. For this
+ * to work, <code>ScratchData</code> structures need to be copyable. Since FEValues
+ * objects are rather complex and cannot be copied implicitly, we provided
+ * our own copy constructor for the <code>ScratchData</code> structure.
+ *
* The same approach, putting things into the <code>ScratchData</code>
* data structure, should be used for everything that is expensive to
* construct. This holds, in particular, for everything that needs to
* allocate memory when it is created.
template <int dim>
void MyClass<dim>::assemble_on_one_cell (const typename DoFHandler<dim>::active_cell_iterator &cell,
ScratchData &scratch,
- PerTaskData &data)
+ PerTaskData &data)
{
std::vector<double> rhs_values (scratch.fe_values.n_quadrature_points);
rhs_function.value_list (scratch.fe_values.get_quadrature_points(),
ScratchData (const FiniteElement<dim> &fe,
const Quadrature<dim> &quadrature,
const UpdateFlags update_flags)
- :
- rhs_values (quadrature.size()),
- fe_values (fe, quadrature, update_flags)
+ :
+ rhs_values (quadrature.size()),
+ fe_values (fe, quadrature, update_flags)
{}
-
+
ScratchData (const ScratchData &scratch)
- :
- rhs_values (scratch.rhs_values),
- fe_values (scratch.fe_values.get_fe(),
- scratch.fe_values.get_quadrature(),
- scratch.fe_values.get_update_flags())
+ :
+ rhs_values (scratch.rhs_values),
+ fe_values (scratch.fe_values.get_fe(),
+ scratch.fe_values.get_quadrature(),
+ scratch.fe_values.get_update_flags())
{}
};
template <int dim>
void MyClass<dim>::assemble_on_one_cell (const typename DoFHandler<dim>::active_cell_iterator &cell,
ScratchData &scratch,
- PerTaskData &data)
+ PerTaskData &data)
{
rhs_function.value_list (scratch.fe_values.get_quadrature_points(),
                         scratch.rhs_values);
* identical:
* @code
WorkStream::run (dof_handler.begin_active(),
- dof_handler.end(),
- *this,
- &MyClass<dim>::assemble_on_one_cell,
- &MyClass<dim>::copy_local_to_global,
- per_task_data);
+ dof_handler.end(),
+ *this,
+ &MyClass<dim>::assemble_on_one_cell,
+ &MyClass<dim>::copy_local_to_global,
+ per_task_data);
// ...is the same as:
WorkStream::run (dof_handler.begin_active(),
- dof_handler.end(),
- std_cxx1x::bind(&MyClass<dim>::assemble_on_one_cell, *this,
- std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3),
- std_cxx1x::bind(&MyClass<dim>::copy_local_to_global, *this, std_cxx1x::_1),
- per_task_data);
+ dof_handler.end(),
+ std_cxx1x::bind(&MyClass<dim>::assemble_on_one_cell, *this,
+ std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3),
+ std_cxx1x::bind(&MyClass<dim>::copy_local_to_global, *this, std_cxx1x::_1),
+ per_task_data);
* @endcode
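* With a C++11 compiler, the two bind expressions could equally well be
* written as lambda functions; a sketch under that assumption (it presumes
* that <code>copy_local_to_global</code> takes its argument by const
* reference):
* @code
WorkStream::run (dof_handler.begin_active(),
                 dof_handler.end(),
                 [this] (const typename DoFHandler<dim>::active_cell_iterator &cell,
                         ScratchData &scratch,
                         PerTaskData &data)
                 {
                   assemble_on_one_cell (cell, scratch, data);
                 },
                 [this] (const PerTaskData &data)
                 {
                   copy_local_to_global (data);
                 },
                 per_task_data);
* @endcode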
* Note how <code>std_cxx1x::bind</code> produces a function object that takes three
* arguments by binding the member function to the <code>*this</code>
* object.
const typename DoFHandler<dim>::active_cell_iterator &cell,
ScratchData &scratch,
PerTaskData &data,
- const double current_time)
+ const double current_time)
{ ... }
* @endcode
* Because WorkStream expects to be able to call the worker function with
* only the cell iterator, the scratch object, and the per-task data
* object, we need to bind the remaining arguments before handing the
* function
* to it:
* @code
WorkStream::run (dof_handler.begin_active(),
- dof_handler.end(),
- std_cxx1x::bind(&MyClass<dim>::assemble_on_one_cell,
- *this,
- current_solution,
- std_cxx1x::_1,
- std_cxx1x::_2,
- std_cxx1x::_3,
- previous_time+time_step),
- std_cxx1x::bind(&MyClass<dim>::copy_local_to_global,
- *this, std_cxx1x::_1),
- per_task_data);
+ dof_handler.end(),
+ std_cxx1x::bind(&MyClass<dim>::assemble_on_one_cell,
+ *this,
+ current_solution,
+ std_cxx1x::_1,
+ std_cxx1x::_2,
+ std_cxx1x::_3,
+ previous_time+time_step),
+ std_cxx1x::bind(&MyClass<dim>::copy_local_to_global,
+ *this, std_cxx1x::_1),
+ per_task_data);
* @endcode
* Here, we bind the object, the linearization point argument, and the
* current time argument to the function before we hand it off to
* <code>WorkStream::run</code>.
Vector<float> error_per_cell (triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate (dof_handler,
- QGauss<dim-1>(3),
- typename FunctionMap<dim>::type(),
- solution,
- error_per_cell);
+ QGauss<dim-1>(3),
+ typename FunctionMap<dim>::type(),
+ solution,
+ error_per_cell);
thread.join ();
* @endcode
*
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008, 2011 by the deal.II authors
+// Copyright (C) 2008, 2011, 2012 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
{
for (unsigned int j=0; j<dofs_per_cell; ++j)
local_matrix(i,j) += (fe_values[velocities].value (i, q) *
- fe_values[velocities].value (j, q)
+ fe_values[velocities].value (j, q)
-
- fe_values[velocities].divergence (i, q) *
- fe_values[pressure].value (j, q)
+ fe_values[velocities].divergence (i, q) *
+ fe_values[pressure].value (j, q)
-
- fe_values[pressure].value (i, q) *
- fe_values[velocities].divergence (j, q)) *
+ fe_values[pressure].value (i, q) *
+ fe_values[velocities].divergence (j, q)) *
fe_values.JxW(q);
local_rhs(i) += - fe_values[pressure].value (i, q)
* V_j=\left(\begin{array}{c}\mathbf v_j \\ q_j\end{array}\right)$:
@f{eqnarray*}
(\mathbf v_i, \mathbf v_j)
- -
- (\mathrm{div}\ \mathbf v_i, q_j)
- -
- (q_i, \mathrm{div}\ \mathbf v_j)
+ -
+ (\mathrm{div}\ \mathbf v_i, q_j)
+ -
+ (q_i, \mathrm{div}\ \mathbf v_j)
@f}
* whereas the implementation looked like this:
* @code
local_matrix(i,j) += (fe_values[velocities].value (i, q) *
- fe_values[velocities].value (j, q)
+ fe_values[velocities].value (j, q)
-
- fe_values[velocities].divergence (i, q) *
- fe_values[pressure].value (j, q)
+ fe_values[velocities].divergence (i, q) *
+ fe_values[pressure].value (j, q)
-
- fe_values[pressure].value (i, q) *
- fe_values[velocities].divergence (j, q)
- ) *
+ fe_values[pressure].value (i, q) *
+ fe_values[velocities].divergence (j, q)
+ ) *
fe_values.JxW(q);
* @endcode
* The similarities are pretty obvious.
...
for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- {
- const Tensor<2,dim> phi_i_grad
- = fe_values[displacements].gradient (i,q_point);
- const double phi_i_div
- = fe_values[displacements].divergence (i,q_point);
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ const Tensor<2,dim> phi_i_grad
+ = fe_values[displacements].gradient (i,q_point);
+ const double phi_i_div
+ = fe_values[displacements].divergence (i,q_point);
- for (unsigned int j=0; j<dofs_per_cell; ++j)
- {
- const Tensor<2,dim> phi_j_grad
- = fe_values[displacements].gradient (j,q_point);
- const double phi_j_div
- = fe_values[displacements].divergence (j,q_point);
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ {
+ const Tensor<2,dim> phi_j_grad
+ = fe_values[displacements].gradient (j,q_point);
+ const double phi_j_div
+ = fe_values[displacements].divergence (j,q_point);
- cell_matrix(i,j)
- += (lambda_values[q_point] *
- phi_i_div * phi_j_div
- +
- mu_values[q_point] *
- scalar_product(phi_i_grad, phi_j_grad)
- +
- mu_values[q_point] *
- scalar_product(phi_i_grad, transpose(phi_j_grad))
- ) *
- fe_values.JxW(q_point);
- }
- }
+ cell_matrix(i,j)
+ += (lambda_values[q_point] *
+ phi_i_div * phi_j_div
+ +
+ mu_values[q_point] *
+ scalar_product(phi_i_grad, phi_j_grad)
+ +
+ mu_values[q_point] *
+ scalar_product(phi_i_grad, transpose(phi_j_grad))
+ ) *
+ fe_values.JxW(q_point);
+ }
+ }
* @endcode
*
* The scalar product between two tensors used in this bilinear form is
* implemented as follows:
template <int dim>
double
scalar_product (const Tensor<2,dim> &u,
- const Tensor<2,dim> &v)
+ const Tensor<2,dim> &v)
{
double tmp = 0;
for (unsigned int i=0; i<dim; ++i)
  for (unsigned int j=0; j<dim; ++j)
    tmp += u[i][j] * v[i][j];
return tmp;
}
*
* @code
for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
- for (unsigned int i=0; i<dofs_per_cell; ++i)
- {
- const SymmetricTensor<2,dim> phi_i_symmgrad
- = fe_values[displacements].symmetric_gradient (i,q_point);
- const double phi_i_div
- = fe_values[displacements].divergence (i,q_point);
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ {
+ const SymmetricTensor<2,dim> phi_i_symmgrad
+ = fe_values[displacements].symmetric_gradient (i,q_point);
+ const double phi_i_div
+ = fe_values[displacements].divergence (i,q_point);
- for (unsigned int j=0; j<dofs_per_cell; ++j)
- {
- const SymmetricTensor<2,dim> phi_j_symmgrad
- = fe_values[displacements].symmetric_gradient (j,q_point);
- const double phi_j_div
- = fe_values[displacements].divergence (j,q_point);
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ {
+ const SymmetricTensor<2,dim> phi_j_symmgrad
+ = fe_values[displacements].symmetric_gradient (j,q_point);
+ const double phi_j_div
+ = fe_values[displacements].divergence (j,q_point);
- cell_matrix(i,j)
- += (phi_i_div * phi_j_div *
- lambda_values[q_point]
- +
- 2 *
- (phi_i_symmgrad * phi_j_symmgrad) *
- mu_values[q_point]) *
- fe_values.JxW(q_point);
- }
- }
+ cell_matrix(i,j)
+ += (phi_i_div * phi_j_div *
+ lambda_values[q_point]
+ +
+ 2 *
+ (phi_i_symmgrad * phi_j_symmgrad) *
+ mu_values[q_point]) *
+ fe_values.JxW(q_point);
+ }
+ }
* @endcode
*
* So if, again, this is not the code we use in step-8, what do we do in
* that program?
cg.solve (system_matrix.block(0,0),
solution.block(0),
- tmp,
+ tmp,
PreconditionIdentity());
* @endcode
*
DataOut<dim> data_out;
data_out.attach_dof_handler (dof_handler);
data_out.add_data_vector (solution, solution_names,
- DataOut<dim>::type_dof_data,
- data_component_interpretation);
+ DataOut<dim>::type_dof_data,
+ data_component_interpretation);
data_out.build_patches ();
* @endcode
* In other words, we here create an array of <code>dim+1</code> elements in
* which we declare the first <code>dim</code> components of the solution
* vector to be parts of a single vector (the velocity) and the last
* component to be a scalar (the pressure).
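*
* A sketch of how these two arrays are typically filled for such a
* velocity-pressure system (the component names are illustrative):
* @code
std::vector<std::string> solution_names (dim, "velocity");
solution_names.push_back ("pressure");

std::vector<DataComponentInterpretation::DataComponentInterpretation>
  data_component_interpretation
  (dim, DataComponentInterpretation::component_is_part_of_vector);
data_component_interpretation
  .push_back (DataComponentInterpretation::component_is_scalar);
* @endcode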