template <int dim>
const SymmetricTensor<4, dim>
- StandardTensors<dim>::dev_P = (II - (1.0 / dim) * IxI);
+ StandardTensors<dim>::dev_P = deviator_tensor<dim>();
}
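// As a sanity check, one could confirm that the library call used above
// agrees with the explicit formula it replaces. A minimal sketch (needs
// <deal.II/base/symmetric_tensor.h>; the check is illustrative only):
// @code
//   const SymmetricTensor<4, dim> P_lib = deviator_tensor<dim>();
//   const SymmetricTensor<4, dim> P_man =
//     identity_tensor<dim>() -
//     (1.0 / dim) * outer_product(unit_symmetric_tensor<dim>(),
//                                 unit_symmetric_tensor<dim>());
//   Assert((P_lib - P_man).norm() < 1e-12, ExcInternalError());
// @endcode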
// @sect3{Time class}
}
// Derivative of the volumetric free
- // energy wrt $\widetilde{J}$ return
+ // energy with respect to $\widetilde{J}$; returns
// $\frac{\partial
// \Psi_{\text{vol}}(\widetilde{J})}{\partial
// \widetilde{J}}$
// and the first and second derivatives
// of the volumetric free energy.
//
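// For instance, assuming the volumetric function
// $\Psi_{\text{vol}}(\widetilde{J})
// = \frac{\kappa}{4} (\widetilde{J}^2 - 1 - 2\ln\widetilde{J})$
// used elsewhere in this program, the two derivatives evaluate to
// @f{align*}
//  \frac{\partial \Psi_{\text{vol}}}{\partial \widetilde{J}}
//  = \frac{\kappa}{2} \left( \widetilde{J} - \frac{1}{\widetilde{J}} \right)
//  \qquad \text{and} \qquad
//  \frac{\partial^2 \Psi_{\text{vol}}}{\partial \widetilde{J}^2}
//  = \frac{\kappa}{2} \left( 1 + \frac{1}{\widetilde{J}^2} \right) .
// @f}
//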
- // Finally, we store the inverse of
+ // We also store the inverse of
// the deformation gradient since
// we frequently use it:
+ F_inv = invert(F);
tau = material->get_tau();
-
Jc = material->get_Jc();
dPsi_vol_dJ = material->get_dPsi_vol_dJ();
d2Psi_vol_dJ2 = material->get_d2Psi_vol_dJ2();
- F_inv = invert(F);
+
}
// We offer an interface to retrieve
return material->get_det_F();
}
- Tensor<2, dim> get_F_inv() const
+ const Tensor<2, dim>& get_F_inv() const
{
return F_inv;
}
return material->get_p_tilde();
}
- SymmetricTensor<2, dim> get_tau() const
+ const SymmetricTensor<2, dim>& get_tau() const
{
return tau;
}
}
// and finally the tangent
- SymmetricTensor<4, dim> get_Jc() const
+ const SymmetricTensor<4, dim>& get_Jc() const
{
return Jc;
}
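// Returning these tensors by constant reference is worthwhile: a
// SymmetricTensor<4, 3> stores 36 independent components, and the
// getters are called for every quadrature point during assembly. A
// minimal usage sketch (the variable name qph is illustrative):
// @code
//   const SymmetricTensor<4, dim> &Jc_q = qph.get_Jc(); // binds, no copy
// @endcode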
- // this operation (we could, in principle simply create a new task using
- // Threads::new_task for each cell) but there is not much harm done to doing
- // it this way anyway.
+ // this operation (we could, in principle, simply create a new task using
+ // Threads::new_task for each cell), but there is no harm in doing it
+ // this way anyway.
-// Furthermore, should their be different material models associated with a
+// Furthermore, should there be different material models associated with a
// quadrature point, requiring varying levels of computational expense, then
// the method used here could be advantageous.
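// A minimal sketch of that per-cell task variant, assuming a
// hypothetical helper update_one_cell() and the usual
// <deal.II/base/thread_management.h> header:
// @code
//   Threads::TaskGroup<void> tasks;
//   for (const auto &cell : dof_handler.active_cell_iterators())
//     tasks += Threads::new_task([this, cell]() { update_one_cell(cell); });
//   tasks.join_all(); // wait for all per-cell updates to finish
// @endcode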
template <int dim>
block_component[p_component] = p_dof; // Pressure
block_component[J_component] = J_dof; // Dilatation
- // DOF handler is then initialised and we
+ // The DOF handler is then initialised and we
// renumber the grid in an efficient
// manner. We also record the number of
- // DOF's per block.
+ // DOFs per block.
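// A sketch of the initialisation just described (the exact counting
// call depends on the deal.II version; these names are from the
// library's DoFRenumbering and DoFTools namespaces):
// @code
//   dof_handler.distribute_dofs(fe);
//   DoFRenumbering::Cuthill_McKee(dof_handler);
//   DoFRenumbering::component_wise(dof_handler, block_component);
//   dofs_per_block =
//     DoFTools::count_dofs_per_fe_block(dof_handler, block_component);
// @endcode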
csp.block(J_dof, J_dof).reinit(n_dofs_J, n_dofs_J);
csp.collect_sizes();
- // In order to perform the static condensation efficiently,
- // we choose to exploit the symmetry of the the system matrix.
// The global system matrix initially has the following structure
// @f{align*}
// \underbrace{\begin{bmatrix}
// preconditioner as it appears to
// provide the fastest solver
// convergence characteristics for this
- // problem. However, for multicore
- // computing, the Jacobi preconditioner
- // which is multithreaded may converge
- // quicker for larger linear systems.
+ // problem on a single-threaded machine.
+ // However, for multicore
+ // computing, the Jacobi preconditioner,
+ // which is multithreaded, may converge
+ // more quickly for larger linear systems.
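// The two alternatives could be set up as follows (a sketch using
// <deal.II/lac/precondition.h>; the block and matrix names are
// illustrative):
// @code
//   PreconditionSSOR<SparseMatrix<double>> ssor;
//   ssor.initialize(tangent_matrix.block(u_dof, u_dof),
//                   PreconditionSSOR<SparseMatrix<double>>::AdditionalData(1.2));
//
//   PreconditionJacobi<SparseMatrix<double>> jacobi;
//   jacobi.initialize(tangent_matrix.block(u_dof, u_dof));
// @endcode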
// $
// and
// $
- // \overline{\mathbf{\mathsf{K}}} =
- // \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{K}}_{\widetilde{J}\widetilde{J}}
- // \mathbf{\mathsf{K}}_{\widetilde{p}\widetilde{J}}^{-1}
+ // \overline{\mathbf{\mathsf{k}}} =
+ // \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{p}}^{-1} \mathbf{\mathsf{k}}_{\widetilde{J}\widetilde{J}}
+ // \mathbf{\mathsf{k}}_{\widetilde{p}\widetilde{J}}^{-1}
// $.
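// With local FullMatrix objects this could be formed as follows (a
// sketch; the sub-matrix names are illustrative, and the blocks are
// square since the pressure and dilatation fields share a basis):
// @code
//   k_Jp_inv.invert(k_Jp);       // k_Jp^{-1}
//   k_pJ_inv.invert(k_pJ);       // k_pJ^{-1}
//   k_Jp_inv.mmult(tmp, k_JJ);   // tmp   = k_Jp^{-1} k_JJ
//   tmp.mmult(k_bar, k_pJ_inv);  // k_bar = k_Jp^{-1} k_JJ k_pJ^{-1}
// @endcode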
//
// At this point, we need to take note of