<ol>
- <li>
- Fixed: VectorTools::compute_no_normal_flux_constraints had a bug that
- only appeared in rare cases at vertices of the domain if one adjacent
- cell had two boundary indicators selected for no normal flux and another
- had only one. This is now fixed.
- <br>
- (Wolfgang Bangerth, 2013/11/17)
- </li>
-
<li> New: introduced "make test" that runs a minimal set of tests. We
encourage every user to run this, especially if they run into problems.
The tests are automatically picked depending on the configuration and
<h3>Specific improvements</h3>
<ol>
+ <li>
+ Fixed: MappingQ1::transform_real_to_unit_cell() could fail in some
+ cases with very elongated and twisted cells. This should now be fixed
+ by an algorithm that uses a better convergence criterion for the
+ Newton iteration.
+ <br>
+ (Wolfgang Bangerth, 2013/11/17)
+ </li>
+
+ <li>
+ Fixed: VectorTools::compute_no_normal_flux_constraints had a bug that
+ only appeared in rare cases at vertices of the domain if one adjacent
+ cell had two boundary indicators selected for no normal flux and another
+ had only one. This is now fixed.
+ <br>
+ (Wolfgang Bangerth, 2013/11/17)
+ </li>
+
<li> Fixed: dealii::FETools::interpolation_difference was
not working for TrilinosWrappers::MPI::Vectors with ghost
entries. The TrilinosWrappers::VectorBase class now has a
// Newton iteration to solve
- // f(x)=p(x)-p=0
- // x_{n+1}=x_n-[f'(x)]^{-1}f(x)
+ // f(x)=p(x)-p=0
+ // where we are looking for 'x' and p(x) is the forward transformation
+ // from unit to real cell. We solve this using a Newton iteration
+ // x_{n+1}=x_n-[f'(x)]^{-1}f(x)
+ // The start value is the given initial_p_unit, typically obtained from
+ // a linear approximation to the cell geometry
- // The start value was set to be the
- // linear approximation to the cell
-
- // The shape values and derivatives
- // of the mapping at this point are
+ // The shape values and derivatives of the mapping at this point are
// previously computed.
- // For the <2,3> case there is a
- // template specialization.
-
- // f(x)
-
Point<dim> p_unit = initial_p_unit;
compute_shapes(std::vector<Point<dim> > (1, p_unit), mdata);
- Point<spacedim> p_real(transform_unit_to_real_cell_internal(mdata));
+ Point<spacedim> p_real = transform_unit_to_real_cell_internal(mdata);
Point<spacedim> f = p_real-p;
- const double eps = 1.e-12*cell->diameter();
+ // early out if we already have our point
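+ // (the tolerance 1e-24 * diameter^2 is just the square of the old
+ //  absolute criterion of 1e-12 * cell->diameter() removed above)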
+ if (f.square() < 1e-24 * cell->diameter() * cell->diameter())
+ return p_unit;
+
+ // we need to compare the position of the computed p(x) against the given
+ // point 'p'. We will terminate the iteration and return 'x' if they are
+ // less than eps apart. The question is how to choose eps -- or, put maybe
+ // more generally: in which norm we want these 'p' and 'p(x)' to be eps
+ // apart.
+ //
+ // the question is difficult since we may have to deal with very elongated
+ // cells where we may achieve 1e-12*h for the distance of these two points
+ // in the 'long' direction, but achieving this tolerance in the 'short'
+ // direction of the cell may not be possible
+ //
+ // what we do instead is then to terminate iterations if
+ // \| p(x) - p \|_A < eps
+ // where the A-norm is somehow induced by the transformation of the cell.
+ // in particular, we want to measure distances relative to the sizes of
+ // the cell in its principal directions.
+ //
+ // to define what exactly A should be, note that to first order we have
+ // the following (assuming that x* is the solution of the problem, i.e.,
+ // p(x*)=p):
+ // p(x) - p = p(x) - p(x*)
+ // = -grad p(x) * (x*-x) + higher order terms
+ // This suggests measuring with a norm that corresponds to
+ // A = {[grad p(x)] [grad p(x)]^T}^{-1}
+ // because then
+ // \| p(x) - p \|_A \approx \| x - x* \|
+ // Consequently, we will try to enforce that
+ // \| p(x) - p \|_A = \| f \|_A <= eps
+ //
+ // Note that using this norm is a bit dangerous since the norm changes
+ // in every iteration (A isn't fixed but depends on x_k). However, if the
+ // cell is not too deformed (it may be stretched, but not twisted) then
+ // the mapping is almost linear and A is indeed constant or nearly so.
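+ //
+ // In the code we never build A explicitly: since to first order
+ // x - x* \approx [grad p(x)]^{-1} (p(x) - p) = [grad p(x)]^{-1} f,
+ // we directly monitor \| [grad p(x)]^{-1} f \| (computed below as
+ // (df_inverse * f).norm()). Because this measures the error in unit
+ // cell coordinates, eps can be a fixed dimensionless number and no
+ // longer needs to scale with cell->diameter() as the old criterion did.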
+ const double eps = 1.e-11;
const unsigned int newton_iteration_limit = 20;
unsigned int newton_iteration=0;
- while (f.square()>eps*eps)
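+ // weighted norm \| [grad p(x)]^{-1} f \| of the previous iterate;
+ // initialized to -infinity only so the variable is well defined --
+ // the do-while body assigns it before the loop condition reads it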
+ double last_f_weighted_norm = -1./0.;
+ do
{
+#ifdef DEBUG_TRANSFORM_REAL_TO_UNIT_CELL
+ std::cout << "Newton iteration " << newton_iteration << std::endl;
+#endif
+
// f'(x)
Tensor<2,spacedim> df;
for (unsigned int k=0; k<mdata.n_shape_functions; ++k)
// Solve [f'(x)]d=f(x)
Tensor<1,spacedim> delta;
- contract (delta, invert(df), static_cast<const Tensor<1,spacedim>&>(f));
+ Tensor<2,spacedim> df_inverse = invert(df);
+ contract (delta, df_inverse, static_cast<const Tensor<1,spacedim>&>(f));
+#ifdef DEBUG_TRANSFORM_REAL_TO_UNIT_CELL
+ std::cout << " delta=" << delta << std::endl;
+#endif
+
// do a line search
double step_length = 1;
do
{
- // update of p_unit. The
- // spacedimth component of
- // transformed point is simply
- // ignored in codimension one
- // case. When this component is
- // not zero, then we are
- // projecting the point to the
- // surface or curve identified
- // by the cell.
+ // update of p_unit. The spacedim-th component of transformed point
+ // is simply ignored in codimension one case. When this component is
+ // not zero, then we are projecting the point to the surface or
+ // curve identified by the cell.
Point<dim> p_unit_trial = p_unit;
for (unsigned int i=0; i<dim; ++i)
p_unit_trial[i] -= step_length * delta[i];
Point<spacedim> p_real_trial = transform_unit_to_real_cell_internal(mdata);
const Point<spacedim> f_trial = p_real_trial-p;
+#ifdef DEBUG_TRANSFORM_REAL_TO_UNIT_CELL
+ std::cout << " step_length=" << step_length << std::endl
+ << " ||f || =" << f.norm() << std::endl
+ << " ||f*|| =" << f_trial.norm() << std::endl
+ << " ||f*||_A =" << (df_inverse * f_trial).norm() << std::endl;
+#endif
+
// see if we are making progress with the current step length
// and if not, reduce it by a factor of two and try again
+ //
+ // strictly speaking, we should probably use the same norm as we use
+ // for the outer algorithm. in practice, line search is just a
+ // crutch to find a "reasonable" step length, and so using the l2
+ // norm is probably just fine
if (f_trial.norm() < f.norm())
{
p_real = p_real_trial;
else if (step_length > 0.05)
step_length /= 2;
else
- goto failure;
+ AssertThrow (false,
+ (typename Mapping<dim,spacedim>::ExcTransformationFailed()));
}
while (true);
++newton_iteration;
if (newton_iteration > newton_iteration_limit)
- goto failure;
+ AssertThrow (false,
+ (typename Mapping<dim,spacedim>::ExcTransformationFailed()));
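+ // convergence check of the outer Newton loop: the weighted norm
+ // \| [grad p(x)]^{-1} f \| discussed above, i.e. (to first order) the
+ // distance of the current iterate from the solution in unit cell
+ // coordinates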
+ last_f_weighted_norm = (df_inverse * f).norm();
}
-
+ while (last_f_weighted_norm > eps);
+
return p_unit;
-
- // if we get to the following label, then we have either run out
- // of Newton iterations, or the line search has not converged.
- // in either case, we need to give up, so throw an exception that
- // can then be caught
-failure:
- AssertThrow (false, (typename Mapping<dim,spacedim>::ExcTransformationFailed()));
-
- // ...the compiler wants us to return something, though we can
- // of course never get here...
- return Point<dim>();
}
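
Since the patch now signals non-convergence by throwing
Mapping<dim,spacedim>::ExcTransformationFailed directly (instead of jumping to
the removed failure label, which threw the same exception), callers that only
want to know whether a point has a preimage on a given cell can catch it. The
sketch below is not part of the patch: the helper name
point_has_preimage_on_cell is made up and mapping/cell/p are placeholders; it
merely illustrates the public transform_real_to_unit_cell() interface that the
internal function above backs.

#include <deal.II/base/geometry_info.h>
#include <deal.II/base/point.h>
#include <deal.II/fe/mapping.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

// Try to pull the real-space point 'p' back onto the unit cell of 'cell'.
// Returns true and fills 'p_unit' if the inverse mapping converged and the
// result lies inside the reference cell; returns false if the Newton
// iteration threw ExcTransformationFailed or the point lies outside.
template <int dim, int spacedim>
bool point_has_preimage_on_cell
  (const Mapping<dim,spacedim>                               &mapping,
   const typename Triangulation<dim,spacedim>::cell_iterator &cell,
   const Point<spacedim>                                     &p,
   Point<dim>                                                &p_unit)
{
  try
    {
      p_unit = mapping.transform_real_to_unit_cell (cell, p);
    }
  catch (const typename Mapping<dim,spacedim>::ExcTransformationFailed &)
    {
      // the Newton iteration did not converge -- treat as "not on this cell"
      return false;
    }
  return GeometryInfo<dim>::is_inside_unit_cell (p_unit);
}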