/**
- * All data needed by the several functions
- * of the error estimator is gathered in
- * this struct. It is passed as a reference
- * to the seperate functions in this class.
+ * All data needed by the several
+ * functions of the error
+ * estimator is gathered in this
+ * struct. It is passed as a
+ * reference to the separate
+ * functions in this class.
*
* The reason for invention of
* this object is two-fold:
* first, class member data is
* not possible because no real
* object is created (all
- * functions are @p{static}), which
- * is a historical
+ * functions are @p{static}),
+ * which is a historical
* reason. Second, if we don't
* collect the data the various
* functions need somewhere at a
* central place, then each
* function would have to allocate
* memory and synchronise with the
* other threads itself; such
* functions can hardly run in
* parallel, since they are quite
* often blocked by these
* synchronisation points.
+ *
+ * Thus, every thread gets an
+ * instance of this class to work
+ * with and need not allocate
+ * memory itself or synchronise
+ * with other threads.
*/
struct Data
{
- const DoFHandler<dim> &dof;
+ const DoFHandler<dim> &dof_handler;
const Quadrature<dim-1> &quadrature;
const FunctionMap &neumann_bc;
const Vector<double> &solution;
- vector<bool> component_mask;
+ const vector<bool> component_mask;
const Function<dim> *coefficients;
- unsigned int n_threads;
-
- DoFHandler<dim>::active_cell_iterator endc;
- unsigned int n_components;
- unsigned int n_q_points;
- FaceIntegrals face_integrals;
+ const unsigned int n_threads;
/**
- * A vector to store the jump of the
- * normal vectors in the quadrature
- * points.
- * There is one vector for every
- * thread used in the estimator.
- * The allocation of memory has to
- * be global to enable fast use
- * of multithreading
+ * Reference to the global
+ * object that stores the
+ * face integrals.
+ */
+ FaceIntegrals &face_integrals;
+
+ /**
+ * A vector to store the jump
+ * of the normal vectors in
+ * the quadrature points
+ * (i.e. a temporary
+ * value). This vector is not
+ * allocated inside the
+ * functions that use it, but
+ * rather globally, since
+ * memory allocation is slow,
+ * in particular in the presence
+ * of multiple threads, where
+ * synchronisation makes
+ * things even slower.
*/
- vector< vector<vector<double> > > phi;
+ vector<vector<double> > phi;
/**
* A vector for the gradients of
* the finite element function on
* one cell, where the outer index
* is the number of the quadrature
* point and the inner index the
* vector component.
*/
- vector< vector<vector<Tensor<1,dim> > > > psi;
+ vector<vector<Tensor<1,dim> > > psi;
/**
* The same vector for a neighbor cell
*/
- vector< vector<vector<Tensor<1,dim> > > > neighbor_psi;
+ vector<vector<Tensor<1,dim> > > neighbor_psi;
/**
* The outward normal vectors of
* one face at the quadrature
* points
*/
- vector< vector<Point<dim> > > normal_vectors;
+ vector<Point<dim> > normal_vectors;
/**
* Two arrays needed for the
* values of the coefficients in
* the quadrature points, if they
* are given.
*/
- vector< vector<double> > coefficient_values1;
- vector< vector<Vector<double> > > coefficient_values;
+ vector<double> coefficient_values1;
+ vector<Vector<double> > coefficient_values;
/**
* Array for the products of the
* Jacobian determinants and the
* weights of the quadrature
* points.
*/
- vector< vector<double> > JxW_values;
+ vector<double> JxW_values;
/**
* A constructor of the
const Quadrature<dim-1> &quadrature,
const FunctionMap &neumann_bc,
const Vector<double> &solution,
- vector<bool> component_mask_,
+ const vector<bool> &component_mask,
const Function<dim> *coefficients,
- unsigned int n_threads);
+ const unsigned int n_threads,
+ FaceIntegrals &face_integrals);
};
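To make the intent of this scratch-data scheme concrete, here is a minimal, self-contained sketch; all names below are made up, and std::thread merely stands in for the library's own thread handling. Each thread owns one scratch object whose arrays are sized once in the constructor, so the worker function neither allocates memory nor locks.

    #include <functional>
    #include <thread>
    #include <vector>

    // per-thread scratch space, sized once up front
    struct Scratch
    {
      std::vector<double> phi;   // one temporary value per quadrature point
      std::vector<double> psi;

      Scratch (const unsigned int n_q_points)
        : phi (n_q_points), psi (n_q_points) {}
    };

    // the hot loop: uses only its own scratch object, so it never
    // allocates memory and never synchronises with other threads
    void estimate_some (Scratch &scratch)
    {
      for (unsigned int q = 0; q < scratch.phi.size (); ++q)
        scratch.phi[q] = 2. * scratch.psi[q];
    }

    int main ()
    {
      const unsigned int n_threads = 4, n_q_points = 8;

      // one scratch object per thread
      std::vector<Scratch> scratch (n_threads, Scratch (n_q_points));

      std::vector<std::thread> threads;
      for (unsigned int t = 0; t < n_threads; ++t)
        threads.emplace_back (estimate_some, std::ref (scratch[t]));
      for (std::thread &thread : threads)
        thread.join ();
    }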
const unsigned int this_thread);
/**
- * Actually do the computation on a face
- * which has no hanging nodes (it is
- * regular), i.e.
- * either on the other side there is
- * nirvana (face is at boundary), or
- * the other side's refinement level
- * is the same as that of this side,
- * then handle the integration of
+ * Actually do the computation on
+ * a face which has no hanging
+ * nodes (it is regular), i.e.
+ * either on the other side there
+ * is nirvana (face is at
+ * boundary), or the other side's
+ * refinement level is the same
+ * as that of this side. The
+ * integration of both these
+ * cases is handled together.
*
- * The meaning of the parameters becomes
- * clear when looking at the source
- * code. This function is only
- * externalized from @p{estimate_error}
- * to avoid ending up with a function
- * of 500 lines of code.
+ * The meaning of the parameters
+ * becomes clear when looking at
+ * the source code. This function
+ * is only externalized from
+ * @p{estimate_error} to avoid
+ * ending up with a function of
+ * 500 lines of code.
*/
static void integrate_over_regular_face (Data &data,
- const unsigned int this_thread,
const active_cell_iterator &cell,
const unsigned int face_no,
FEFaceValues<dim> &fe_face_values_cell,
/**
- * The same applies as for the function
- * above, except that integration is
- * over face @p{face_no} of @p{cell}, where
- * the respective neighbor is refined,
- * so that the integration is a bit more
+ * The same applies as for the
+ * function above, except that
+ * integration is over face
+ * @p{face_no} of @p{cell}, where
+ * the respective neighbor is
+ * refined, so that the
+ * integration is a bit more
* complex.
*/
static void integrate_over_irregular_face (Data &data,
- const unsigned int this_thread,
const active_cell_iterator &cell,
const unsigned int face_no,
FEFaceValues<dim> &fe_face_values,
#include <fe/fe.h>
#include <fe/fe_values.h>
#include <fe/fe_update_flags.h>
+#include <base/thread_management.h>
#include <base/quadrature.h>
#include <base/quadrature_lib.h>
#include <numerics/error_estimator.h>
#include <vector>
#include <base/timer.h>
- // if multithreaded include
- // ThreadManager
-#ifdef DEAL_II_USE_MT
-# include <base/thread_management.h>
-#endif
-
static
const Quadrature<0> &,
const FunctionMap &,
const Vector<double> &,
- vector<bool> ,
+ const vector<bool> &,
const Function<1> *,
- unsigned int ):
- dof(* static_cast <const DoFHandler<1> *> (0)),
+ const unsigned int ,
+ FaceIntegrals &):
+ dof_handler(* static_cast <const DoFHandler<1> *> (0)),
quadrature(* static_cast <const Quadrature<0> *> (0)),
neumann_bc(* static_cast <const FunctionMap *> (0)),
- solution(* static_cast <const Vector<double> *> (0))
+ solution(* static_cast <const Vector<double> *> (0)),
+ face_integrals (* static_cast<FaceIntegrals*> (0))
{
Assert (false, ExcInternalError());
}
#else
template <int dim>
-KellyErrorEstimator<dim>::Data::Data(const DoFHandler<dim> &dof,
+KellyErrorEstimator<dim>::Data::Data(const DoFHandler<dim> &dof_handler,
const Quadrature<dim-1> &quadrature,
const FunctionMap &neumann_bc,
const Vector<double> &solution,
- vector<bool> component_mask_,
+ const vector<bool> &component_mask,
const Function<dim> *coefficients,
- unsigned int n_threads):
- dof(dof),
- quadrature(quadrature),
- neumann_bc(neumann_bc),
- solution(solution),
- coefficients(coefficients),
- n_threads(n_threads)
+ const unsigned int n_threads,
+ FaceIntegrals &face_integrals):
+ dof_handler (dof_handler),
+ quadrature (quadrature),
+ neumann_bc (neumann_bc),
+ solution (solution),
+ component_mask (component_mask),
+ coefficients (coefficients),
+ n_threads (n_threads),
+ face_integrals (face_integrals)
{
- n_components = dof.get_fe().n_components();
-
- // if no mask given: treat all components
- component_mask = ((component_mask_.size() == 0) ?
- vector<bool>(n_components, true) :
- component_mask_);
+ const unsigned int n_components = dof_handler.get_fe().n_components();
Assert (component_mask.size() == n_components, ExcInvalidComponentMask());
Assert (count(component_mask.begin(), component_mask.end(), true) > 0,
for (typename FunctionMap::const_iterator i=neumann_bc.begin(); i!=neumann_bc.end(); ++i)
Assert (i->second->n_components == n_components, ExcInvalidBoundaryFunction());
- // the last cell, often needed
- endc=dof.end();
-
- n_q_points=quadrature.n_quadrature_points;
// Init the size of a lot of vectors
// needed in the calculations once
// per thread.
- phi.resize(n_threads);
- psi.resize(n_threads);
- neighbor_psi.resize(n_threads);
- normal_vectors.resize(n_threads);
- coefficient_values1.resize(n_threads);
- coefficient_values.resize(n_threads);
- JxW_values.resize(n_threads);
+ const unsigned int n_q_points = quadrature.n_quadrature_points;
+ phi.resize(n_q_points);
+ psi.resize(n_q_points);
+ neighbor_psi.resize(n_q_points);
+ normal_vectors.resize(n_q_points);
+ coefficient_values1.resize(n_q_points);
+ coefficient_values.resize(n_q_points);
+ JxW_values.resize(n_q_points);
- for (unsigned int t=0;t<n_threads;++t)
+ for (unsigned int qp=0;qp<n_q_points;++qp)
{
- phi[t].resize(n_q_points);
- psi[t].resize(n_q_points);
- neighbor_psi[t].resize(n_q_points);
- normal_vectors[t].resize(n_q_points);
- coefficient_values1[t].resize(n_q_points);
- coefficient_values[t].resize(n_q_points);
- JxW_values[t].resize(n_q_points);
-
- for (unsigned int qp=0;qp<n_q_points;++qp)
- {
- phi[t][qp].resize(n_components);
- psi[t][qp].resize(n_components);
- neighbor_psi[t][qp].resize(n_components);
- coefficient_values[t][qp].reinit(n_components);
- }
+ phi[qp].resize(n_components);
+ psi[qp].resize(n_components);
+ neighbor_psi[qp].resize(n_components);
+ coefficient_values[qp].reinit(n_components);
}
}
template <>
void KellyErrorEstimator<1>::estimate_some (Data &, const unsigned int)
{
+ // in 1d, the @p{estimate} function
+ // does all the work
Assert (false, ExcInternalError() );
}
+
template <>
-void KellyErrorEstimator<1>::estimate (const DoFHandler<1> &dof,
+void KellyErrorEstimator<1>::estimate (const DoFHandler<1> &dof_handler,
const Quadrature<0> &,
const FunctionMap &neumann_bc,
const Vector<double> &solution,
const Function<1> *coefficient,
const unsigned int)
{
- const unsigned int n_components = dof.get_fe().n_components();
+ const unsigned int n_components = dof_handler.get_fe().n_components();
// if no mask given: treat all components
vector<bool> component_mask ((component_mask_.size() == 0) ?
// reserve one slot for each cell and set
// it to zero
- error.reinit (dof.get_tria().n_active_cells());
+ error.reinit (dof_handler.get_tria().n_active_cells());
// fields to get the gradients on
// the present and the neighbor cell.
// two contributions from the two
// vertices of each cell.
QTrapez<1> quadrature;
- FEValues<1> fe_values (dof.get_fe(), quadrature, update_gradients);
- DoFHandler<1>::active_cell_iterator cell = dof.begin_active();
- for (unsigned int cell_index=0; cell != dof.end(); ++cell, ++cell_index)
+ FEValues<1> fe_values (dof_handler.get_fe(), quadrature, update_gradients);
+ active_cell_iterator cell = dof_handler.begin_active();
+ for (unsigned int cell_index=0; cell != dof_handler.end(); ++cell, ++cell_index)
{
error(cell_index) = 0;
// loop over the two points bounding
// need not compute all values on the
// neighbor cells, so using two objects
// gives us a performance gain).
- FEFaceValues<dim> fe_face_values_cell (data.dof.get_fe(),
+ FEFaceValues<dim> fe_face_values_cell (data.dof_handler.get_fe(),
data.quadrature,
UpdateFlags(update_gradients |
update_JxW_values |
(data.coefficients != 0)) ?
update_q_points : 0) |
update_normal_vectors));
- FEFaceValues<dim> fe_face_values_neighbor (data.dof.get_fe(),
+ FEFaceValues<dim> fe_face_values_neighbor (data.dof_handler.get_fe(),
data.quadrature,
update_gradients);
- FESubfaceValues<dim> fe_subface_values (data.dof.get_fe(),
+ FESubfaceValues<dim> fe_subface_values (data.dof_handler.get_fe(),
data.quadrature,
update_gradients);
- DoFHandler<dim>::active_cell_iterator cell=data.dof.begin_active();
+ active_cell_iterator cell=data.dof_handler.begin_active();
// calculate the start cell for
// this thread. note that this way
// we obtain a pseudorandom
// distribution of the
// `hard' cells to the different
// threads.
- for (unsigned int t=0; (t<this_thread) && (cell!=data.endc); ++t, ++cell);
+ for (unsigned int t=0; (t<this_thread) && (cell!=data.dof_handler.end());
+ ++t, ++cell);
// loop over all cells for this thread
// the iteration of cell is done at the end
- for (; cell!=data.endc; )
+ for (; cell!=data.dof_handler.end(); )
{
// loop over all faces of this cell
// of faces with contribution zero.
const unsigned char boundary_indicator
= cell->face(face_no)->boundary_indicator();
- if ((boundary_indicator != 255) &&
+ if (cell->face(face_no)->at_boundary()
+ &&
data.neumann_bc.find(boundary_indicator)==data.neumann_bc.end())
{
data.face_integrals[cell->face(face_no)] = 0;
// then handle the integration of
// these both cases together
integrate_over_regular_face (data,
- this_thread,
cell, face_no,
fe_face_values_cell,
fe_face_values_neighbor);
// not fit into the framework of
// the above function
integrate_over_irregular_face (data,
- this_thread,cell, face_no,
+ cell, face_no,
fe_face_values_cell,
fe_subface_values);
};
- // next cell in this thread
- for (unsigned int t=0;((t<data.n_threads)&&(cell!=data.endc));++t,++cell)
- {};
+ // go to next cell for this
+ // thread. note that the cells
+ // for each of the threads are
+ // interleaved.
+ for (unsigned int t=0;
+ ((t<data.n_threads) && (cell!=data.dof_handler.end()));
+ ++t, ++cell);
};
};
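The interleaved way in which estimate_some walks the cell range can be shown in isolation. The following standalone sketch uses invented names; a std::list stands in for the non-random-access cell iterators. Thread t starts at the t-th element and then advances in strides of n_threads, exactly as in the two advancing loops above.

    #include <iostream>
    #include <list>

    int main ()
    {
      const unsigned int n_threads = 3;
      const std::list<int> cells = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};

      for (unsigned int this_thread = 0; this_thread < n_threads; ++this_thread)
        {
          // advance to the start cell of this thread ...
          std::list<int>::const_iterator cell = cells.begin ();
          for (unsigned int t = 0; (t < this_thread) && (cell != cells.end ()); ++t, ++cell)
            ;

          // ... then process a cell and skip ahead by n_threads cells
          while (cell != cells.end ())
            {
              std::cout << "thread " << this_thread << " handles cell " << *cell << '\n';
              for (unsigned int t = 0; (t < n_threads) && (cell != cells.end ()); ++t, ++cell)
                ;
            }
        }
    }

Thread 0 thus handles cells 0, 3, 6, 9, thread 1 handles 1, 4, 7, and so on.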
template <int dim>
-void KellyErrorEstimator<dim>::estimate (const DoFHandler<dim> &dof,
+void KellyErrorEstimator<dim>::estimate (const DoFHandler<dim> &dof_handler,
const Quadrature<dim-1> &quadrature,
const FunctionMap &neumann_bc,
const Vector<double> &solution,
#ifndef DEAL_II_USE_MT
n_threads = 1;
#endif
+ Assert (n_threads > 0, ExcInternalError());
- // all the data needed in the error-
- // estimator is gathered in this stuct.
- KellyErrorEstimator<dim>::Data data (dof,
- quadrature,
- neumann_bc,
- solution,
- component_mask,
- coefficients,
- n_threads);
- // map of integrals indexed by
+ // Map of integrals indexed by
// the corresponding face. In this map
// we store the integrated jump of the
- // gradient for each face. By doing so,
- // we can check whether we have already
- // integrated along this face by testing
- // whether the respective face is already
- // a key in this map.
+ // gradient for each face.
// At the end of the function, we again
// loop over the cells and collect the
- // conrtibutions of the different faces
+ // contributions of the different faces
// of the cell.
- // the values for all faces are set to
- // -10e20. It would cost a lot of time
- // to synchronise the initialisation
- // of the map in multithreaded mode.
- // negative value indicates that the
- // face is not calculated.
-
- for (active_cell_iterator cell=data.dof.begin_active(); cell!=data.endc; ++cell)
+ //
+ // The initial values for all faces
+ // are set to -10e20. It would cost
+ // a lot of time to synchronise the
+ // initialisation (i.e. the
+ // creation of new keys) of the map
+ // in multithreaded mode. A negative
+ // value indicates that the face
+ // has not yet been processed.
+ FaceIntegrals face_integrals;
+ for (active_cell_iterator cell=dof_handler.begin_active(); cell!=dof_handler.end(); ++cell)
for (unsigned int face_no=0; face_no<GeometryInfo<dim>::faces_per_cell; ++face_no)
- data.face_integrals[cell->face(face_no)]=-10e20;
-
+ face_integrals[cell->face(face_no)]=-10e20;
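The point of creating every key of the map up front is that the worker threads afterwards only assign to entries that already exist and never change the map's structure, so no locking is needed. A minimal standalone version of that idea (invented names, integer keys instead of face iterators, std::thread instead of the library's thread classes) might look like this:

    #include <map>
    #include <thread>
    #include <vector>

    int main ()
    {
      const unsigned int n_faces = 100, n_threads = 4;

      // create all keys once, before any thread is started; the negative
      // sentinel marks faces whose integral has not been computed yet
      std::map<unsigned int, double> face_integrals;
      for (unsigned int face = 0; face < n_faces; ++face)
        face_integrals[face] = -10e20;

      std::vector<std::thread> threads;
      for (unsigned int t = 0; t < n_threads; ++t)
        threads.emplace_back ([&face_integrals, t, n_threads, n_faces] ()
          {
            // each thread writes only to its own, already existing entries;
            // find() does not modify the map, so no synchronisation is needed
            for (unsigned int face = t; face < n_faces; face += n_threads)
              face_integrals.find (face)->second = 1. * face;
          });
      for (std::thread &thread : threads)
        thread.join ();
    }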
- // split all cells into threads
- // if multithreading is used
-#ifdef DEAL_II_USE_MT
+ // all the data needed in the error
+ // estimator by each of the threads
+ // is gathered in the following
+ // structures
+ //
+ // note that if no component mask
+ // was given, then treat all
+ // components
+ vector<Data*> data_structures (n_threads);
+ for (unsigned int i=0; i<n_threads; ++i)
+ data_structures[i] = new Data (dof_handler,
+ quadrature,
+ neumann_bc,
+ solution,
+ ((component_mask.size() == 0) ?
+ vector<bool>(dof_handler.get_fe().n_components(),
+ true) :
+ component_mask),
+ coefficients,
+ n_threads,
+ face_integrals);
+
+ // split all cells into threads if
+ // multithreading is used and run
+ // the whole thing
Threads::ThreadManager thread_manager;
- for (unsigned int i=0;i<data.n_threads; ++i)
+ for (unsigned int i=0; i<n_threads; ++i)
Threads::spawn (thread_manager,
Threads::encapsulate (&KellyErrorEstimator<dim>::estimate_some)
- .collect_args (data, i));
+ .collect_args (*data_structures[i], i));
thread_manager.wait();
+
+ // delete the structures for the
+ // different threads again. the
+ // results are in the
+ // face_integrals object
+ for (unsigned int i=0; i<n_threads; ++i)
+ {
+ delete data_structures[i];
+ data_structures[i] = 0;
+ };
- // ... ifdef DEAL_II_USE_MT
-#else
- // just one thread, calculate
- // error on all cells
- KellyErrorEstimator::estimate_some(data,0);
-#endif
-
-
// finally add up the contributions of the
// faces for each cell
// reserve one slot for each cell and set
// it to zero
- error.reinit (data.dof.get_tria().n_active_cells());
- for (unsigned int i=0;i<data.dof.get_tria().n_active_cells();++i)
+ error.reinit (dof_handler.get_tria().n_active_cells());
+ for (unsigned int i=0;i<dof_handler.get_tria().n_active_cells();++i)
error(i)=0;
unsigned int present_cell=0;
- for (active_cell_iterator cell=data.dof.begin_active();
- cell!=data.endc;
+ for (active_cell_iterator cell=dof_handler.begin_active();
+ cell!=dof_handler.end();
++cell, ++present_cell)
{
// loop over all faces of this cell
for (unsigned int face_no=0; face_no<GeometryInfo<dim>::faces_per_cell;
++face_no)
{
- Assert(data.face_integrals.find(cell->face(face_no)) != data.face_integrals.end(),
+ Assert(face_integrals.find(cell->face(face_no)) != face_integrals.end(),
ExcInternalError());
- error(present_cell) += (data.face_integrals[cell->face(face_no)] *
+ error(present_cell) += (face_integrals[cell->face(face_no)] *
cell->diameter() / 24);
};
error(present_cell) = sqrt(error(present_cell));
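For reference, the quantity assembled in these last two statements is the usual Kelly indicator: with face_integrals[F] holding the integral of the squared jump over face F, the code computes

    \eta_K = \left( \frac{h_K}{24} \sum_{F\subset\partial K}
             \int_F \left[ a\,\frac{\partial u_h}{\partial n} \right]^2 \,ds
             \right)^{1/2},

where h_K is approximated by cell->diameter(), a is the optional coefficient, and on Neumann faces the jump is replaced by a du_h/dn - g.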
template <>
void KellyErrorEstimator<1>::integrate_over_regular_face (Data &,
- const unsigned int,
const active_cell_iterator &,
const unsigned int ,
FEFaceValues<1> &,
template <>
void KellyErrorEstimator<1>::
integrate_over_irregular_face (Data &,
- const unsigned int,
const active_cell_iterator &,
const unsigned int ,
FEFaceValues<1> &,
template <int dim>
void KellyErrorEstimator<dim>::
integrate_over_regular_face (Data &data,
- const unsigned int this_thread,
const active_cell_iterator &cell,
const unsigned int face_no,
FEFaceValues<dim> &fe_face_values_cell,
FEFaceValues<dim> &fe_face_values_neighbor)
{
const DoFHandler<dim>::face_iterator face = cell->face(face_no);
+ const unsigned int n_q_points = data.quadrature.n_quadrature_points,
+ n_components = data.dof_handler.get_fe().n_components();
+
// initialize data of the restriction
// of this cell to the present face
// get gradients of the finite element
// function on this cell
- fe_face_values_cell.get_function_grads (data.solution, data.psi[this_thread]);
+ fe_face_values_cell.get_function_grads (data.solution, data.psi);
// now compute over the other side of
// the face
Assert (cell->neighbor(face_no).state() == valid,
ExcInternalError());
- const DoFHandler<dim>::active_cell_iterator neighbor = cell->neighbor(face_no);
+ const active_cell_iterator neighbor = cell->neighbor(face_no);
// find which number the current
// face has relative to the neighboring
// get gradients on neighbor cell
fe_face_values_neighbor.get_function_grads (data.solution,
- data.neighbor_psi[this_thread]);
+ data.neighbor_psi);
// compute the jump in the gradients
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int p=0; p<data.n_q_points; ++p)
- data.psi[this_thread][p][component] -= data.neighbor_psi[this_thread][p][component];
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int p=0; p<n_q_points; ++p)
+ data.psi[p][component] -= data.neighbor_psi[p][component];
};
// would only change the sign. We take
// the outward normal.
- data.normal_vectors[this_thread]=fe_face_values_cell.get_normal_vectors();
+ data.normal_vectors=fe_face_values_cell.get_normal_vectors();
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int point=0; point<data.n_q_points; ++point)
- data.phi[this_thread][point][component] = data.psi[this_thread][point][component]*
- data.normal_vectors[this_thread][point];
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int point=0; point<n_q_points; ++point)
+ data.phi[point][component] = data.psi[point][component]*
+ data.normal_vectors[point];
// if a coefficient was given: use that
// to scale the jump in the gradient
{
data.coefficients->value_list (fe_face_values_cell.get_quadrature_points(),
- data.coefficient_values1[this_thread]);
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int point=0; point<data.n_q_points; ++point)
- data.phi[this_thread][point][component] *=
- data.coefficient_values1[this_thread][point];
+ data.coefficient_values1);
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int point=0; point<n_q_points; ++point)
+ data.phi[point][component] *=
+ data.coefficient_values1[point];
}
else
// vector-valued coefficient
{
data.coefficients->vector_value_list (fe_face_values_cell.get_quadrature_points(),
- data.coefficient_values[this_thread]);
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int point=0; point<data.n_q_points; ++point)
- data.phi[this_thread][point][component] *=
- data.coefficient_values[this_thread][point](component);
+ data.coefficient_values);
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int point=0; point<n_q_points; ++point)
+ data.phi[point][component] *=
+ data.coefficient_values[point](component);
};
};
// function at the quadrature
// points
- vector<Vector<double> > g(data.n_q_points, Vector<double>(data.n_components));
+ vector<Vector<double> > g(n_q_points, Vector<double>(n_components));
data.neumann_bc.find(boundary_indicator)->second
->vector_value_list (fe_face_values_cell.get_quadrature_points(),
g);
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int point=0; point<data.n_q_points; ++point)
- data.phi[this_thread][point][component] -= g[point](component);
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int point=0; point<n_q_points; ++point)
+ data.phi[point][component] -= g[point](component);
};
// mentioned value at one of the
// quadrature points
- data.JxW_values[this_thread] = fe_face_values_cell.get_JxW_values();
+ data.JxW_values = fe_face_values_cell.get_JxW_values();
// take the square of the phi[i]
// for integration, and sum up
double face_integral = 0;
- for (unsigned int component=0; component<data.n_components; ++component)
+ for (unsigned int component=0; component<n_components; ++component)
if (data.component_mask[component] == true)
- for (unsigned int p=0; p<data.n_q_points; ++p)
- face_integral += sqr(data.phi[this_thread][p][component]) *
- data.JxW_values[this_thread][p];
+ for (unsigned int p=0; p<n_q_points; ++p)
+ face_integral += sqr(data.phi[p][component]) *
+ data.JxW_values[p];
data.face_integrals[face] = face_integral;
};
template <int dim>
void KellyErrorEstimator<dim>::
integrate_over_irregular_face (Data &data,
- const unsigned int this_thread,
const active_cell_iterator &cell,
const unsigned int face_no,
FEFaceValues<dim> &fe_face_values,
FESubfaceValues<dim> &fe_subface_values)
{
const DoFHandler<dim>::cell_iterator neighbor = cell->neighbor(face_no);
+ const unsigned int n_q_points = data.quadrature.n_quadrature_points,
+ n_components = data.dof_handler.get_fe().n_components();
Assert (neighbor.state() == valid, ExcInternalError());
Assert (neighbor->has_children(), ExcInternalError());
// store the gradient of the solution
// in psi
fe_subface_values.reinit (cell, face_no, subface_no);
- fe_subface_values.get_function_grads (data.solution, data.psi[this_thread]);
+ fe_subface_values.get_function_grads (data.solution, data.psi);
// restrict the finite element on the
// neighbor cell to the common @p{subface}.
// store the gradient in @p{neighbor_psi}
fe_face_values.reinit (neighbor_child, neighbor_neighbor);
- fe_face_values.get_function_grads (data.solution, data.neighbor_psi[this_thread]);
+ fe_face_values.get_function_grads (data.solution, data.neighbor_psi);
// compute the jump in the gradients
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int p=0; p<data.n_q_points; ++p)
- data.psi[this_thread][p][component] -=
- data.neighbor_psi[this_thread][p][component];
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int p=0; p<n_q_points; ++p)
+ data.psi[p][component] -=
+ data.neighbor_psi[p][component];
// note that unlike for the
// case of regular faces
//
// let phi be the name of the integrand
- data.normal_vectors[this_thread]=fe_face_values.get_normal_vectors();
+ data.normal_vectors=fe_face_values.get_normal_vectors();
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int point=0; point<data.n_q_points; ++point)
- data.phi[this_thread][point][component] =
- data.psi[this_thread][point][component]*
- data.normal_vectors[this_thread][point];
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int point=0; point<n_q_points; ++point)
+ data.phi[point][component] =
+ data.psi[point][component]*
+ data.normal_vectors[point];
// if a coefficient was given: use that
// to scale the jump in the gradient
if (data.coefficients->n_components == 1)
{
data.coefficients->value_list (fe_face_values.get_quadrature_points(),
- data.coefficient_values1[this_thread]);
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int point=0; point<data.n_q_points; ++point)
- data.phi[this_thread][point][component] *=
- data.coefficient_values1[this_thread][point];
+ data.coefficient_values1);
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int point=0; point<n_q_points; ++point)
+ data.phi[point][component] *=
+ data.coefficient_values1[point];
}
else
// vector-valued coefficient
{
data.coefficients->vector_value_list (fe_face_values.get_quadrature_points(),
- data.coefficient_values[this_thread]);
- for (unsigned int component=0; component<data.n_components; ++component)
- for (unsigned int point=0; point<data.n_q_points; ++point)
- data.phi[this_thread][point][component] *=
- data.coefficient_values[this_thread][point](component);
+ data.coefficient_values);
+ for (unsigned int component=0; component<n_components; ++component)
+ for (unsigned int point=0; point<n_q_points; ++point)
+ data.phi[point][component] *=
+ data.coefficient_values[point](component);
};
};
- data.JxW_values[this_thread] = fe_face_values.get_JxW_values();
+ data.JxW_values = fe_face_values.get_JxW_values();
// take the square of the phi[i]
// for integration, and sum up
double face_integral = 0;
- for (unsigned int component=0; component<data.n_components; ++component)
+ for (unsigned int component=0; component<n_components; ++component)
if (data.component_mask[component] == true)
- for (unsigned int p=0; p<data.n_q_points; ++p)
- face_integral += sqr(data.phi[this_thread][p][component]) *
- data.JxW_values[this_thread][p];
+ for (unsigned int p=0; p<n_q_points; ++p)
+ face_integral += sqr(data.phi[p][component]) *
+ data.JxW_values[p];
data.face_integrals[neighbor_child->face(neighbor_neighbor)] = face_integral;
};