//---------------------------------------------------------------------------
// $Id$
//
-// Copyright (C) 2007, 2009, 2012 by the deal.II authors
+// Copyright (C) 2007, 2009, 2012, 2013 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* tell where your points are, you will save a lot of computational
* time by letting this class know.
*
- * An optimization based on a caching mechanism was used by the
- * author of this class for the implementation of a Finite Element
- * Immersed Boundary Method.
*
- * \ingroup functions
+ * <h3>Using FEFieldFunction with parallel::distributed::Triangulation</h3>
*
- * \author Luca Heltai, 2006, Markus Buerg, 2012
+ * When using this class with a parallel distributed triangulation object
+ * and evaluating the solution at a particular point, not every processor
+ * will own the cell at which the solution is evaluated. Rather, it may be
+ * that the cell in which this point is found is in fact a ghost or
+ * artificial cell (see @ref GlossArtificialCell and
+ * @ref GlossGhostCell). If the cell is artificial, we have no access
+ * to the solution there and functions that evaluate the solution at
+ * such a point will trigger an exception of type
+ * FEFieldFunction::ExcPointNotAvailableHere. The same kind of exception
+ * will also be produced if the cell is a ghost cell: on such cells one
+ * could in principle evaluate the solution, but it is simpler not to
+ * allow this, because then there is exactly one processor in a parallel
+ * distributed computation that can evaluate the solution. Consequently,
+ * it is clear which processor is responsible for producing output if the
+ * point evaluation is done as a postprocessing step.
+ *
+ * To deal with this situation, you will want to use code like the
+ * following when, for example, evaluating the solution at the origin
+ * (here using a parallel TrilinosWrappers vector to hold the solution):
+ * @code
+ * Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>
+ * solution_function (dof_handler, solution);
+ * Point<dim> origin = Point<dim>();
+ *
+ *   double solution_at_origin = 0;
+ * bool point_found = true;
+ * try
+ * {
+ * solution_at_origin = solution_function.value (origin);
+ * }
+ *   catch (const typename Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>::ExcPointNotAvailableHere &)
+ * {
+ * point_found = false;
+ * }
+ *
+ * if (point_found == true)
+ * ...do something...;
+ * @endcode
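+ *
+ * If the value at the point is subsequently needed on every processor
+ * (not only on the one that owns the corresponding cell), it has to be
+ * communicated. A minimal sketch of one way to do this, assuming that
+ * exactly one processor finds the point and that every other processor
+ * contributes zero:
+ * @code
+ *   // every processor that could not evaluate the solution contributes
+ *   // zero to the sum
+ *   const double global_solution_at_origin
+ *     = Utilities::MPI::sum (point_found ? solution_at_origin : 0.,
+ *                            MPI_COMM_WORLD);
+ * @endcode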
+ *
+ * @ingroup functions
+ * @author Luca Heltai, 2006, Markus Buerg, 2012, Wolfgang Bangerth, 2013
*/
template <int dim,
typename DH=DoFHandler<dim>,
void set_active_cell (const typename DH::active_cell_iterator &newcell);
/**
- * Get ONE vector value at the
+ * Get one vector value at the
* given point. It is
* inefficient to use single
* points. If you need more
* cell. This is not mandatory,
* however it does speed things
* up.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void vector_value (const Point<dim> &p,
Vector<double> &values) const;
* cell. This is not mandatory,
* however it does speed things
* up.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual double value (const Point< dim > &p,
const unsigned int component = 0) const;
* lie on the same cell. If
* this is not the case, things
* may slow down a bit.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void value_list (const std::vector<Point< dim > > &points,
std::vector< double > &values,
* lie on the same cell. If
* this is not the case, things
* may slow down a bit.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void vector_value_list (const std::vector<Point< dim > > &points,
std::vector< Vector<double> > &values) const;
* cell. This is not mandatory,
* however it does speed things
* up.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void
vector_gradient (const Point< dim > &p,
* cell. This is not mandatory,
* however it does speed things
* up.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual Tensor<1,dim> gradient(const Point< dim > &p,
const unsigned int component = 0)const;
* same cell. If this is not
* the case, things may slow
* down a bit.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void
vector_gradient_list (const std::vector< Point< dim > > &p,
* lie on the same cell. If
* this is not the case, things
* may slow down a bit.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void
gradient_list (const std::vector< Point< dim > > &p,
/**
* Compute the Laplacian of a
* given component at point <tt>p</tt>.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual double
laplacian (const Point<dim> &p,
* Compute the Laplacian of all
* components at point <tt>p</tt> and
* store them in <tt>values</tt>.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void
vector_laplacian (const Point<dim> &p,
/**
* Compute the Laplacian of one
* component at a set of points.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void
laplacian_list (const std::vector<Point<dim> > &points,
/**
* Compute the Laplacians of all
* components at a set of points.
+ *
+ * @note When using this function on a parallel::distributed::Triangulation
+ * you may get an exception when trying to evaluate the solution at a
+ * point that does not lie in a locally owned cell (see
+ * @ref GlossLocallyOwnedCell). See the section in the general
+ * documentation of this class for more information.
*/
virtual void
vector_laplacian_list (const std::vector<Point<dim> > &points,
std::vector<std::vector<Point<dim> > > &qpoints,
std::vector<std::vector<unsigned int> > &maps) const;
+ /**
+   * Exception thrown if one of the evaluation functions is given a
+   * point that does not lie in a locally owned cell; see the class
+   * documentation for how to handle this case.
+ */
+ DeclException0 (ExcPointNotAvailableHere);
+
private:
/**
   * Typedef for the thread-local
   * storage in which each thread
   * caches the cell in which it
   * last found a point.
*/
- typedef Threads::ThreadLocalStorage <
- typename DH::active_cell_iterator > cell_hint_t;
+ typedef
+ Threads::ThreadLocalStorage <typename DH::active_cell_iterator >
+ cell_hint_t;
/**
* Pointer to the dof handler.
//---------------------------------------------------------------------------
// $Id$
//
-// Copyright (C) 2007, 2011, 2012 by the deal.II authors
+// Copyright (C) 2007, 2011, 2012, 2013 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
typename DH::active_cell_iterator cell = cell_hint.get();
if (cell == dh->end())
cell = dh->begin_active();
-
+
boost::optional<Point<dim> >
qp = get_reference_coordinates (cell, p);
if (!qp)
{
const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, p);
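+        // on a parallel::distributed::Triangulation the cell found by
+        // find_active_cell_around_point may be a ghost or artificial
+        // cell; as documented in the class description, we refuse to
+        // evaluate the solution there and throw an exception instead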
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
}
qp = get_reference_coordinates (cell, p);
if (!qp)
{
- std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
+ const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, p);
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
}
{
const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, p);
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
}
const std::pair<typename DH::active_cell_iterator, Point<dim> >
my_pair = GridTools::find_active_cell_around_point
(mapping, *dh, points[0]);
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cell = my_pair.first;
qp = my_pair.second;
point_flags[0] = true;
{
const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
= GridTools::find_active_cell_around_point (mapping, *dh, points[first_outside]);
+ AssertThrow (my_pair.first->is_locally_owned(),
+ ExcPointNotAvailableHere());
+
cells.push_back(my_pair.first);
qpoints.push_back(std::vector<Point<dim> >(1, my_pair.second));
maps.push_back(std::vector<unsigned int>(1, first_outside));
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010, 2011, 2013 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// Test Functions::FEFieldFunction for parallel computations: interpolate
+// a linear function onto a distributed mesh and evaluate it at a number
+// of points. each point lies inside a locally owned cell on exactly one
+// processor; all other processors must get ExcPointNotAvailableHere
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/dofs/dof_accessor.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/fe_field_function.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <fstream>
+
+
+template <int dim>
+class LinearFunction : public Function<dim>
+{
+ public:
+ double value (const Point<dim> &p,
+ const unsigned int) const
+ {
+ return p[0] + 2;
+ }
+};
+
+
+template<int dim>
+void test()
+{
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
+
+ // create a mesh and refine it sufficiently often that only one
+ // processor will have the cell we care about
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global (5);
+
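+  // use a quadratic element; it represents the linear function exactly,
+  // so point values of the interpolant can be compared with the exact
+  // function value below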
+ const FE_Q<dim> fe(2);
+ DoFHandler<dim> dofh(tr);
+ dofh.distribute_dofs (fe);
+
+ TrilinosWrappers::MPI::Vector interpolated(dofh.locally_owned_dofs(),
+ MPI_COMM_WORLD);
+ VectorTools::interpolate (dofh,
+ LinearFunction<dim>(),
+ interpolated);
+
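+  // FEFieldFunction may need to read degrees of freedom that are owned
+  // by a neighboring processor, so copy the solution into a vector that
+  // also stores ghost entries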
+ IndexSet relevant_set;
+ DoFTools::extract_locally_relevant_dofs (dofh, relevant_set);
+ TrilinosWrappers::MPI::Vector x_rel(relevant_set, MPI_COMM_WORLD);
+ x_rel = interpolated;
+
+ Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>
+ field_function (dofh, x_rel);
+
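+  // evaluate the field at a few points, each of which lies in the
+  // interior of a cell that is locally owned by exactly one processor.
+  // that processor must find the value, everyone else must see the
+  // exception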
+ for (unsigned int test=0; test<4; ++test)
+ {
+ double value;
+ Point<dim> p = (dim == 2 ?
+ Point<dim>(test/2+1,test%2+1)/3 :
+ Point<dim>(test/2+1,test/2+1,test%2+1)/3);
+
+ // see if we can find the point on the current processor
+ bool point_found = false;
+ try
+ {
+ value = field_function.value (p);
+ point_found = true;
+
+ Assert (std::fabs(value - (p[0]+2)) < 1e-8* std::fabs(value + (p[0]+2)),
+ ExcInternalError());
+ }
+ catch (typename Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>::ExcPointNotAvailableHere &)
+ {
+ point_found = false;
+ }
+
+ Assert (Utilities::MPI::sum(point_found ? 1 : 0, MPI_COMM_WORLD) == 1,
+ ExcInternalError());
+ }
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ deallog << "OK" << std::endl;
+
+
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+
+ deallog.push(Utilities::int_to_string(myid));
+
+ if (myid == 0)
+ {
+ std::ofstream logfile(output_file_for_mpi("fe_field_function_01").c_str());
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+
+}