From: Wolfgang Bangerth
Date: Tue, 16 Apr 2013 04:06:32 +0000 (+0000)
Subject: Make FEFieldFunction work in parallel as well.
X-Git-Tag: v8.0.0~694
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3ac7048ba6c6de14b492bf597f41938e95aaf3b6;p=dealii.git

Make FEFieldFunction work in parallel as well.

git-svn-id: https://svn.dealii.org/trunk@29294 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h
index f691adcc08..38a6f061a9 100644
--- a/deal.II/doc/news/changes.h
+++ b/deal.II/doc/news/changes.h
@@ -99,6 +99,12 @@ this function.

 Specific improvements
 
+ New: Functions::FEFieldFunction can now deal with
+ parallel::distributed::Triangulation objects.
+
+ (Wolfgang Bangerth, 2013/04/15)
+
  New: There is now a version of SparseMatrix::copy_from that can copy from
  TrilinosWrappers::SparseMatrix.
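
As an illustration of that second entry (which is only context here, not part of this patch), copying a Trilinos matrix into a deal.II SparseMatrix might look roughly as follows. This is a minimal sketch: the names dof_handler and trilinos_matrix, as well as the sparsity-pattern setup, are assumptions rather than anything the entry prescribes.

    // Minimal sketch (assumptions: an existing DoFHandler<dim> dof_handler and
    // an already assembled TrilinosWrappers::SparseMatrix trilinos_matrix).
    // The deal.II matrix needs a sparsity pattern that contains all nonzero
    // entries of the source before the copy can take place.
    CompressedSparsityPattern csp (dof_handler.n_dofs(), dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern (dof_handler, csp);

    SparsityPattern sparsity;
    sparsity.copy_from (csp);

    SparseMatrix<double> local_copy;
    local_copy.reinit (sparsity);
    local_copy.copy_from (trilinos_matrix);  // the new overload mentioned above
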
diff --git a/deal.II/include/deal.II/numerics/fe_field_function.h b/deal.II/include/deal.II/numerics/fe_field_function.h
index 2ac4043bcb..24e8899781 100644
--- a/deal.II/include/deal.II/numerics/fe_field_function.h
+++ b/deal.II/include/deal.II/numerics/fe_field_function.h
@@ -1,7 +1,7 @@
 //---------------------------------------------------------------------------
 //    $Id$
 //
-//    Copyright (C) 2007, 2009, 2012 by the deal.II authors
+//    Copyright (C) 2007, 2009, 2012, 2013 by the deal.II authors
 //
 //    This file is subject to QPL and may not be distributed
 //    without copyright and license information. Please refer
@@ -110,13 +110,51 @@ namespace Functions
    * tell where your points are, you will save a lot of computational
    * time by letting this class know.
    *
-   * An optimization based on a caching mechanism was used by the
-   * author of this class for the implementation of a Finite Element
-   * Immersed Boundary Method.
    *
-   * \ingroup functions
+   * <h3>Using FEFieldFunction with parallel::distributed::Triangulation</h3>
    *
-   * \author Luca Heltai, 2006, Markus Buerg, 2012
+   * When using this class with a parallel distributed triangulation object
+   * and evaluating the solution at a particular point, not every processor
+   * will own the cell at which the solution is evaluated. Rather, it may be
+   * that the cell in which this point is found is in fact a ghost or
+   * artificial cell (see @ref GlossArtificialCell and
+   * @ref GlossGhostCell). If the cell is artificial, we have no access
+   * to the solution there and functions that evaluate the solution at
+   * such a point will trigger an exception of type
+   * FEFieldFunction::ExcPointNotAvailableHere. The same kind of exception
+   * will also be produced if the cell is a ghost cell: On such cells, one
+   * could in principle evaluate the solution, but it becomes easier if we
+   * do not allow doing so, because then there is exactly one processor in
+   * a parallel distributed computation that can indeed evaluate the
+   * solution. Consequently, it is clear which processor is responsible
+   * for producing output if the point evaluation is done as a postprocessing
+   * step.
+   *
+   * To deal with this situation, you will want to use code like the following
+   * when, for example, evaluating the solution at the origin (here using
+   * a parallel TrilinosWrappers vector to hold the solution):
+   * @code
+   *   Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>
+   *     solution_function (dof_handler, solution);
+   *   Point<dim> origin = Point<dim>();
+   *
+   *   double solution_at_origin;
+   *   bool   point_found = true;
+   *   try
+   *     {
+   *       solution_at_origin = solution_function.value (origin);
+   *     }
+   *   catch (const typename Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>::ExcPointNotAvailableHere &)
+   *     {
+   *       point_found = false;
+   *     }
+   *
+   *   if (point_found == true)
+   *     ...do something...;
+   * @endcode
+   *
+   * @ingroup functions
+   * @author Luca Heltai, 2006, Markus Buerg, 2012, Wolfgang Bangerth, 2013
    */
  template <int dim,
            typename DH=DoFHandler<dim>,
@@ -157,7 +195,7 @@ namespace Functions
     void set_active_cell (const typename DH::active_cell_iterator &newcell);
 
     /**
-     * Get ONE vector value at the
+     * Get one vector value at the
      * given point. It is
      * inefficient to use single
      * points. If you need more
@@ -169,6 +207,12 @@ namespace Functions
      * cell. This is not mandatory,
      * however it does speed things
      * up.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void vector_value (const Point<dim> &p,
                                Vector<double>   &values) const;
@@ -193,6 +237,12 @@ namespace Functions
      * cell. This is not mandatory,
      * however it does speed things
      * up.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual double value (const Point< dim > &p,
                           const unsigned int component = 0) const;
@@ -210,6 +260,12 @@ namespace Functions
      * lie on the same cell. If
      * this is not the case, things
      * may slow down a bit.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void value_list (const std::vector<Point< dim > > &points,
                              std::vector< double > &values,
@@ -228,6 +284,12 @@ namespace Functions
      * lie on the same cell. If
      * this is not the case, things
      * may slow down a bit.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void vector_value_list (const std::vector<Point< dim > > &points,
                                     std::vector< Vector<double> > &values) const;
@@ -246,6 +308,12 @@ namespace Functions
      * cell. This is not mandatory,
      * however it does speed things
      * up.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void
     vector_gradient (const Point< dim > &p,
@@ -265,6 +333,12 @@ namespace Functions
      * cell. This is not mandatory,
      * however it does speed things
      * up.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual Tensor<1,dim> gradient(const Point< dim > &p,
                                    const unsigned int component = 0)const;
@@ -278,6 +352,12 @@ namespace Functions
      * same cell. If this is not
      * the case, things may slow
      * down a bit.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void
     vector_gradient_list (const std::vector< Point< dim > > &p,
@@ -293,6 +373,12 @@ namespace Functions
      * lie on the same cell. If
      * this is not the case, things
      * may slow down a bit.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void
     gradient_list (const std::vector< Point< dim > > &p,
@@ -303,6 +389,12 @@ namespace Functions
     /**
      * Compute the Laplacian of a
      * given component at point p.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual double laplacian (const Point<dim> &p,
@@ -312,6 +404,12 @@ namespace Functions
      * Compute the Laplacian of all
      * components at point p and
      * store them in values.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void vector_laplacian (const Point<dim> &p,
@@ -320,6 +418,12 @@ namespace Functions
     /**
      * Compute the Laplacian of one
      * component at a set of points.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void laplacian_list (const std::vector<Point<dim> > &points,
@@ -329,6 +433,12 @@ namespace Functions
     /**
      * Compute the Laplacians of all
      * components at a set of points.
+     *
+     * @note When using this function on a parallel::distributed::Triangulation
+     * you may get an exception when trying to evaluate the solution at a
+     * point that does not lie in a locally owned cell (see
+     * @ref GlossLocallyOwnedCell). See the section in the general
+     * documentation of this class for more information.
      */
     virtual void vector_laplacian_list (const std::vector<Point<dim> > &points,
@@ -365,12 +475,18 @@ namespace Functions
                              std::vector<std::vector<Point<dim> > > &qpoints,
                              std::vector<std::vector<unsigned int> > &maps) const;
 
+    /**
+     * Exception
+     */
+    DeclException0 (ExcPointNotAvailableHere);
+
   private:
     /**
      * Typedef holding the local cell_hint.
      */
-    typedef Threads::ThreadLocalStorage <
-    typename DH::active_cell_iterator > cell_hint_t;
+    typedef
+    Threads::ThreadLocalStorage <typename DH::active_cell_iterator>
+    cell_hint_t;
 
     /**
      * Pointer to the dof handler.
diff --git a/deal.II/include/deal.II/numerics/fe_field_function.templates.h b/deal.II/include/deal.II/numerics/fe_field_function.templates.h
index 091525fb3e..a5b657c227 100644
--- a/deal.II/include/deal.II/numerics/fe_field_function.templates.h
+++ b/deal.II/include/deal.II/numerics/fe_field_function.templates.h
@@ -1,7 +1,7 @@
 //---------------------------------------------------------------------------
 //    $Id$
 //
-//    Copyright (C) 2007, 2011, 2012 by the deal.II authors
+//    Copyright (C) 2007, 2011, 2012, 2013 by the deal.II authors
 //
 //    This file is subject to QPL and may not be distributed
 //    without copyright and license information. Please refer
@@ -61,13 +61,16 @@ namespace Functions
     typename DH::active_cell_iterator cell = cell_hint.get();
     if (cell == dh->end())
       cell = dh->begin_active();
-
+
     boost::optional<Point<dim> > qp = get_reference_coordinates (cell, p);
     if (!qp)
       {
         const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
           = GridTools::find_active_cell_around_point (mapping, *dh, p);
+        AssertThrow (my_pair.first->is_locally_owned(),
+                     ExcPointNotAvailableHere());
+
         cell = my_pair.first;
         qp = my_pair.second;
       }
@@ -114,8 +117,11 @@ namespace Functions
       qp = get_reference_coordinates (cell, p);
       if (!qp)
         {
-          std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
+          const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
             = GridTools::find_active_cell_around_point (mapping, *dh, p);
+          AssertThrow (my_pair.first->is_locally_owned(),
+                       ExcPointNotAvailableHere());
+
           cell = my_pair.first;
           qp = my_pair.second;
         }
@@ -166,6 +172,9 @@ namespace Functions
         {
           const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
             = GridTools::find_active_cell_around_point (mapping, *dh, p);
+          AssertThrow (my_pair.first->is_locally_owned(),
+                       ExcPointNotAvailableHere());
+
           cell = my_pair.first;
           qp = my_pair.second;
         }
@@ -431,6 +440,9 @@ namespace Functions
         const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
           = GridTools::find_active_cell_around_point (mapping, *dh, points[0]);
+        AssertThrow (my_pair.first->is_locally_owned(),
+                     ExcPointNotAvailableHere());
+
         cell = my_pair.first;
         qp = my_pair.second;
         point_flags[0] = true;
@@ -492,6 +504,9 @@ namespace Functions
         {
           const std::pair<typename DH::active_cell_iterator, Point<dim> > my_pair
             = GridTools::find_active_cell_around_point (mapping, *dh, points[first_outside]);
+          AssertThrow (my_pair.first->is_locally_owned(),
+                       ExcPointNotAvailableHere());
+
           cells.push_back(my_pair.first);
           qpoints.push_back(std::vector<Point<dim> >(1, my_pair.second));
           maps.push_back(std::vector<unsigned int>(1, first_outside));
diff --git a/tests/mpi/fe_field_function_01.cc b/tests/mpi/fe_field_function_01.cc
new file mode 100644
index 0000000000..cfa547fc8c
--- /dev/null
+++ b/tests/mpi/fe_field_function_01.cc
@@ -0,0 +1,152 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2010, 2011, 2013 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// Test Functions::FEFieldFunction for parallel computations. We interpolate
+// a linear function onto a distributed mesh and then evaluate it at
+// individual points: exactly one processor must find each point in a
+// locally owned cell, and the value computed there must match the function.
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include
+
+
+template <int dim>
+class LinearFunction : public Function<dim>
+{
+  public:
+    double value (const Point<dim> &p,
+                  const unsigned int) const
+      {
+        return p[0] + 2;
+      }
+};
+
+
+template <int dim>
+void test()
+{
+  parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD);
+
+  // create a mesh and refine it sufficiently often that only one
+  // processor will have the cell we care about
+  GridGenerator::hyper_cube(tr);
+  tr.refine_global (5);
+
+  const FE_Q<dim> fe(2);
+  DoFHandler<dim> dofh(tr);
+  dofh.distribute_dofs (fe);
+
+  TrilinosWrappers::MPI::Vector interpolated(dofh.locally_owned_dofs(),
+                                             MPI_COMM_WORLD);
+  VectorTools::interpolate (dofh,
+                            LinearFunction<dim>(),
+                            interpolated);
+
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs (dofh, relevant_set);
+  TrilinosWrappers::MPI::Vector x_rel(relevant_set, MPI_COMM_WORLD);
+  x_rel = interpolated;
+
+  Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>
+    field_function (dofh, x_rel);
+
+  for (unsigned int test=0; test<4; ++test)
+    {
+      double value;
+      Point<dim> p = (dim == 2 ?
+                      Point<dim>(test/2+1,test%2+1)/3 :
+                      Point<dim>(test/2+1,test/2+1,test%2+1)/3);
+
+      // see if we can find the point on the current processor
+      bool point_found = false;
+      try
+        {
+          value = field_function.value (p);
+          point_found = true;
+
+          Assert (std::fabs(value - (p[0]+2)) < 1e-8* std::fabs(value + (p[0]+2)),
+                  ExcInternalError());
+        }
+      catch (typename Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>::ExcPointNotAvailableHere &)
+        {
+          point_found = false;
+        }
+
+      Assert (Utilities::MPI::sum(point_found ? 1 : 0, MPI_COMM_WORLD) == 1,
+              ExcInternalError());
+    }
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("fe_field_function_01").c_str());
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      deallog.push("2d");
+      test<2>();
+      deallog.pop();
+
+      deallog.push("3d");
+      test<3>();
+      deallog.pop();
+    }
+  else
+    {
+      deallog.push("2d");
+      test<2>();
+      deallog.pop();
+
+      deallog.push("3d");
+      test<3>();
+      deallog.pop();
+    }
+}
diff --git a/tests/mpi/fe_field_function_01/ncpu_10/cmp/generic b/tests/mpi/fe_field_function_01/ncpu_10/cmp/generic
new file mode 100644
index 0000000000..995f6e446b
--- /dev/null
+++ b/tests/mpi/fe_field_function_01/ncpu_10/cmp/generic
@@ -0,0 +1,3 @@
+
+DEAL:0:2d::OK
+DEAL:0:3d::OK
diff --git a/tests/mpi/fe_field_function_01/ncpu_4/cmp/generic b/tests/mpi/fe_field_function_01/ncpu_4/cmp/generic
new file mode 100644
index 0000000000..995f6e446b
--- /dev/null
+++ b/tests/mpi/fe_field_function_01/ncpu_4/cmp/generic
@@ -0,0 +1,3 @@
+
+DEAL:0:2d::OK
+DEAL:0:3d::OK
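
Beyond the test above, the same try/catch pattern is often combined with an MPI reduction so that the point value becomes known on every processor. The following is a minimal sketch under the same assumptions as the test (a ghosted vector x_rel, a DoFHandler dofh, and a point p lying in the interior of exactly one locally owned cell); the helper name value_at_point_everywhere is purely illustrative and not part of this commit.

    // Sketch: evaluate the field at 'p' on the one processor that owns the
    // surrounding cell; every other processor catches ExcPointNotAvailableHere
    // and contributes zero, so the MPI sum equals the locally computed value.
    template <int dim>
    double value_at_point_everywhere (const DoFHandler<dim>               &dofh,
                                      const TrilinosWrappers::MPI::Vector &x_rel,
                                      const Point<dim>                    &p)
    {
      Functions::FEFieldFunction<dim,DoFHandler<dim>,TrilinosWrappers::MPI::Vector>
        field_function (dofh, x_rel);

      double local_value = 0.;
      try
        {
          local_value = field_function.value (p);
        }
      catch (const typename Functions::FEFieldFunction<dim,DoFHandler<dim>,
                                                       TrilinosWrappers::MPI::Vector>::ExcPointNotAvailableHere &)
        {
          local_value = 0.;
        }

      return Utilities::MPI::sum (local_value, MPI_COMM_WORLD);
    }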