From: Wolfgang Bangerth Date: Fri, 15 May 2009 17:59:46 +0000 (+0000) Subject: Merge branch_threading_building_blocks. X-Git-Tag: v8.0.0~7688 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=cd299b0d8f6d232b01e852de6bc75f3d6ac095a4;p=dealii.git Merge branch_threading_building_blocks. git-svn-id: https://svn.dealii.org/trunk@18849 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/Makefile b/deal.II/Makefile index de687ebc19..4ccc8def6f 100644 --- a/deal.II/Makefile +++ b/deal.II/Makefile @@ -52,16 +52,16 @@ deps: common/scripts/make_dependencies common/scripts/expand_instantiations cd $D/deal.II && $(MAKE) $(MAKEOPTIONS) Makefile.dep cd $D/lib && $(MAKE) $(MAKEOPTIONS) external-links -baseg: deps contrib-functionparser +baseg: deps contrib cd $D/base && $(MAKE) $(MAKEOPTIONS) libg -baseo: deps contrib-functionparser +baseo: deps contrib cd $D/base && $(MAKE) $(MAKEOPTIONS) libo -lacg: deps baseg contrib +lacg: deps baseg cd $D/lac && $(MAKE) $(MAKEOPTIONS) libg -laco: deps baseo contrib +laco: deps baseo cd $D/lac && $(MAKE) $(MAKEOPTIONS) libo 1dg: deps baseg lacg @@ -92,7 +92,7 @@ lac: lacg laco $(LIBDIR): @mkdir $@ -baseg baseo base lacg laco lac 1dg 1do 1d 2dg 2do 2d 3dg 3do 3d all debug contrib -functionparser : $(LIBDIR) deps +baseg baseo base lacg laco lac 1dg 1do 1d 2dg 2do 2d 3dg 3do 3d all debug contrib functionparser : $(LIBDIR) deps all: debug optimized debug: contrib baseg lacg 2dg 1dg 3dg @@ -214,8 +214,7 @@ build-test: -(./configure $(BUILDTESTFLAGS) 2>&1) > build-test-config if test -f common/Make.global_options ; then \ grep '^enable-shared' common/Make.global_options | grep = ; \ - grep enable-multithreading common/Make.global_options | grep = ; \ - grep with-multithreading common/Make.global_options | grep = ; \ + grep enable-threads common/Make.global_options | grep = ; \ grep USE_CONTRIB_PETSC common/Make.global_options | grep = ; \ grep USE_CONTRIB_METIS common/Make.global_options | grep = ; \ grep USE_CONTRIB_HSL common/Make.global_options | grep = ; \ diff --git a/deal.II/aclocal.m4 b/deal.II/aclocal.m4 index c23072956b..dc856965b9 100644 --- a/deal.II/aclocal.m4 +++ b/deal.II/aclocal.m4 @@ -644,6 +644,11 @@ AC_DEFUN(DEAL_II_SET_CXX_FLAGS, dnl dnl Use -Wno-long-long on Apple Darwin to avoid some unnecessary dnl warnings. However, newer gccs on that platform do not have dnl this flag any more, so check whether we can indeed do this + dnl + dnl Also, the TBB tries to determine whether the system is + dnl 64-bit enabled and if so, it builds the object files in 64bit. + dnl In order to be able to link with these files, we then have to + dnl link with -m64 as well *apple-darwin*) OLD_CXXFLAGS="$CXXFLAGS" CXXFLAGS=-Wno-long-double @@ -659,6 +664,13 @@ AC_DEFUN(DEAL_II_SET_CXX_FLAGS, dnl AC_MSG_RESULT(no) ]) + if test "`/usr/sbin/sysctl -n hw.optional.x86_64`" = "1" ; then + CXXFLAGS="$CXXFLAGS -m64" + CXXFLAGSG="$CXXFLAGSG -m64" + CXXFLAGSO="$CXXFLAGSO -m64" + LDFLAGS="$LDFLAGS -m64" + fi + CXXFLAGS="${OLD_CXXFLAGS}" ;; @@ -1516,6 +1528,16 @@ AC_DEFUN(DEAL_II_SET_CC_FLAGS, dnl CFLAGSPIC= ;; + *apple-darwin*) + dnl The TBB tries to determine whether the system is + dnl 64-bit enabled and if so, it builds the object files in 64bit. 
+ dnl In order to be able to link with these files, we then have to + dnl link with -m64 as well + if test "`/usr/sbin/sysctl -n hw.optional.x86_64`" = "1" ; then + CFLAGS="$CFLAGS -m64" + fi + ;; + *) CFLAGSPIC="-fPIC" ;; @@ -2082,28 +2104,8 @@ AC_DEFUN(DEAL_II_GET_THREAD_FLAGS, dnl dnl ------------------------------------------------------------- -dnl Test whether multithreading support is requested. This -dnl does not tell deal.II to actually use it, but the -dnl compiler flags can be set to allow for it. If the user specified -dnl --enable-multithreading, then set $enablemultithreading=yes, -dnl otherwise $enablemultithreading=no. +dnl Test whether multithreading support is requested. dnl -dnl Usage: -dnl DEAL_II_CHECK_MULTITHREADING -dnl -dnl ------------------------------------------------------------- -AC_DEFUN(DEAL_II_CHECK_MULTITHREADING, dnl -[ - AC_ARG_ENABLE(multithreading, - [ --enable-multithreading Set compiler flags to allow for multithreaded - programs], - enablemultithreading="$enableval", - enablemultithreading=no) -]) - - - -dnl ------------------------------------------------------------- dnl If multithreading support is requested, figure out the right dnl compiler flags to use: dnl - Use the right -threads/-pthread/-mthread option @@ -2121,12 +2123,38 @@ dnl better to put the flags into the command line, since then we have them dnl defined for all include files. dnl dnl Usage: -dnl DEAL_II_SET_MULTITHREADING_FLAGS +dnl DEAL_II_CHECK_MULTITHREADING dnl dnl ------------------------------------------------------------- -AC_DEFUN(DEAL_II_SET_MULTITHREADING_FLAGS, dnl +AC_DEFUN(DEAL_II_CHECK_MULTITHREADING, dnl [ - if test "$enablemultithreading" = yes ; then + AC_ARG_ENABLE(threads, + [ --enable-threads Use multiple threads inside deal.II], + [ + enablethreads="$enableval" + ], + [ + dnl Set default to yes unless on cygwin where the TBB is + dnl currently not yet ported to + case "$target" in + *cygwin* ) + enablethreads=no + ;; + + * ) + enablethreads=yes + ;; + esac + ]) + + if test "$enablethreads" = yes ; then + dnl On cygwin, the TBB does not compile. Error out. + case "$target" in + *cygwin* ) + AC_MSG_ERROR(Multithreading is not supported on CygWin) + ;; + esac + if test "$GXX" = yes ; then DEAL_II_GET_THREAD_FLAGS DEAL_II_THREAD_CPPFLAGS @@ -2166,7 +2194,27 @@ AC_DEFUN(DEAL_II_SET_MULTITHREADING_FLAGS, dnl ;; esac fi + + DEAL_II_CHECK_POSIX_THREAD_FUNCTIONS + AC_DEFINE(DEAL_II_USE_MT_POSIX, 1, + [Defined if multi-threading is to be achieved by using + the POSIX functions]) + + dnl In any case, we need to link with libdl since the Threading + dnl Building Blocks require this + LDFLAGS="$LDFLAGS -ldl" fi + + if test "x$enablethreads" != "xno" ; then + DEAL_II_USE_MT_VAL=1 + else + DEAL_II_USE_MT_VAL=0 + fi + + AC_DEFINE_UNQUOTED(DEAL_II_USE_MT, $DEAL_II_USE_MT_VAL, + [Flag indicating whether the library shall be + compiled for multithreaded applications. If so, + then it is set to one, otherwise to zero.]) ]) @@ -2188,7 +2236,7 @@ dnl dnl ------------------------------------------------------------- AC_DEFUN(DEAL_II_CHECK_PARTLY_BRACKETED_INITIALIZER, dnl [ - if test "$enablemultithreading" = yes ; then + if test "$enablethreads" = yes ; then case "$GXX_VERSION" in gcc*) AC_MSG_CHECKING(for only partly bracketed mutex initializer) @@ -2216,55 +2264,6 @@ AC_DEFUN(DEAL_II_CHECK_PARTLY_BRACKETED_INITIALIZER, dnl -dnl ------------------------------------------------------------- -dnl Test which library the MT code shall use to support threads. 
-dnl We used to support either POSIX or ACE, but support for ACE -dnl has now been deleted and we only use POSIX these days. However, -dnl the code to pass another name than "posix" remains here in -dnl case someone wants to hook in support for other thread -dnl implementations -dnl -dnl Usage: -dnl DEAL_II_CHECK_USE_MT -dnl -dnl ------------------------------------------------------------- -AC_DEFUN(DEAL_II_CHECK_USE_MT, dnl -[ - AC_ARG_WITH(multithreading, - [ --with-multithreading=name - If name==posix, or no name given, then use POSIX - threads.], - withmultithreading="$withval", - withmultithreading=no) - -dnl Default (i.e. no arg) means POSIX - if test "x$withmultithreading" = "xyes" ; then - withmultithreading=posix - fi - - if test "x$withmultithreading" != "xno" ; then - if test "x$withmultithreading" = "xposix" ; then - DEAL_II_CHECK_POSIX_THREAD_FUNCTIONS - AC_DEFINE(DEAL_II_USE_MT_POSIX, 1, - [Defined if multi-threading is to be achieved by using - the POSIX functions]) - else - AC_MSG_ERROR(Invalid flag for --with-multithreading) - fi - - DEAL_II_USE_MT_VAL=1 - else - DEAL_II_USE_MT_VAL=0 - fi - - AC_DEFINE_UNQUOTED(DEAL_II_USE_MT, $DEAL_II_USE_MT_VAL, - [Flag indicating whether the library shall be - compiled for multithreaded applications. If so, - then it is set to one, otherwise to zero.]) -]) - - - dnl ------------------------------------------------------------- dnl Check whether the POSIX functions needed for multi-threading dnl are available on this system @@ -4238,6 +4237,48 @@ AC_DEFUN(DEAL_II_CHECK_EXPLICIT_CONSTRUCTOR_BUG, dnl +dnl ------------------------------------------------------------- +dnl Some older versions of gcc deduce pointers to const functions in +dnl template contexts to pointer-to-function of const objects. +dnl This is not correct +dnl +dnl Check for this misfeature. +dnl +dnl Usage: DEAL_II_CHECK_CONST_MEMBER_DEDUCTION_BUG +dnl +dnl -------------------------------------------------------------- +AC_DEFUN(DEAL_II_CHECK_CONST_MEMBER_DEDUCTION_BUG, dnl +[ + AC_MSG_CHECKING(for const member deduction bug) + AC_LANG(C++) + CXXFLAGS="$CXXFLAGSG" + AC_TRY_COMPILE( + [ + template struct identity { typedef T type; }; + + template void new_thread (void (C::*fun_ptr)(), + typename identity::type &c); + template void new_thread (void (C::*fun_ptr)() const, + const typename identity::type &c); + struct X { void f() const; }; + ], + [ + X x; + new_thread (&X::f, x); + ], + [ + AC_MSG_RESULT(no) + ], + [ + AC_MSG_RESULT(yes) + AC_DEFINE(DEAL_II_CONST_MEMBER_DEDUCTION_BUG, 1, + [Defined if the compiler has a bug in deducing + the type of pointers to const member functions.]) + ]) +]) + + + dnl ------------------------------------------------------------- dnl Check for GCC bug 36052, see dnl http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36052 diff --git a/deal.II/base/Makefile b/deal.II/base/Makefile index 80da5b0661..9804c154c7 100644 --- a/deal.II/base/Makefile +++ b/deal.II/base/Makefile @@ -17,20 +17,16 @@ h-files = $(sort $(shell echo include/base/*.h)) # get options which hold for all files of the project include $D/common/Make.global_options + # there's an extra .o file for the function parser that we need to link into -# libbase +# libbase. 
do similarly for the threading building block things if threading +# is enabled ifeq ($(enable-parser),yes) - extra-o-files = $(LIBDIR)/contrib/functionparser/fparser.$(OBJEXT) + extra-o-files = $(LIBDIR)/contrib/functionparser/fparser.$(OBJEXT) + extra-g.o-files = $(LIBDIR)/contrib/functionparser/fparser.$(OBJEXT) endif - # production rules -ifneq ($(with-multithreading),no) - MT = MT -else - MT = == -endif - $(LIBDIR)/base/%.g.$(OBJEXT) : @echo "=====base=============debug======$(MT)== $( */ #undef DEAL_II_COMPILER_SUPPORTS_MPI +/* Defined if the compiler has a bug in deducing the type of pointers to const + member functions. */ +#undef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /* disable the function parser in contrib */ #undef DEAL_II_DISABLE_PARSER diff --git a/deal.II/base/include/base/parallel.h b/deal.II/base/include/base/parallel.h new file mode 100644 index 0000000000..26b6fd7cb3 --- /dev/null +++ b/deal.II/base/include/base/parallel.h @@ -0,0 +1,917 @@ +//--------------------------------------------------------------------------- +// $Id: parallel.h 14038 2006-10-23 02:46:34Z bangerth $ +// Version: $Name$ +// +// Copyright (C) 2008, 2009 by the deal.II authors +// +// This file is subject to QPL and may not be distributed +// without copyright and license information. Please refer +// to the file deal.II/doc/license.html for the text and +// further information on this license. +// +//--------------------------------------------------------------------------- +#ifndef __deal2__parallel_h +#define __deal2__parallel_h + + +#include +#include +#include + +#include +#include +#include + +#include +#include + +#if DEAL_II_USE_MT == 1 +# include +# include +# include +# include +#endif + + +//TODO[WB]: allow calling functions to pass along a tbb::affinity_partitioner object to ensure that subsequent calls use the same cache lines + +DEAL_II_NAMESPACE_OPEN + +/** + * A namespace in which we define a few algorithms that can run in parallel + * when deal.II is configured to use multiple threads. + * + * @ingroup threads + * @author Wolfgang Bangerth, 2008, 2009 + */ +namespace parallel +{ + namespace internal + { + /** + * A class that represents a set of + * iterators each of which are + * incremented by one at the same + * time. This is typically used in calls + * like std::transform(a.begin(), + * a.end(), b.begin(), functor); + * where we have synchronous iterators + * marching through the containers + * a,b. If an object of this + * type represents the end of a range, + * only the first element is considered + * (we only have a.end(), + * not b.end()) + * + * The template argument of the current + * class shall be of type + * std_cxx1x::tuple with + * arguments equal to the iterator types. + * + * This type, and the helper functions + * associated with it, are used as the + * Value concept for the blocked_range + * type of the Threading Building Blocks. + */ + template + struct SynchronousIterators + { + /** + * Constructor. + */ + SynchronousIterators (const Iterators &i); + + /** + * Copy constructor. + */ + SynchronousIterators (const SynchronousIterators &i); + + /** + * Storage for the iterators + * represented by the current class. 
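+ *
+ * As a sketch of how this class is meant to be
+ * used (mirroring what the transform() functions
+ * further down in this file do; the container
+ * names are made up, and unqualified names refer
+ * to the current namespace):
+ * @code
+ * typedef std_cxx1x::tuple<std::vector<double>::const_iterator,
+ *                          std::vector<double>::iterator>
+ *   Iterators;
+ *
+ * Iterators begin (in.begin(), out.begin());
+ * SynchronousIterators<Iterators> p (begin);
+ *
+ * // move both iterators forward by ten elements
+ * advance (p.iterators, 10);
+ * @endcode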
+ */ + Iterators iterators; + }; + + + + template + inline + SynchronousIterators:: + SynchronousIterators (const Iterators &i) + : + iterators (i) + {} + + + template + inline + SynchronousIterators:: + SynchronousIterators (const SynchronousIterators &i) + : + iterators (i.iterators) + {} + + + + /** + * Return whether the first element of + * the first argument is less than the + * first element of the second + * argument. Since the objects compared + * march forward all elements at the same + * time, comparing the first element is + * sufficient. + */ + template + inline + bool + operator< (const SynchronousIterators &a, + const SynchronousIterators &b) + { + return std_cxx1x::get<0>(a.iterators) < std_cxx1x::get<0>(b.iterators); + } + + + + /** + * Return the distance between the first + * and the second argument. Since the + * objects compared march forward all + * elements at the same time, + * differencing the first element is + * sufficient. + */ + template + inline + std::size_t + operator- (const SynchronousIterators &a, + const SynchronousIterators &b) + { + Assert (std::distance (std_cxx1x::get<0>(b.iterators), + std_cxx1x::get<0>(a.iterators)) >= 0, + ExcInternalError()); + return std::distance (std_cxx1x::get<0>(b.iterators), + std_cxx1x::get<0>(a.iterators)); + } + + + /** + * Advance a tuple of iterators by $n$. + */ + template + inline + void advance (std_cxx1x::tuple &t, + const unsigned int n) + { + std::advance (std_cxx1x::get<0>(t), n); + std::advance (std_cxx1x::get<1>(t), n); + } + + /** + * Advance a tuple of iterators by $n$. + */ + template + inline + void advance (std_cxx1x::tuple &t, + const unsigned int n) + { + std::advance (std_cxx1x::get<0>(t), n); + std::advance (std_cxx1x::get<1>(t), n); + std::advance (std_cxx1x::get<2>(t), n); + } + + /** + * Advance a tuple of iterators by $n$. + */ + template + inline + void advance (std_cxx1x::tuple &t, + const unsigned int n) + { + std::advance (std_cxx1x::get<0>(t), n); + std::advance (std_cxx1x::get<1>(t), n); + std::advance (std_cxx1x::get<2>(t), n); + std::advance (std_cxx1x::get<3>(t), n); + } + + + + /** + * Advance a tuple of iterators by 1. + */ + template + inline + void advance_by_one (std_cxx1x::tuple &t) + { + ++std_cxx1x::get<0>(t); + ++std_cxx1x::get<1>(t); + } + + /** + * Advance a tuple of iterators by 1. + */ + template + inline + void advance_by_one (std_cxx1x::tuple &t) + { + ++std_cxx1x::get<0>(t); + ++std_cxx1x::get<1>(t); + ++std_cxx1x::get<2>(t); + } + + /** + * Advance a tuple of iterators by 1. + */ + template + inline + void advance_by_one (std_cxx1x::tuple &t) + { + ++std_cxx1x::get<0>(t); + ++std_cxx1x::get<1>(t); + ++std_cxx1x::get<2>(t); + ++std_cxx1x::get<3>(t); + } + + + + /** + * Advance the elements of this iterator + * by $n$. + */ + template + inline + SynchronousIterators + operator + (const SynchronousIterators &a, + const std::size_t n) + { + SynchronousIterators x (a); + parallel::internal::advance (x.iterators, n); + return x; + } + + /** + * Advance the elements of this iterator + * by 1. + */ + template + inline + SynchronousIterators + operator ++ (SynchronousIterators &a) + { + parallel::internal::advance_by_one (a.iterators); + return a; + } + + + /** + * Compare synch iterators for + * inequality. Since they march in synch, + * comparing only the first element is + * sufficient. 
+ */ + template + inline + bool + operator != (const SynchronousIterators &a, + const SynchronousIterators &b) + { + return (std_cxx1x::get<0>(a.iterators) != + std_cxx1x::get<0>(b.iterators)); + } + + + /** + * Convert a function object of type F + * into an object that can be applied to + * all elements of a range of synchronous + * iterators. + */ + template + struct Body + { + /** + * Constructor. Take and package the + * given function object. + */ + Body (const F &f) + : + f (f) + {} + + template + void + operator () (const Range &range) const + { + for (typename Range::const_iterator p=range.begin(); + p != range.end(); ++p) + apply (f, p.iterators); + } + + private: + /** + * The stored function object. + */ + const F f; + + /** + * Apply F to a set of iterators with + * two elements. + */ + template + static + void + apply (const F &f, + const std_cxx1x::tuple &p) + { + *std_cxx1x::get<1>(p) = f (*std_cxx1x::get<0>(p)); + } + + /** + * Apply F to a set of iterators with + * three elements. + */ + template + static + void + apply (const F &f, + const std_cxx1x::tuple &p) + { + *std_cxx1x::get<2>(p) = f (*std_cxx1x::get<0>(p), + *std_cxx1x::get<1>(p)); + } + + /** + * Apply F to a set of iterators with + * three elements. + */ + template + static + void + apply (const F &f, + const std_cxx1x::tuple &p) + { + *std_cxx1x::get<3>(p) = f (*std_cxx1x::get<0>(p), + *std_cxx1x::get<1>(p), + *std_cxx1x::get<2>(p)); + } + }; + + + /** + * Take a function object and create a + * Body object from it. We do this in + * this helper function since + * alternatively we would have to specify + * the actual data type of F -- which for + * function objects is often + * extraordinarily complicated. + */ + template + Body make_body(const F &f) + { + return Body(f); + } + } + + /** + * An algorithm that performs the action + * *out++ = predicate(*in++) + * where the in iterator + * ranges over the given input range. + * + * This algorithm does pretty much what + * std::transform does. The difference is + * that the function can run in parallel + * when deal.II is configured to use + * multiple threads. In that case, the last + * argument denotes the minimum number each + * thread can work on; the number must be + * large enough to amortize the startup + * cost of new threads, and small enough to + * ensure that (i) threads will be started + * at all, and (ii) threads can be + * reasonably load balanced. + * + * For a discussion of the kind of + * problems to which this function + * is applicable, see the + * @ref threads "Parallel computing with multiple processors" + * module. + */ + template + void transform (const InputIterator &begin_in, + const InputIterator &end_in, + OutputIterator out, + Predicate &predicate, + const unsigned int grainsize) + { +#if DEAL_II_USE_MT == 0 + // make sure we don't get compiler + // warnings about unused arguments + (void) grainsize; + + for (OutputIterator in = begin_in; in != end_in;) + *out++ = predicate (*in++); +#else + typedef std_cxx1x::tuple Iterators; + typedef internal::SynchronousIterators SyncIterators; + Iterators x_begin (begin_in, out); + Iterators x_end (end_in, OutputIterator()); + tbb::parallel_for (tbb::blocked_range(x_begin, + x_end, + grainsize), + internal::make_body (predicate), + tbb::auto_partitioner()); +#endif + } + + + + /** + * An algorithm that performs the action + * *out++ = predicate(*in1++, *in2++) + * where the in1 iterator + * ranges over the given input range. + * + * This algorithm does pretty much what + * std::transform does. 
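+ * For example, a minimal sketch (assuming x, y
+ * and z are std::vector<double> objects of equal
+ * size, and that <functional> is included for
+ * std::plus) that computes the element-wise sum
+ * z = x + y looks like this:
+ * @code
+ * std::plus<double> op;
+ * parallel::transform (x.begin(), x.end(),  // first input range
+ *                      y.begin(),           // second input range
+ *                      z.begin(),           // output
+ *                      op,
+ *                      1000);               // minimal grain size
+ * @endcode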
The difference is + * that the function can run in parallel + * when deal.II is configured to use + * multiple threads. In that case, the last + * argument denotes the minimum number each + * thread can work on; the number must be + * large enough to amortize the startup + * cost of new threads, and small enough to + * ensure that (i) threads will be started + * at all, and (ii) threads can be + * reasonably load balanced. + * + * For a discussion of the kind of + * problems to which this function + * is applicable, see the + * @ref threads "Parallel computing with multiple processors" + * module. + */ + template + void transform (const InputIterator1 &begin_in1, + const InputIterator1 &end_in1, + InputIterator2 in2, + OutputIterator out, + Predicate &predicate, + const unsigned int grainsize) + { +#if DEAL_II_USE_MT == 0 + // make sure we don't get compiler + // warnings about unused arguments + (void) grainsize; + + for (OutputIterator in1 = begin_in1; in1 != end_in1;) + *out++ = predicate (*in1++, *in2++); +#else + typedef + std_cxx1x::tuple + Iterators; + typedef internal::SynchronousIterators SyncIterators; + Iterators x_begin (begin_in1, in2, out); + Iterators x_end (end_in1, InputIterator2(), OutputIterator()); + tbb::parallel_for (tbb::blocked_range(x_begin, + x_end, + grainsize), + internal::make_body (predicate), + tbb::auto_partitioner()); +#endif + } + + + + /** + * An algorithm that performs the action + * *out++ = predicate(*in1++, *in2++, *in3++) + * where the in1 iterator + * ranges over the given input range. + * + * This algorithm does pretty much what + * std::transform does. The difference is + * that the function can run in parallel + * when deal.II is configured to use + * multiple threads. In that case, the last + * argument denotes the minimum number each + * thread can work on; the number must be + * large enough to amortize the startup + * cost of new threads, and small enough to + * ensure that (i) threads will be started + * at all, and (ii) threads can be + * reasonably load balanced. + * + * For a discussion of the kind of + * problems to which this function + * is applicable, see the + * @ref threads "Parallel computing with multiple processors" + * module. + */ + template + void transform (const InputIterator1 &begin_in1, + const InputIterator1 &end_in1, + InputIterator2 in2, + InputIterator3 in3, + OutputIterator out, + Predicate &predicate, + const unsigned int grainsize) + { +#if DEAL_II_USE_MT == 0 + // make sure we don't get compiler + // warnings about unused arguments + (void) grainsize; + + for (OutputIterator in1 = begin_in1; in1 != end_in1;) + *out++ = predicate (*in1++, *in2++, *in3++); +#else + typedef + std_cxx1x::tuple + Iterators; + typedef internal::SynchronousIterators SyncIterators; + Iterators x_begin (begin_in1, in2, in3, out); + Iterators x_end (end_in1, InputIterator2(), + InputIterator3(), OutputIterator()); + tbb::parallel_for (tbb::blocked_range(x_begin, + x_end, + grainsize), + internal::make_body (predicate), + tbb::auto_partitioner()); +#endif + } + + + namespace internal + { +#if DEAL_II_USE_MT + /** + * Take a range argument and call the + * given function with its begin and end. + */ + template + void apply_to_subranges (const tbb::blocked_range &range, + const Function &f) + { + f (range.begin(), range.end()); + } +#endif + } + + + /** + * This function applies the given function + * argument @p f to all elements in the range + * [begin,end) and may do so + * in parallel. 
+ * + * However, in many cases it is not + * efficient to call a function on each + * element, so this function calls the + * given function object on sub-ranges. In + * other words: if the given range + * [begin,end) is smaller than + * grainsize or if multithreading is not + * enabled, then we call + * f(begin,end); otherwise, we + * may execute, possibly in %parallel, a + * sequence of calls f(b,e) + * where [b,e) are + * subintervals of [begin,end) + * and the collection of calls we do to + * f(.,.) will happen on + * disjoint subintervals that collectively + * cover the original interval + * [begin,end). + * + * Oftentimes, the called function will of + * course have to get additional + * information, such as the object to work + * on for a given value of the iterator + * argument. This can be achieved by + * binding certain arguments. For + * example, here is an implementation of a + * matrix-vector multiplication $y=Ax$ for + * a full matrix $A$ and vectors $x,y$: + * @code + * void matrix_vector_product (const FullMatrix &A, + * const Vector &x, + * Vector &y) + * { + * parallel::apply_to_subranges + * (0, A.n_rows(), + * std_cxx1x::bind (&mat_vec_on_subranges, + * _1, _2, + * std_cxx1x::cref(A), + * std_cxx1x::cref(x), + * std_cxx1x::ref(y)), + * 50); + * } + * + * void mat_vec_on_subranges (const unsigned int begin_row, + * const unsigned int end_row, + * const FullMatrix &A, + * const Vector &x, + * Vector &y) + * { + * for (unsigned int row=begin_row; row!=end_row; ++row) + * for (unsigned int col=0; colstd_cxx1x::bind function to + * convert + * mat_vec_on_subranged from a + * function that takes 5 arguments to one + * taking 2 by binding the remaining + * arguments (the modifiers + * std_cxx1x::ref and + * std_cxx1x::cref make sure + * that the enclosed variables are actually + * passed by reference and constant + * reference, rather than by value). The + * resulting function object requires only + * two arguments, begin_row and end_row, + * with all other arguments fixed. + * + * The code, if in single-thread mode, will + * call mat_vec_on_subranges + * on the entire range + * [0,n_rows) exactly once. In + * multi-threaded mode, however, it may be + * called multiple times on subranges of + * this interval, possibly allowing more + * than one CPU core to take care of part + * of the work. + * + * The @p grainsize argument (50 in the + * example above) makes sure that subranges + * do not become too small, to avoid + * spending more time on scheduling + * subranges to CPU resources than on doing + * actual work. + * + * For a discussion of the kind of + * problems to which this function + * is applicable, see also the + * @ref threads "Parallel computing with multiple processors" + * module. + */ + template + void apply_to_subranges (const RangeType &begin, + const typename identity::type &end, + const Function &f, + const unsigned int grainsize) + { +#if DEAL_II_USE_MT == 0 + // make sure we don't get compiler + // warnings about unused arguments + (void) grainsize; + + f (begin, end); +#else + tbb::parallel_for (tbb::blocked_range + (begin, end, grainsize), + std_cxx1x::bind (&internal::apply_to_subranges, + _1, + std_cxx1x::cref(f)), + tbb::auto_partitioner()); +#endif + } + + + + namespace internal + { +#if DEAL_II_USE_MT == 1 + /** + * A class that conforms to the Body + * requirements of the TBB + * parallel_reduce function. The first + * template argument denotes the type on + * which the reduction is to be done. 
The + * second denotes the type of the + * function object that shall be called + * for each subrange. + */ + template + struct ReductionOnSubranges + { + /** + * A variable that will hold the + * result of the reduction. + */ + ResultType result; + + /** + * Constructor. Take the function + * object to call on each sub-range + * as well as the neutral element + * with respect to the reduction + * operation. + * + * The second argument denotes a + * function object that will be use + * to reduce the result of two + * computations into one number. An + * example if we want to simply + * accumulate integer results would + * be std::plus(). + */ + template + ReductionOnSubranges (const Function &f, + const Reductor &reductor, + const ResultType neutral_element = ResultType()) + : + result (neutral_element), + f (f), + neutral_element (neutral_element), + reductor (reductor) + {} + + /** + * Splitting constructor. See the TBB + * book for more details about this. + */ + ReductionOnSubranges (const ReductionOnSubranges &r, + tbb::split) + : + result (r.neutral_element), + f (r.f), + neutral_element (r.neutral_element), + reductor (r.reductor) + {} + + /** + * Join operation: merge the results + * from computations on different + * sub-intervals. + */ + void join (const ReductionOnSubranges &r) + { + result = reductor(result, r.result); + } + + /** + * Execute the given function on the + * specified range. + */ + template + void operator () (const tbb::blocked_range &range) + { + result = reductor(result, + f (range.begin(), range.end())); + } + + private: + /** + * The function object to call on + * every sub-range. + */ + const Function f; + + /** + * The neutral element with respect + * to the reduction operation. This + * is needed when calling the + * splitting constructor since we + * have to re-set the result variable + * in this case. + */ + const ResultType neutral_element; + + /** + * The function object to be used to + * reduce the result of two calls + * into one number. + */ + const std_cxx1x::function reductor; + }; +#endif + } + + + /** + * This function works a lot like the + * apply_to_subranges(), but it allows to + * accumulate numerical results computed on + * each subrange into one number. The type + * of this number is given by the + * ResultType template argument that needs + * to be explicitly specified. + * + * An example of use of this function is to + * compute the value of the expression $x^T + * A x$ for a square matrix $A$ and a + * vector $x$. The sum over rows can be + * parallelized and the whole code might + * look like this: + * @code + * void matrix_norm (const FullMatrix &A, + * const Vector &x) + * { + * return + * std::sqrt + * (parallel::accumulate_from_subranges + * (0, A.n_rows(), + * std_cxx1x::bind (&mat_norm_sqr_on_subranges, + * _1, _2, + * std_cxx1x::cref(A), + * std_cxx1x::cref(x)), + * 50); + * } + * + * double + * mat_norm_sqr_on_subranges (const unsigned int begin_row, + * const unsigned int end_row, + * const FullMatrix &A, + * const Vector &x) + * { + * double norm_sqr = 0; + * for (unsigned int row=begin_row; row!=end_row; ++row) + * for (unsigned int col=0; colmat_norm_sqr_on_subranges + * is called on the entire range + * [0,A.n_rows()) if this + * range is less than the minimum grainsize + * (above chosen as 50) or if deal.II is + * configured to not use + * multithreading. Otherwise, it may be + * called on subsets of the given range, + * with results from the individual + * subranges accumulated internally. 
+ * + * @warning If ResultType is a floating point + * type, then accumulation is not a + * commutative operation. In other words, + * if the given function object is called + * three times on three subranges, + * returning values $a,b,c$, then the + * returned result of this function is + * $a+b+c$. However, depending on how the + * three sub-tasks are distributed on + * available CPU resources, the result may + * also be $a+c+b$ or any other + * permutation; because floating point + * addition does not commute (as oppose, of + * course, to addition of real %numbers), + * the result of invoking this function + * several times may differ on the order of + * round-off. + * + * For a discussion of the kind of + * problems to which this function + * is applicable, see also the + * @ref threads "Parallel computing with multiple processors" + * module. + */ + template + ResultType accumulate_from_subranges (const Function &f, + const RangeType &begin, + const typename identity::type &end, + const unsigned int grainsize) + { +#if DEAL_II_USE_MT == 0 + // make sure we don't get compiler + // warnings about unused arguments + (void) grainsize; + + return f(begin,end); +#else + internal::ReductionOnSubranges + reductor (f, std::plus(), 0); + tbb::parallel_reduce (tbb::blocked_range(begin, end, grainsize), + reductor, + tbb::auto_partitioner()); + return reductor.result; +#endif + } + +} + + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/deal.II/base/include/base/template_constraints.h b/deal.II/base/include/base/template_constraints.h index 9c040a0f69..223a8a9590 100644 --- a/deal.II/base/include/base/template_constraints.h +++ b/deal.II/base/include/base/template_constraints.h @@ -101,6 +101,65 @@ template struct constraint_and_return_value +/** + * A template class that simply exports its template argument as a local + * typedef. This class, while at first appearing useless, makes sense in the + * following context: if you have a function template as follows: + * @code + * template void f(T, T); + * @endcode + * then it can't be called in an expression like f(1, 3.141) + * because the type T of the template can not be deduced + * in a unique way from the types of the arguments. However, if the + * template is written as + * @code + * template void f(T, typename identity::type); + * @endcode + * then the call becomes valid: the type T is not deducible + * from the second argument to the function, so only the first argument + * participates in template type resolution. + * + * The context for this feature is as follows: consider + * @code + * template + * void forward_call(RT (*p) (A), A a) { p(a); } + * + * void h (double); + * + * void g() + * { + * forward_call(&h, 1); + * } + * @endcode + * This code fails to compile because the compiler can't decide whether the + * template type A should be double (from the + * signature of the function given as first argument to + * forward_call, or int because the expression + * 1 has that type. Of course, what we would like the compiler + * to do is simply cast the 1 to double. We can + * achieve this by writing the code as follows: + * @code + * template + * void forward_call(RT (*p) (A), typename identity::type a) { p(a); } + * + * void h (double); + * + * void g() + * { + * forward_call(&h, 1); + * } + * @endcode + * + * @author Wolfgang Bangerth, 2008 + */ +template +struct identity +{ + typedef T type; +}; + + + /** * A class to perform comparisons of arbitrary pointers for equality. 
In some * circumstances, one would like to make sure that two arguments to a function diff --git a/deal.II/base/include/base/thread_management.h b/deal.II/base/include/base/thread_management.h index 99ae5fab68..d60a00de41 100644 --- a/deal.II/base/include/base/thread_management.h +++ b/deal.II/base/include/base/thread_management.h @@ -16,6 +16,7 @@ #include #include +#include #include #include #include @@ -33,8 +34,21 @@ #include +#if DEAL_II_USE_MT == 1 +# if defined(DEAL_II_USE_MT_POSIX) +# include +# endif +# include +# include +#endif + + + DEAL_II_NAMESPACE_OPEN +/*!@addtogroup threads */ +/*@{*/ + /** * A namespace for the implementation of thread management in @@ -48,14 +62,14 @@ namespace Threads { /** * This class is used instead of a true lock class when not using - * multithreading. It allows to write programs such that they start - * new threads and/or lock objects in multithreading mode, and use - * dummy thread management and synchronisation classes instead when - * running in single-thread mode. Specifically, the spawn functions - * only call the function but wait for it to return instead of running - * in on another thread, and the mutices do nothing really. The only - * reason to provide such a function is that the program can be - * compiled both in MT and non-MT mode without difference. + * multithreading. It allows to write programs such that they start new + * threads and/or lock objects in multithreading mode, and use dummy thread + * management and synchronisation classes instead when running in + * single-thread mode. Specifically, the new_thread functions only + * call the function but wait for it to return instead of running in on + * another thread, and the mutices do nothing really. The only reason to + * provide such a function is that the program can be compiled both in MT and + * non-MT mode without difference. * * @author Wolfgang Bangerth, 2000, 2003 */ @@ -142,7 +156,7 @@ namespace Threads * programs such that they start new threads and/or lock objects in * multithreading mode, and use dummy thread management and * synchronisation classes instead when running in single-thread - * mode. Specifically, the spawn functions only call the function + * mode. Specifically, the new_thread functions only call the function * but wait for it to return instead of running in on another thread, * and the mutices do nothing really. The only reason to provide such * a function is that the program can be compiled both in MT and @@ -626,9 +640,8 @@ namespace Threads * Upon program start, this number * is one. It is increased each * time a thread is created using - * the Threads::spawn or - * Threads::spawn_n() - * functions. It is decreased once + * the Threads::new_thread + * function. It is decreased once * a thread terminates by returning * from the function that was * spawned. @@ -649,6 +662,8 @@ namespace Threads * thread), then these events are * not registered and counted for * the result of this function. + * + * @ingroup threads */ unsigned int n_existing_threads (); @@ -666,6 +681,8 @@ namespace Threads * systems seems to support * gettid, so that part of * the code is untested yet. + * + * @ingroup threads */ unsigned int this_thread_id (); @@ -689,6 +706,8 @@ namespace Threads * iterators, where each pair * denotes the range * [begin[i],end[i]). + * + * @ingroup threads */ template std::vector > @@ -704,6 +723,8 @@ namespace Threads * the difference that instead of * iterators, now values are taken * that define the whole interval. 
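+ *
+ * A short sketch: splitting the half-open index
+ * range [0,1000) into four roughly equal parts
+ * could look like
+ * @code
+ * const std::vector<std::pair<unsigned int,unsigned int> >
+ *   subranges = Threads::split_interval (0, 1000, 4);
+ * // the four pairs then denote the subranges
+ * // [0,250), [250,500), [500,750) and [750,1000)
+ * @endcode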
+ * + * @ingroup threads */ std::vector > split_interval (const unsigned int begin, @@ -1326,6 +1347,34 @@ namespace Threads void thread_entry_point (const std_cxx1x::function function, ThreadDescriptor *descriptor) { + // create a new scheduler object + // so that we can start tasks on + // the new thread. the scheduler + // will go out of scope at the + // end of the function, which + // also coincides with the end of + // the thread + // + // one may think that the + // creation of a scheduler is + // expensive, but just creating + // one and then destroying it + // again costs about 92 + // nanoseconds on my laptop + // (early 2009), whereas thread + // creation takes about 12000 + // nanoseconds, more than 100 + // times longer. there is + // therefore no need to only + // create the scheduler when this + // is actually necessary because + // we are spawning tasks on this + // particular thread; rather, we + // make our life simpler by + // always having a scheduler + // around + tbb::task_scheduler_init scheduler; + // now call the function // in question. since an // exception that is @@ -1452,6 +1501,7 @@ namespace Threads * * @author Wolfgang Bangerth, 2003, 2009 * @ingroup threads + * @ingroup threads */ template class Thread @@ -2045,6 +2095,8 @@ namespace Threads * Overload of the spawn function for * non-member or static member * functions with no arguments. + * + * @deprecated */ template inline @@ -2059,6 +2111,8 @@ namespace Threads * Overload of the non-const spawn * function for member functions with * no arguments. + * + * @deprecated */ template inline @@ -2074,6 +2128,8 @@ namespace Threads * Overload of the spawn function for * const member functions with no * arguments. + * + * @deprecated */ template inline @@ -2094,6 +2150,8 @@ namespace Threads * Overload of the spawn function for * non-member or static member * functions with 1 argument. + * + * @deprecated */ template inline @@ -2109,6 +2167,8 @@ namespace Threads * Overload of the non-const spawn * function for member functions with * 1 argument. + * + * @deprecated */ template inline @@ -2124,6 +2184,8 @@ namespace Threads * Overload of the spawn function for * const member functions with 1 * argument. + * + * @deprecated */ template inline @@ -2142,6 +2204,8 @@ namespace Threads * Overload of the spawn function for * non-member or static member * functions with 2 arguments. + * + * @deprecated */ template inline @@ -2157,6 +2221,8 @@ namespace Threads * Overload of the non-const spawn * function for member functions with * 2 arguments. + * + * @deprecated */ template inline @@ -2172,6 +2238,8 @@ namespace Threads * Overload of the spawn function for * const member functions with 2 * arguments. + * + * @deprecated */ template inline @@ -2190,6 +2258,8 @@ namespace Threads * Overload of the spawn function for * non-member or static member * functions with 3 arguments. + * + * @deprecated */ template @@ -2206,6 +2276,8 @@ namespace Threads * Overload of the non-const spawn * function for member functions with * 3 arguments. + * + * @deprecated */ template @@ -2222,6 +2294,8 @@ namespace Threads * Overload of the spawn function for * const member functions with 3 * arguments. + * + * @deprecated */ template @@ -2242,6 +2316,8 @@ namespace Threads * Overload of the spawn function for * non-member or static member * functions with 4 arguments. + * + * @deprecated */ template @@ -2258,6 +2334,8 @@ namespace Threads * Overload of the non-const spawn * function for member functions with * 4 arguments. 
+ * + * @deprecated */ template @@ -2274,6 +2352,8 @@ namespace Threads * Overload of the spawn function for * const member functions with 4 * arguments. + * + * @deprecated */ template @@ -2293,6 +2373,8 @@ namespace Threads * Overload of the spawn function for * non-member or static member * functions with 5 arguments. + * + * @deprecated */ template >::type> (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), _1, _2, _3, _4, _5, _6, _7, _8, _9)); } + + + +// ----------- thread starters for functions not taking any parameters + + /** + * Overload of the new_thread function for + * objects that can be converted to + * std_cxx1x::function, i.e. anything + * that can be called like a function + * object without arguments and returning + * an object of type RT (or void). + * + * @ingroup threads + */ + template + inline + Thread + new_thread (const std_cxx1x::function &function) + { + return Thread(function); + } + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with no arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)()) + { + return Thread(fun_ptr); + } + /** + * Overload of the non-const new_thread + * function for member functions with + * no arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(), + typename identity::type &c) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG /** - * A container for thread - * objects. Allows to add new - * thread objects and wait for them - * all together. The thread objects - * need to have the same return - * value for the called function. + * Overload of the new_thread function for + * const member functions with no + * arguments. * - * @author Wolfgang Bangerth, 2003 - */ - template - class ThreadGroup + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)() const, + const typename identity::type &c) { - public: - /** - * Add another thread object to - * the collection. - */ - ThreadGroup & operator += (const Thread &t) - { - threads.push_back (t); - return *this; - } + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c))); + } +#endif + + + +// ----------- thread starters for unary functions + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 1 argument. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1), + typename identity::type arg1) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 1 argument. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1), + typename identity::type &c, + typename identity::type arg1) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 1 + * argument. 
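+ *
+ * For example (a sketch with made-up names),
+ * evaluating a member function of some class X
+ * on a separate thread and waiting for its
+ * result could look like this:
+ * @code
+ * struct X
+ * {
+ *   double value (const unsigned int i) const;
+ * };
+ *
+ * X x;
+ * Threads::Thread<double> t
+ *   = Threads::new_thread (&X::value, x, 3U);
+ * const double d = t.return_value(); // waits for the thread to finish
+ * @endcode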
+ * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1) const, + typename identity::type &c, + typename identity::type arg1) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1))); + } +#endif + +// ----------- thread starters for binary functions + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 2 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2), + typename identity::type arg1, + typename identity::type arg2) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2))); + } + - /** - * Wait for all threads in the - * collection to finish. It is - * not a problem if some of - * them have already been - * waited for, i.e. you may - * call this function more than - * once, and you can also add - * new thread objects between - * subsequent calls to this - * function if you want. - */ - void join_all () const { - for (typename std::list >::const_iterator - t=threads.begin(); t!=threads.end(); ++t) - t->join (); - } - - private: - /** - * List of thread objects. - */ - std::list > threads; - }; - - -} // end of implementation of namespace Threads + /** + * Overload of the non-const new_thread + * function for member functions with + * 2 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 2 + * arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2))); + } +#endif + +// ----------- thread starters for ternary functions + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 3 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2,Arg3), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 3 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 3 + * arguments. 
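+ *
+ * Note that arguments whose types are declared
+ * as references in the member function's
+ * signature are forwarded by reference rather
+ * than copied. A sketch with made-up names:
+ * @code
+ * struct Stats
+ * {
+ *   void minmax (const std::vector<double> &values,
+ *                double &min,
+ *                double &max) const;
+ * };
+ *
+ * Stats               stats;
+ * std::vector<double> values = get_values();
+ * double              min, max;
+ *
+ * Threads::Thread<void> t
+ *   = Threads::new_thread (&Stats::minmax, stats, values, min, max);
+ * t.join ();  // afterwards, min and max hold the results
+ * @endcode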
+ * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3))); + } +#endif + + +// ----------- thread starters for functions with 4 arguments + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 4 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 4 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 4 + * arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4))); + } +#endif + +// ----------- thread starters for functions with 5 arguments + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 5 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 5 arguments. 
+ * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 5 + * arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5))); + } +#endif + +// ----------- thread starters for functions with 6 arguments + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 6 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 6 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 6 + * arguments. 
+ * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6))); + } +#endif + +// ----------- thread starters for functions with 7 arguments + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 7 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 7 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 7 + * arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7))); + } +#endif + +// ----------- thread starters for functions with 8 arguments + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 8 arguments. 
+ * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 8 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 8 + * arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8))); + } +#endif + +// ----------- thread starters for functions with 9 arguments + + /** + * Overload of the new_thread function for + * non-member or static member + * functions with 9 arguments. 
+ * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8,Arg9), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8, + typename identity::type arg9) + { + return + Thread + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8), + internal::maybe_make_ref::act(arg9))); + } + + + + /** + * Overload of the non-const new_thread + * function for member functions with + * 9 arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8,Arg9), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8, + typename identity::type arg9) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8), + internal::maybe_make_ref::act(arg9))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_thread function for + * const member functions with 9 + * arguments. + * + * @ingroup threads + */ + template + inline + Thread + new_thread (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8,Arg9) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8, + typename identity::type arg9) + { + return + Thread + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8), + internal::maybe_make_ref::act(arg9))); + } +#endif + +// ------------------------ ThreadGroup ------------------------------------- + + /** + * A container for thread + * objects. Allows to add new + * thread objects and wait for them + * all together. The thread objects + * need to have the same return + * value for the called function. + * + * @author Wolfgang Bangerth, 2003 + * @ingroup threads + */ + template + class ThreadGroup + { + public: + /** + * Add another thread object to + * the collection. + */ + ThreadGroup & operator += (const Thread &t) + { + threads.push_back (t); + return *this; + } + + /** + * Wait for all threads in the + * collection to finish. 
It is + * not a problem if some of + * them have already been + * waited for, i.e. you may + * call this function more than + * once, and you can also add + * new thread objects between + * subsequent calls to this + * function if you want. + */ + void join_all () const { + for (typename std::list >::const_iterator + t=threads.begin(); t!=threads.end(); ++t) + t->join (); + } + + private: + /** + * List of thread objects. + */ + std::list > threads; + }; + + + template class Task; + + + namespace internal + { +#if (DEAL_II_USE_MT == 1) + + template class TaskDescriptor; + + + /** + * The task class for TBB that is + * used by the TaskDescriptor + * class. + */ + template + struct TaskEntryPoint : public tbb::task + { + TaskEntryPoint (TaskDescriptor &task_descriptor) + : + task_descriptor (task_descriptor) + {} + + virtual tbb::task * execute () + { + // call the function object + // and put the return value + // into the proper place + call (task_descriptor.function, task_descriptor.ret_val); + + // indicate that the task + // has finished, both + // through the flag and + // through the mutex that + // was locked before we + // started and that now + // needs to be + // released. this may + // also wake up all + // threads that may be + // waiting for the task's + // demise by blocking on + // completion_mutex.acquire() + // in + // TaskDescriptor::join(). + task_descriptor.task_is_done = true; + task_descriptor.completion_mutex.release (); + + return 0; + } + + /** + * A reference to the descriptor + * object of this task. + */ + TaskDescriptor &task_descriptor; + }; + + /** + * @internal + * Base class describing a + * task. This is the basic + * class abstracting the + * Threading Building Blocks + * implementation of tasks. + * It provides a mechanism + * to start a new task, as well + * as for joining it. + * + * Internally, the way things are + * implemented is that all Task<> + * objects keep a shared pointer + * to the task descriptor. When + * the last Task<> object goes + * out of scope, the destructor + * of the descriptor is + * called. Since tasks can not be + * abandoned, the destructor + * makes sure that the task is + * finished before it can + * continue to destroy the + * object. + * + * Note that unlike threads, + * tasks are not always started + * right away, and so the + * starting thread can't rely on + * the fact that the started task + * can copy things off the + * spawning thread's stack + * frame. As a consequence, the + * task description needs to + * include a way to store the + * function and its arguments + * that shall be run on the task. + * + * @author Wolfgang Bangerth, 2009 + */ + template + struct TaskDescriptor + { + private: + /** + * The function and its arguments + * that are to be run on the task. + */ + std_cxx1x::function function; + + /** + * Variable holding the data the TBB + * needs to work with a task. Set by + * the queue_up_task() function. Note + * that the object behind this + * pointer will be deleted upon + * termination of the task, so we do + * not have to do so ourselves. In + * particular, if all objects with + * pointers to this task_description + * object go out of scope then no + * action is needed on our behalf. + */ + tbb::task *task; + + /** + * A place where the task will + * deposit its return value. + */ + return_value ret_val; + + /** + * A flag indicating whether the task + * has terminated. + */ + bool task_is_done; + + /** + * Mutex used to indicate + * when the task is done. 
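A usage sketch for the ThreadGroup container defined above, assuming the return-type template parameter defaults to void; do_work() is a made-up placeholder:

#include <base/thread_management.h>   // assumed deal.II include path
using namespace dealii;

void do_work (unsigned int i) { /* ... */ }

void example ()
{
  Threads::ThreadGroup<> group;
  for (unsigned int i=0; i<10; ++i)
    group += Threads::new_thread (&do_work, i);

  // wait for all ten threads at once; calling join_all() again is harmless
  group.join_all ();
}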
It + * is locked before the task + * is spawned; the join() + * function tries to acquire + * it, but that will fail + * unless the task has + * unlocked it, which it does + * upon completion. + */ + mutable ThreadMutex completion_mutex; + + public: + + /** + * Constructor. Take the function to + * be run on this task as argument. + */ + TaskDescriptor (const std_cxx1x::function &function); + + /** + * Default + * constructor. Throws an + * exception since we want to + * queue a task immediately + * upon construction of these + * objects to make sure that + * each TaskDescriptor object + * corresponds to exactly one + * task. + */ + TaskDescriptor (); + + /** + * Copy constructor. Throws + * an exception since we want + * to make sure that each + * TaskDescriptor object + * corresponds to exactly one + * task. + */ + TaskDescriptor (const TaskDescriptor &); + + /** + * Destructor. + */ + ~TaskDescriptor (); + + /** + * Queue up the task to the + * scheduler. We need to do + * this in a separate + * function since we the new + * tasks needs to access + * objects from the current + * object and that can only + * reliably happen if the + * current object is + * completely constructed + * already. + */ + void queue_task (); + + /** + * Join a task, i.e. wait + * for it to finish. This + * function can safely be + * called from different + * threads at the same time, + * and can also be called + * more than once. + */ + void join (); + + + template friend struct TaskEntryPoint; + template friend struct Task; + }; + + + + template + inline + TaskDescriptor::TaskDescriptor (const std_cxx1x::function &function) + : + function (function), + task_is_done (false) + {} + + + template + inline + void + TaskDescriptor::queue_task () + { + // lock the mutex. it will + // become unlocked when the + // task is done + completion_mutex.acquire (); + + // use the pattern described in + // the TBB book on pages + // 230/231 ("Start a large task + // in parallel with the main + // program) + task = new (tbb::task::allocate_root()) tbb::empty_task; + task->set_ref_count (2); + + tbb::task *worker = new (task->allocate_child()) TaskEntryPoint(*this); + task->spawn (*worker); + } + + + + template + TaskDescriptor::TaskDescriptor () + { + Assert (false, ExcInternalError()); + } + + + + template + TaskDescriptor::TaskDescriptor (const TaskDescriptor &) + { + Assert (false, ExcInternalError()); + } + + + + template + inline + TaskDescriptor::~TaskDescriptor () + { + // wait for the task to + // complete for sure + join (); + + // now destroy the empty task + // structure. the book + // recommends to spawn it as + // well and let the scheduler + // destroy the object when + // done, but this has the + // disadvantage that the + // scheduler may not get to + // actually finishing the task + // before it goes out of scope + // (at the end of the program, + // or if a thread is done on + // which it was run) and then + // we would get a + // hard-to-decipher warning + // about unfinished tasks when + // the scheduler "goes out of + // the arena". 
rather, let's + // explicitly destroy the empty + // task object + Assert (task != 0, ExcInternalError()); + task->wait_for_all (); + task->destroy (*task); + } + + + template + inline + void + TaskDescriptor::join () + { + // use Schmidt's double checking + // pattern: if thread has already + // indicated that it is done, then + // return immediately + if (task_is_done) + return; + + // acquire the lock; this can + // only succeed when the task + // is done + completion_mutex.acquire (); + + // release it again; at this + // point the task must have + // finished + completion_mutex.release (); + Assert (task_is_done == true, ExcInternalError()); + } + + + +#else // no threading enabled + + /** + * A way to describe tasks. Since + * we are in non-MT mode at this + * place, things are a lot + * simpler than in MT mode. + */ + template + struct TaskDescriptor + { + /** + * A place where the task will + * deposit its return value. + */ + return_value ret_val; + + /** + * Constructor. Call the + * given function and emplace + * the return value into the + * slot reserved for this + * purpose. + */ + TaskDescriptor (const std_cxx1x::function &function) + { + call (function, ret_val); + } + /** + * Wait for the task to + * return. Since we are in + * non-MT mode here, there is + * nothing to do. + */ + static void join () {} + + /** + * Run the task. Since we are + * here in non-MT mode, there + * is nothing to do that the + * constructor hasn't already + * done. + */ + static void queue_task () {} + }; + +#endif + + } + + + template + class Task + { + /** + * Construct a task object + * given a function object to + * execute on the task. + */ + public: + Task (const std_cxx1x::function &function_object) + { + // create a task descriptor and tell it + // to queue itself up with the scheduling + // system + task_descriptor = + std_cxx1x::shared_ptr > + (new internal::TaskDescriptor(function_object)); + task_descriptor->queue_task (); + } + + /** + * Default constructor. You + * can't do much with a task + * object constructed this way, + * except for assigning it a + * task object that holds + * data created by the + * spawn functions. + */ + Task () {} + + /** + * Join the task represented + * by this object, i.e. wait + * for it to finish. You can't + * call this function if you + * have used the default + * constructor of this class + * and have not assigned a + * task object to it. + */ + void join () const + { + AssertThrow (task_descriptor != 0, ExcNoTask()); + task_descriptor->join (); + } + + /** + * Get the return value of the + * function of the + * task. Since this is only + * available once the task + * finishes, this implicitely + * also calls join(). + */ + RT return_value () + { + join (); + return task_descriptor->ret_val.get(); + } + + + /** + * Check for equality of task + * objects. Since objects of + * this class store an implicit + * pointer to an object that + * exists exactly once for each + * task, the check is simply + * to compare these pointers. + */ + bool operator == (const Task &t) + { + AssertThrow (task_descriptor != 0, ExcNoTask()); + return task_descriptor == t.task_descriptor; + } + + /** @addtogroup Exceptions + * @{ */ + + /** + * Exception + */ + DeclException0 (ExcNoTask); + //@} + private: + /** + * Shared pointer to the object + * representing the task. Boost's + * shared pointer implementation will + * make sure that that object lives as + * long as there is at least one + * subscriber to it. 
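queue_task() and the destructor above follow the classic TBB idiom of starting a task in parallel with the main program and later waiting on a dummy root task. Distilled into a standalone sketch against the classic TBB 2.x task API, with a hypothetical MyTask doing the actual work:

#include <tbb/task.h>

struct MyTask : public tbb::task
{
  virtual tbb::task * execute () { /* ...do the actual work... */ return 0; }
};

void example ()
{
  tbb::empty_task *root = new (tbb::task::allocate_root()) tbb::empty_task;
  root->set_ref_count (2);                   // one child plus the wait below
  tbb::task *child = new (root->allocate_child()) MyTask();
  root->spawn (*child);                      // child now runs asynchronously
  // ... the main program keeps working here ...
  root->wait_for_all ();                     // block until the child is done
  root->destroy (*root);                     // dispose of the dummy root task
}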
+ */ + std_cxx1x::shared_ptr > task_descriptor; + }; + + + + /** + * Overload of the new_task function for + * objects that can be converted to + * std_cxx1x::function, i.e. anything + * that can be called like a function + * object without arguments and returning + * an object of type RT (or void). + * + * @ingroup threads + */ + template + inline + Task + new_task (const std_cxx1x::function &function) + { + return Task(function); + } + + /** + * Overload of the new_task function for + * non-member or static member + * functions with no arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)()) + { + return new_task(std_cxx1x::function(fun_ptr)); + } + + + /** + * Overload of the non-const new_task + * function for member functions with + * no arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(), + typename identity::type &c) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with no + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)() const, + const typename identity::type &c) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c))); + } +#endif + + + +// ----------- thread starters for unary functions + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 1 argument. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1), + typename identity::type arg1) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 1 argument. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1), + typename identity::type &c, + typename identity::type arg1) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 1 + * argument. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1) const, + typename identity::type &c, + typename identity::type arg1) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1))); + } +#endif + +// ----------- thread starters for binary functions + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 2 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2), + typename identity::type arg1, + typename identity::type arg2) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 2 arguments. 
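A usage sketch for the new_task overloads above; integrate() is a made-up placeholder. Unlike new_thread, the returned Task object exposes the function's result through return_value(), which implicitly joins:

#include <base/thread_management.h>   // assumed deal.II include path
using namespace dealii;

double integrate (double a, double b, unsigned int n_intervals)
{ /* ... */ return 0.; }

void example ()
{
  // hand the computation to the task scheduler ...
  Threads::Task<double> t = Threads::new_task (&integrate, 0., 1., 100U);

  // ... do unrelated work here ...

  // return_value() waits for the task and fetches its result
  const double value = t.return_value ();
  (void) value;
}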
+ * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 2 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2))); + } +#endif + +// ----------- thread starters for ternary functions + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 3 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2,Arg3), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 3 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 3 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3))); + } +#endif + + +// ----------- thread starters for functions with 4 arguments + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 4 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 4 arguments. 
+ * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 4 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4))); + } +#endif + +// ----------- thread starters for functions with 5 arguments + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 5 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 5 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 5 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5))); + } +#endif + +// ----------- thread starters for functions with 6 arguments + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 6 arguments. 
+ * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 6 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 6 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6))); + } +#endif + +// ----------- thread starters for functions with 7 arguments + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 7 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 7 arguments. 
+ * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 7 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5,Arg6,Arg7) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7))); + } +#endif + +// ----------- thread starters for functions with 8 arguments + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 8 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 8 arguments. 
+ * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 8 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8))); + } +#endif + +// ----------- thread starters for functions with 9 arguments + + /** + * Overload of the new_task function for + * non-member or static member + * functions with 9 arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8,Arg9), + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8, + typename identity::type arg9) + { + return + new_task + (std_cxx1x::bind(fun_ptr, + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8), + internal::maybe_make_ref::act(arg9))); + } + + + + /** + * Overload of the non-const new_task + * function for member functions with + * 9 arguments. 
+ * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8,Arg9), + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8, + typename identity::type arg9) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::ref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8), + internal::maybe_make_ref::act(arg9))); + } + +#ifndef DEAL_II_CONST_MEMBER_DEDUCTION_BUG + /** + * Overload of the new_task function for + * const member functions with 9 + * arguments. + * + * @ingroup threads + */ + template + inline + Task + new_task (RT (C::*fun_ptr)(Arg1,Arg2,Arg3,Arg4,Arg5, + Arg6,Arg7,Arg8,Arg9) const, + typename identity::type &c, + typename identity::type arg1, + typename identity::type arg2, + typename identity::type arg3, + typename identity::type arg4, + typename identity::type arg5, + typename identity::type arg6, + typename identity::type arg7, + typename identity::type arg8, + typename identity::type arg9) + { + return + new_task + (std_cxx1x::bind(fun_ptr, std_cxx1x::cref(c), + internal::maybe_make_ref::act(arg1), + internal::maybe_make_ref::act(arg2), + internal::maybe_make_ref::act(arg3), + internal::maybe_make_ref::act(arg4), + internal::maybe_make_ref::act(arg5), + internal::maybe_make_ref::act(arg6), + internal::maybe_make_ref::act(arg7), + internal::maybe_make_ref::act(arg8), + internal::maybe_make_ref::act(arg9))); + } +#endif + + +// ------------------------ TaskGroup ------------------------------------- + + /** + * A container for task + * objects. Allows to add new + * task objects and wait for them + * all together. The task objects + * need to have the same return + * value for the called function. + * + * @author Wolfgang Bangerth, 2003 + * @ingroup tasks + */ + template + class TaskGroup + { + public: + /** + * Add another task object to + * the collection. + */ + TaskGroup & operator += (const Task &t) + { + tasks.push_back (t); + return *this; + } + + /** + * Wait for all tasks in the + * collection to finish. It is + * not a problem if some of + * them have already been + * waited for, i.e. you may + * call this function more than + * once, and you can also add + * new task objects between + * subsequent calls to this + * function if you want. + */ + void join_all () const { + for (typename std::list >::const_iterator + t=tasks.begin(); t!=tasks.end(); ++t) + t->join (); + } + + private: + /** + * List of task objects. 
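Analogously to ThreadGroup further up, a usage sketch for the TaskGroup container (again assuming a void default for the return-type template parameter); assemble_block() is a made-up placeholder:

#include <base/thread_management.h>   // assumed deal.II include path
using namespace dealii;

void assemble_block (unsigned int block) { /* ... */ }

void example ()
{
  Threads::TaskGroup<> tasks;
  for (unsigned int b=0; b<4; ++b)
    tasks += Threads::new_task (&assemble_block, b);

  tasks.join_all ();   // wait until all four tasks have finished
}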
+ */ + std::list > tasks; + }; + +} // end of implementation of namespace Threads + +/** + * @} + */ //--------------------------------------------------------------------------- diff --git a/deal.II/base/include/base/work_stream.h b/deal.II/base/include/base/work_stream.h new file mode 100644 index 0000000000..ec6d5ea073 --- /dev/null +++ b/deal.II/base/include/base/work_stream.h @@ -0,0 +1,623 @@ +//--------------------------------------------------------------------------- +// $Id$ +// Version: $Name$ +// +// Copyright (C) 2008, 2009 by the deal.II authors +// +// This file is subject to QPL and may not be distributed +// without copyright and license information. Please refer +// to the file deal.II/doc/license.html for the text and +// further information on this license. +// +//--------------------------------------------------------------------------- +#ifndef __deal2__work_stream_h +#define __deal2__work_stream_h + + +#include +#include +#include +#include +#include + +#if DEAL_II_USE_MT == 1 +# include +# include +#endif + +#include +#include + + +DEAL_II_NAMESPACE_OPEN + + + +/** + * A class whose main template function supports running multiple + * threads each of which operates on a subset of the given range of + * objects. The class uses the Intel Threading Building Blocks (TBB) + * to load balance the individual subranges onto the available + * threads. For a lengthy discussion of the rationale of this class, + * see the @ref threads "Parallel computing with multiple processors" + * module. + * + * The class is built on the following premise: One frequently has some work + * that needs to be done on a sequence of objects; a prototypical example is + * assembling cell contributions to a system matrix or right hand side. In + * many such examples, part of the work can be done entirely independently and + * in parallel, possibly using several processor cores on a machine with + * shared memory. However, some other part of this work may need to be + * synchronised and be done in order. In the example of assembling a matrix, + * the computation of local contributions can be done entirely in parallel, + * but copying the the local contributions into the global matrix requires + * some care: First, several threads can't write at the same time, but need to + * synchronise writing using a mutex; secondly, we want the order in which + * local contributions are added to the global matrix to be always the same + * because floating point addition is not commutative and adding local + * contributions to the global matrix in different orders leads to subtly + * different results that can affect the number of iterations for iterative + * solvers as well as the round-off error in the solution in random + * ways. Consequently, we want to ensure that only one thread at a time writes + * into the global matrix, and that results are copied in a stable and + * reproducible order. + * + * This class implements a framework for this work model. It works with a + * stream of objects given by an iterator range, runs a worker function in + * parallel on all of these objects and then passes each object to a + * postprocessor function that runs sequentially and gets objects in exactly + * the order in which they appear in the input iterator range. None of the + * synchronisation work is exposed to the user of this class. 
+ * + * Internally, the range given to the run() function of this class is split + * into a sequence of "items", which are then distributed according to some + * %internal algorithm onto the number of available threads. An item is an + * element of the range of iterators on which we are to operate; for example, + * for the purpose of assembling matrices or evaluating error indicators, an + * item could be a cell. The TBB library determines how many threads are + * created (typically as many as there are processor cores), but the number of + * items that may be active at any given time is specified by the argument to + * the constructor. It should be bigger or equal to the number of processor + * cores - the default is four times the number of cores on the current system. + * + * Items are created upon request by the TBB whenever one of the worker + * threads is idle or is expected to become idle. It is then handed off to a + * worker function, typically a member function of a main class. These worker + * functions are run in parallel on a number of threads, and there is no + * guarantee that they are asked to work on items in any particular order, in + * particular not necessarily in the order in which items are generated from + * the iterator range. + * + * Typically, worker functions need additional data, for example FEValues + * objects, input data vectors, etc, some of which can not be shared among + * threads. To this end, the run() function takes another template argument, + * ScratchData, which designates a type objects of which are stored with + * each item and which threads can use as private data without having to + * share them with other threads. The run() function takes an additional + * argument with an object of type ScratchData that is going to be copied + * for the arguments passed to each of the worker functions. + * + * In addition, worker functions store their results in objects of template type + * CopyData. These are then handed off to a separate function, called copier, + * that may use the stored results to transfer them into permanent + * storage. For example, it may copy the results of local contributions to a + * matrix computed by a worker function into the global matrix. In contrast to + * the worker function, however, only one instance of the copier is run at any + * given time; it can therefore safely copy local contributions into the + * global matrix without the need to lock the global object using a mutex or + * similar means. Furthermore, it is guaranteed that the copier is run with + * CopyData objects in the same order in which their associated items + * were created; consequently, even if worker threads may compute results in + * unspecified order, the copier always receives the results in exactly the + * same order as the items were created. + * + * Once an item is processed by the copier, it is deleted and the + * ScratchData and CopyData objects that were used in its computation + * are considered unused and may be re-used for the next invokation of + * the worker function, on this or another thread. + * + * This class only really works in parallel when multithread mode was selected + * during deal.II configuration. Otherwise it simply works on each item + * sequentially. + * + * @author Wolfgang Bangerth, 2007, 2008, 2009 + */ +namespace WorkStream +{ + +#if DEAL_II_USE_MT == 1 + + + namespace internal + { + /** + * A class that creates a sequence of + * items from a range of iterators. 
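For the matrix-assembly use case described above, the ScratchData and CopyData template arguments might look roughly as follows; the type and member names are purely illustrative:

#include <vector>

// per-thread data, typically expensive to construct (FEValues objects etc.)
struct AssemblyScratchData
{
  std::vector<double> shape_values;
};

// per-item results that the sequential copier transfers into the global matrix
struct AssemblyCopyData
{
  std::vector<double>       local_matrix_entries;
  std::vector<unsigned int> local_dof_indices;
};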
+ */ + template + class IteratorRangeToItemStream : public tbb::filter + { + public: + /** + * A data type that we use to identify + * items to be worked on. + * + * The first element indicates an array + * of iterators to work on; the second + * the scratch space; the third an + * array of copy data spaces; and the + * last the number of elements to work + * on. This last argument is an integer + * between one and chunk_size. The + * arrays have a length of chunk_size. + */ + typedef + std_cxx1x::tuple, + ScratchData*, + std::vector, + unsigned int> + ItemType; + + + /** + * Constructor. Take an iterator + * range, the size of a buffer that + * can hold items, and the sample + * additional data object that will + * be passed to each worker and + * copier function invokation. + */ + IteratorRangeToItemStream (const Iterator &begin, + const Iterator &end, + const unsigned int buffer_size, + const unsigned int chunk_size, + const ScratchData &sample_scratch_data, + const CopyData &sample_copy_data) + : + tbb::filter (/*is_serial=*/true), + remaining_iterator_range (begin, end), + ring_buffer (buffer_size), + n_emitted_items (0), + chunk_size (chunk_size) + { + // initialize copies of + // additional_data. since + // this is frequently + // expensive (creating + // FEValues objects etc) do + // that in parallel + Threads::TaskGroup<> tasks; + for (unsigned int i=0; i(ring_buffer[i]); + } + + /** + * Create a item and return a + * pointer to it. + */ + virtual void * operator () (void *) + { + // store the current + // position of the pointer + ItemType *current_item + = &ring_buffer[n_emitted_items % ring_buffer.size()]; + + // initialize the next item. it may + // consist of at most chunk_size + // elements + std_cxx1x::get<3>(*current_item) = 0; + while ((remaining_iterator_range.first != + remaining_iterator_range.second) + && + (std_cxx1x::get<3>(*current_item) < chunk_size)) + { + std_cxx1x::get<0>(*current_item)[std_cxx1x::get<3>(*current_item)] + = remaining_iterator_range.first; + + ++remaining_iterator_range.first; + ++std_cxx1x::get<3>(*current_item); + } + + if (std_cxx1x::get<3>(*current_item) == 0) + // there were no items + // left. terminate the pipeline + return 0; + else + { + ++n_emitted_items; + return current_item; + } + } + + private: + /** + * The interval of iterators still to + * be worked on. This range will shrink + * over time. + */ + std::pair remaining_iterator_range; + + /** + * A ring buffer that will store items. + */ + std::vector ring_buffer; + + /** + * Counter for the number of emitted + * items. Each item may consist of up + * to chunk_size iterator elements. + */ + unsigned int n_emitted_items; + + /** + * Number of elements of the + * iterator range that each + * thread should work on + * sequentially; a large number + * makes sure that each thread + * gets a significant amount of + * work before the next task + * switch happens, whereas a + * small number is better for + * load balancing. + */ + const unsigned int chunk_size; + + /** + * Initialize the pointers and vector + * elements in the specified entry of + * the ring buffer. 
+ */ + void init_buffer_elements (const unsigned int element, + const ScratchData &sample_scratch_data, + const CopyData &sample_copy_data) + { + Assert (std_cxx1x::get<1>(ring_buffer[element]) == 0, + ExcInternalError()); + + std_cxx1x::get<0>(ring_buffer[element]) + .resize (chunk_size, remaining_iterator_range.second); + std_cxx1x::get<1>(ring_buffer[element]) + = new ScratchData(sample_scratch_data); + std_cxx1x::get<2>(ring_buffer[element]) + .resize (chunk_size, sample_copy_data); + } + }; + + + + /** + * A class that manages calling the + * worker function on a number of + * parallel threads. Note that it is, in + * the TBB notation, a filter that can + * run in parallel. + */ + template + class Worker : public tbb::filter + { + public: + /** + * Constructor. Takes a + * reference to the object on + * which we will operate as + * well as a pointer to the + * function that will do the + * assembly. + */ + Worker (const std_cxx1x::function &worker) + : + tbb::filter (/* is_serial= */ false), + worker (worker) + {} + + + /** + * Work on an item. + */ + void * operator () (void *item) + { + // first unpack the current item + typedef + typename IteratorRangeToItemStream::ItemType + ItemType; + + ItemType *current_item = reinterpret_cast (item); + + // then call the worker function on + // each element of the chunk we + // were given + for (unsigned int i=0; i(*current_item); ++i) + worker (std_cxx1x::get<0>(*current_item)[i], + *std_cxx1x::get<1>(*current_item), + std_cxx1x::get<2>(*current_item)[i]); + + // then return the original pointer + // to the now modified object + return item; + } + + + private: + /** + * Pointer to the function + * that does the assembling + * on the sequence of cells. + */ + const std_cxx1x::function worker; + }; + + + + /** + * A class that manages calling the + * copier function. Note that it is, in + * the TBB notation, a filter that runs + * sequentially, ensuring that all items + * are copied in the same order in which + * they are created. + */ + template + class Copier : public tbb::filter + { + public: + /** + * Constructor. Takes a + * reference to the object on + * which we will operate as + * well as a pointer to the + * function that will do the + * copying from the + * additional data object to + * the global matrix or + * similar. + */ + Copier (const std_cxx1x::function &copier) + : + tbb::filter (/* is_serial= */ true), + copier (copier) + {} + + + /** + * Work on a single item. + */ + void * operator () (void *item) + { + // first unpack the current item + typedef + typename IteratorRangeToItemStream::ItemType + ItemType; + + ItemType *current_item = reinterpret_cast (item); + + // initiate copying data + for (unsigned int i=0; i(*current_item); ++i) + copier (std_cxx1x::get<2>(*current_item)[i]); + + // return an invalid + // item since we are at + // the end of the + // pipeline + return 0; + } + + + private: + /** + * Pointer to the function + * that does the copying of + * data. + */ + const std_cxx1x::function copier; + }; + + } + + +#endif // DEAL_II_USE_MT + + + + /** + * This is the main function of the + * WorkStream concept, doing work as + * described in the introduction to this + * namespace. + * + * This is the function that can be used + * for worker and copier objects that are + * either pointers to non-member + * functions or objects that allow to be + * called with an operator(), for example + * objects created by std::bind. 
+ * + * The argument passed as @p end must be + * convertible to the same type as + * @p begin, but doesn't have to be of the + * same type itself. This allows to write + * code like + * WorkStream().run(dof_handler.begin_active(), + * dof_handler.end(), ... where + * the first is of type + * DoFHandler::active_cell_iterator + * whereas the second is of type + * DoFHandler::raw_cell_iterator. + * + * The @p queue_length argument indicates + * the number of items that can be live + * at any given time. Each item consists + * of @p chunk_size elements of the input + * stream that will be worked on by the + * worker and copier functions one after + * the other on the same thread. + */ + template + void + run (const Iterator &begin, + const typename identity::type &end, + Worker worker, + Copier copier, + const ScratchData &sample_scratch_data, + const CopyData &sample_copy_data, + const unsigned int queue_length = 2*multithread_info.n_default_threads, + const unsigned int chunk_size = 8) + { + Assert (queue_length > 0, + ExcMessage ("The queue length must be at least one, and preferably " + "larger than the number of processors on this system.")); + Assert (chunk_size > 0, + ExcMessage ("The chunk_size must be at least one.")); + + // if no work then skip. (only use + // operator!= for iterators since we may + // not have an equality comparison + // operator) + if (!(begin != end)) + return; + +#if DEAL_II_USE_MT == 1 + // create the three stages of the + // pipeline + internal::IteratorRangeToItemStream + iterator_range_to_item_stream (begin, end, + queue_length * chunk_size, + chunk_size, + sample_scratch_data, + sample_copy_data); + + internal::Worker worker_filter (worker); + internal::Copier copier_filter (copier); + + // now create a pipeline from + // these stages + tbb::pipeline assembly_line; + assembly_line.add_filter (iterator_range_to_item_stream); + assembly_line.add_filter (worker_filter); + assembly_line.add_filter (copier_filter); + + // and run it + assembly_line.run (queue_length); + + assembly_line.clear (); + +#else + + // need to copy the sample since it is + // marked const + ScratchData scratch_data = sample_scratch_data; + CopyData copy_data = sample_copy_data; + + for (Iterator i=begin; i!=end; ++i) + { + worker (i, scratch_data, copy_data); + copier (copy_data); + } +#endif + } + + + + /** + * This is the main function of the + * WorkStream concept, doing work as + * described in the introduction to this + * namespace. + * + * This is the function that can be + * used for worker and copier functions + * that are member functions of a class. + * + * The argument passed as @p end must be + * convertible to the same type as + * @p begin, but doesn't have to be of the + * same type itself. This allows to write + * code like + * WorkStream().run(dof_handler.begin_active(), + * dof_handler.end(), ... where + * the first is of type + * DoFHandler::active_cell_iterator + * whereas the second is of type + * DoFHandler::raw_cell_iterator. + * + * The @p queue_length argument indicates + * the number of items that can be live + * at any given time. Each item consists + * of @p chunk_size elements of the input + * stream that will be worked on by the + * worker and copier functions one after + * the other on the same thread. 
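A minimal sketch of the member-function form of run() just described. The Summator class and the use of a plain std::vector iterator in place of a cell iterator are purely illustrative:

#include <base/work_stream.h>   // assumed deal.II include path
#include <vector>
using namespace dealii;

struct Scratch     {};                 // would hold FEValues objects etc.
struct PerItemData { int value; };

class Summator
{
  public:
    Summator () : sum (0) {}

    // runs in parallel, possibly out of order
    void worker (const std::vector<int>::const_iterator &it,
                 Scratch &, PerItemData &data)
    { data.value = (*it) * (*it); }

    // runs sequentially, in the order the items were created
    void copier (const PerItemData &data)
    { sum += data.value; }

    int sum;
};

void example (const std::vector<int> &v)
{
  Summator s;
  WorkStream::run (v.begin(), v.end(),
                   s, &Summator::worker, &Summator::copier,
                   Scratch(), PerItemData());
}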
+ */ + template + void + run (const Iterator &begin, + const typename identity::type &end, + MainClass &main_object, + void (MainClass::*worker) (const Iterator &, + ScratchData &, + CopyData &), + void (MainClass::*copier) (const CopyData &), + const ScratchData &sample_scratch_data, + const CopyData &sample_copy_data, + const unsigned int queue_length = 2*multithread_info.n_default_threads, + const unsigned int chunk_size = 8) + { + // forward to the other function + run (begin, end, + std_cxx1x::bind (worker, + std_cxx1x::ref (main_object), + _1, _2, _3), + std_cxx1x::bind (copier, + std_cxx1x::ref (main_object), + _1), + sample_scratch_data, + sample_copy_data, + queue_length, + chunk_size); + } + +} + + + + +DEAL_II_NAMESPACE_CLOSE + + + + +//---------------------------- work_stream.h --------------------------- +// end of #ifndef __deal2__work_stream_h +#endif +//---------------------------- work_stream.h --------------------------- diff --git a/deal.II/base/source/data_out_base.cc b/deal.II/base/source/data_out_base.cc index 71c166b609..eb3cd5e21c 100644 --- a/deal.II/base/source/data_out_base.cc +++ b/deal.II/base/source/data_out_base.cc @@ -3104,14 +3104,14 @@ void DataOutBase::write_gmv (const std::vector > &patches, // this copying of data vectors can // be done while we already output // the vertices, so do this on a - // separate thread and when wanting + // separate task and when wanting // to write out the data, we wait - // for that thread to finish + // for that task to finish Table<2,double> data_vectors (n_data_sets, n_nodes); void (*fun_ptr) (const std::vector > &, Table<2,double> &) = &DataOutBase::template write_gmv_reorder_data_vectors; - Threads::Thread<> reorder_thread = Threads::spawn (fun_ptr)(patches, data_vectors); + Threads::Task<> reorder_task = Threads::new_task (fun_ptr, patches, data_vectors); /////////////////////////////// // first make up a list of used @@ -3149,7 +3149,7 @@ void DataOutBase::write_gmv (const std::vector > &patches, // now write the data vectors to // @p{out} first make sure that all // data is in place - reorder_thread.join (); + reorder_task.join (); // then write data. // the '1' means: node data (as opposed @@ -3286,16 +3286,16 @@ void DataOutBase::write_tecplot (const std::vector > &patche // this copying of data vectors can // be done while we already output // the vertices, so do this on a - // separate thread and when wanting + // separate task and when wanting // to write out the data, we wait - // for that thread to finish + // for that task to finish Table<2,double> data_vectors (n_data_sets, n_nodes); void (*fun_ptr) (const std::vector > &, Table<2,double> &) = &DataOutBase::template write_gmv_reorder_data_vectors; - Threads::Thread<> reorder_thread = Threads::spawn (fun_ptr)(patches, data_vectors); + Threads::Task<> reorder_task = Threads::new_task (fun_ptr, patches, data_vectors); /////////////////////////////// // first make up a list of used @@ -3317,7 +3317,7 @@ void DataOutBase::write_tecplot (const std::vector > &patche // now write the data vectors to // @p{out} first make sure that all // data is in place - reorder_thread.join (); + reorder_task.join (); // then write data. 
for (unsigned int data_set=0; data_set > // this copying of data vectors can // be done while we already output // the vertices, so do this on a - // separate thread and when wanting + // separate task and when wanting // to write out the data, we wait - // for that thread to finish + // for that task to finish Table<2,double> data_vectors (n_data_sets, n_nodes); void (*fun_ptr) (const std::vector > &, Table<2,double> &) = &DataOutBase::template write_gmv_reorder_data_vectors; - Threads::Thread<> reorder_thread = Threads::spawn (fun_ptr)(patches, data_vectors); + Threads::Task<> reorder_task = Threads::new_task (fun_ptr, patches, data_vectors); /////////////////////////////// // first make up a list of used @@ -3619,7 +3619,7 @@ void DataOutBase::write_tecplot_binary (const std::vector > /////////////////////////////////////// // data output. // - reorder_thread.join (); + reorder_task.join (); // then write data. for (unsigned int data_set=0; data_set > &patches, // this copying of data vectors can // be done while we already output // the vertices, so do this on a - // separate thread and when wanting + // separate task and when wanting // to write out the data, we wait - // for that thread to finish + // for that task to finish Table<2,double> data_vectors (n_data_sets, n_nodes); void (*fun_ptr) (const std::vector > &, Table<2,double> &) = &DataOutBase::template write_gmv_reorder_data_vectors; - Threads::Thread<> reorder_thread = Threads::spawn (fun_ptr)(patches, data_vectors); + Threads::Task<> reorder_task = Threads::new_task (fun_ptr, patches, data_vectors); /////////////////////////////// // first make up a list of used @@ -3863,7 +3863,7 @@ DataOutBase::write_vtk (const std::vector > &patches, // now write the data vectors to // @p{out} first make sure that all // data is in place - reorder_thread.join (); + reorder_task.join (); // then write data. the // 'POINT_DATA' means: node data diff --git a/deal.II/base/source/quadrature_lib.cc b/deal.II/base/source/quadrature_lib.cc index a92ff3ec79..b6a9ba31a0 100644 --- a/deal.II/base/source/quadrature_lib.cc +++ b/deal.II/base/source/quadrature_lib.cc @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2005, 2006, 2007, 2008 by the deal.II authors +// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -971,7 +971,6 @@ QGaussLogR<1>::QGaussLogR(const unsigned int n, this->quadrature_points[j] = quad.point(i)*fraction; this->weights[j] = -std::log(alpha/fraction)*quad.weight(i)*fraction; } - // In case we need the second quadrature as well, do it now. if(fraction != 1) { this->quadrature_points[i+n] = quad2.point(i)*(1-fraction)+Point<1>(fraction); diff --git a/deal.II/base/source/thread_management.cc b/deal.II/base/source/thread_management.cc index fa9ef7fa17..b3669671ac 100644 --- a/deal.II/base/source/thread_management.cc +++ b/deal.II/base/source/thread_management.cc @@ -250,8 +250,17 @@ namespace Threads return_values[i].second = end; }; return return_values; - } - + } } // end namespace Thread + + +#if DEAL_II_USE_MT == 1 +/** + * A scheduler for tasks. 
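The Threads::spawn to Threads::new_task conversions above all follow one pattern; a sketch of the old and new idioms, plus the task-group variant used elsewhere in this patch (function and variable names such as do_one_job and n_jobs are illustrative only):

    // before: an explicit thread
    //   Threads::Thread<> t = Threads::spawn (fun_ptr)(arg1, arg2);
    //   t.join ();
    //
    // after: a task handed to the TBB scheduler
    //   Threads::Task<> t = Threads::new_task (fun_ptr, arg1, arg2);
    //   t.join ();

    // several independent tasks can be collected and waited for together
    Threads::TaskGroup<> tasks;
    for (unsigned int i=0; i<n_jobs; ++i)
      tasks += Threads::new_task (&do_one_job, i);   // do_one_job: void (unsigned int)
    tasks.join_all ();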
+ */ +tbb::task_scheduler_init scheduler; +#endif + + DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/common/Make.global_options.in b/deal.II/common/Make.global_options.in index fd60f055c9..9ad8dc73b1 100644 --- a/deal.II/common/Make.global_options.in +++ b/deal.II/common/Make.global_options.in @@ -38,8 +38,7 @@ F77LIBS = @F77LIBS@ LIBS = @LIBS@ LIBDIR = $D/lib PERL = @PERL@ -enable-multithreading= @enablemultithreading@ -with-multithreading = @withmultithreading@ +enable-threads = @enablethreads@ enable-shared = @enableshared@ enable-parser = @enableparser@ DEAL_II_VERSION = @PACKAGE_VERSION@ @@ -221,12 +220,20 @@ ifeq ($(USE_CONTRIB_HSL),yes) lib-hsl = $(LIBDIR)/libhsl$(lib-suffix) endif +# need to link with libtbb if we're using threads +ifeq ($(enable-threads),yes) + lib-contrib-tbb.g = $D/lib/libtbb_debug$(lib-suffix) + lib-contrib-tbb.o = $D/lib/libtbb$(lib-suffix) +endif + # set paths to all the libraries we need: lib-base.o = $(LIBDIR)/libbase$(lib-suffix) \ - $(lib-contrib-trilinos) + $(lib-contrib-trilinos) \ + $(lib-contrib-tbb.o) lib-base.g = $(LIBDIR)/libbase.g$(lib-suffix) \ - $(lib-contrib-trilinos) + $(lib-contrib-trilinos) \ + $(lib-contrib-tbb.g) lib-lac.o = $(LIBDIR)/liblac$(lib-suffix) \ $(lib-hsl) \ @@ -295,6 +302,9 @@ ifeq ($(USE_CONTRIB_METIS),yes) INCLUDE += -I$(include-path-metis) endif +ifeq ($(enable-threads),yes) + INCLUDE += -I$(shell echo $D/contrib/tbb/tbb*/include) +endif # compiler flags for debug and optimized mode CXXFLAGS.g = @DEFS@ @CXXFLAGSG@ $(INCLUDE) @@ -306,6 +316,12 @@ F77FLAGS.o = @DEFS@ @F77FLAGSO@ $(INCLUDE) # compile flags for C compiler CFLAGS = @CFLAGS@ +# if in debug mode, add TBB assertions +ifeq ($(enable-threads),yes) + CXXFLAGS.g += -DTBB_DO_ASSERT=1 +endif + + # PETSc wants to see a whole lot of other flags being passed... ifeq ($(USE_CONTRIB_PETSC),yes) # set PETSC_DIR and PETSC_ARCH to be used in variables file @@ -324,3 +340,8 @@ ifeq ($(USE_CONTRIB_PETSC),yes) CXXFLAGS.o += $(OCXX_PETSCFLAGS) endif +ifneq ($(enable-threads),no) + MT = MT +else + MT = == +endif diff --git a/deal.II/common/Make.rules b/deal.II/common/Make.rules index 13abec560d..ebfeba3ca8 100644 --- a/deal.II/common/Make.rules +++ b/deal.II/common/Make.rules @@ -11,7 +11,7 @@ include $D/common/Make.global_options # Targets for compilation ############################################################ -ifneq ($(with-multithreading),no) +ifneq ($(enable-threads),no) MT = MT else MT = == diff --git a/deal.II/configure b/deal.II/configure index 1cd37a3fc5..f4be092e03 100755 --- a/deal.II/configure +++ b/deal.II/configure @@ -691,8 +691,7 @@ SHLIBLD SHLIBFLAGS CC_VERSION CFLAGSPIC -enablemultithreading -withmultithreading +enablethreads CXXCPP GREP EGREP @@ -1347,8 +1346,7 @@ if test -n "$ac_init_help"; then Optional Features: --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] - --enable-multithreading Set compiler flags to allow for multithreaded - programs + --enable-threads Use multiple threads inside deal.II --enable-shared Set compiler flags to generate shared libraries --enable-parser While switched on by default, this option allows to switch off support for the function parser in the @@ -1359,9 +1357,6 @@ Optional Features: Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) - --with-multithreading=name - If name==posix, or no name given, then use POSIX - threads. 
--with-boost=/path/to/boost Use an installed boost library instead of the contributed one. The optional argument points to the @@ -4588,7 +4583,7 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi case "$target" in - *apple-darwin*) + *apple-darwin*) OLD_CXXFLAGS="$CXXFLAGS" CXXFLAGS=-Wno-long-double @@ -4644,6 +4639,13 @@ fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + if test "`/usr/sbin/sysctl -n hw.optional.x86_64`" = "1" ; then + CXXFLAGS="$CXXFLAGS -m64" + CXXFLAGSG="$CXXFLAGSG -m64" + CXXFLAGSO="$CXXFLAGSO -m64" + LDFLAGS="$LDFLAGS -m64" + fi + CXXFLAGS="${OLD_CXXFLAGS}" ;; @@ -5426,6 +5428,12 @@ echo "${ECHO_T}Unrecognized compiler -- still trying" >&6; } CFLAGSPIC= ;; + *apple-darwin*) + if test "`/usr/sbin/sysctl -n hw.optional.x86_64`" = "1" ; then + CFLAGS="$CFLAGS -m64" + fi + ;; + *) CFLAGSPIC="-fPIC" ;; @@ -5619,16 +5627,35 @@ rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ - # Check whether --enable-multithreading was given. -if test "${enable_multithreading+set}" = set; then - enableval=$enable_multithreading; enablemultithreading="$enableval" + # Check whether --enable-threads was given. +if test "${enable_threads+set}" = set; then + enableval=$enable_threads; + enablethreads="$enableval" + else - enablemultithreading=no + + case "$target" in + *cygwin* ) + enablethreads=no + ;; + + * ) + enablethreads=yes + ;; + esac + fi + if test "$enablethreads" = yes ; then + case "$target" in + *cygwin* ) + { { echo "$as_me:$LINENO: error: Multithreading is not supported on CygWin" >&5 +echo "$as_me: error: Multithreading is not supported on CygWin" >&2;} + { (exit 1); exit 1; }; } + ;; + esac - if test "$enablemultithreading" = yes ; then if test "$GXX" = yes ; then case "$target" in @@ -5789,97 +5816,8 @@ echo "$as_me: error: No threading compiler options for this C++ compiler ;; esac fi - fi - - - if test "$enablemultithreading" = yes ; then - case "$GXX_VERSION" in - gcc*) - { echo "$as_me:$LINENO: checking for only partly bracketed mutex initializer" >&5 -echo $ECHO_N "checking for only partly bracketed mutex initializer... $ECHO_C" >&6; } - ac_ext=cpp -ac_cpp='$CXXCPP $CPPFLAGS' -ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_cxx_compiler_gnu - - CXXFLAGS="$CXXFLAGSG -Werror" - cat >conftest.$ac_ext <<_ACEOF -/* confdefs.h. */ -_ACEOF -cat confdefs.h >>conftest.$ac_ext -cat >>conftest.$ac_ext <<_ACEOF -/* end confdefs.h. */ - -# include - -int -main () -{ -; - ; - return 0; -} -_ACEOF -rm -f conftest.$ac_objext -if { (ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 - (eval "$ac_compile") 2>conftest.er1 - ac_status=$? - grep -v '^ *+' conftest.er1 >conftest.err - rm -f conftest.er1 - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - (exit $ac_status); } && { - test -z "$ac_cxx_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then - - { echo "$as_me:$LINENO: result: no" >&5 -echo "${ECHO_T}no" >&6; } - -else - echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - - { echo "$as_me:$LINENO: result: yes" >&5 -echo "${ECHO_T}yes" >&6; } - CXXFLAGSG="$CXXFLAGSG -Wno-missing-braces" - CXXFLAGSO="$CXXFLAGSO -Wno-missing-braces" - -fi - -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ;; - *) - ;; - esac - fi - - - -# Check whether --with-multithreading was given. -if test "${with_multithreading+set}" = set; then - withval=$with_multithreading; withmultithreading="$withval" -else - withmultithreading=no -fi - - - if test "x$withmultithreading" = "xyes" ; then - withmultithreading=posix - fi - - if test "x$withmultithreading" != "xno" ; then - if test "x$withmultithreading" = "xposix" ; then - { echo "$as_me:$LINENO: checking for posix thread functions" >&5 echo $ECHO_N "checking for posix thread functions... $ECHO_C" >&6; } ac_ext=cpp @@ -6154,12 +6092,11 @@ cat >>confdefs.h <<\_ACEOF #define DEAL_II_USE_MT_POSIX 1 _ACEOF - else - { { echo "$as_me:$LINENO: error: Invalid flag for --with-multithreading" >&5 -echo "$as_me: error: Invalid flag for --with-multithreading" >&2;} - { (exit 1); exit 1; }; } - fi + LDFLAGS="$LDFLAGS -ldl" + fi + + if test "x$enablethreads" != "xno" ; then DEAL_II_USE_MT_VAL=1 else DEAL_II_USE_MT_VAL=0 @@ -6172,6 +6109,76 @@ _ACEOF + if test "$enablethreads" = yes ; then + case "$GXX_VERSION" in + gcc*) + { echo "$as_me:$LINENO: checking for only partly bracketed mutex initializer" >&5 +echo $ECHO_N "checking for only partly bracketed mutex initializer... $ECHO_C" >&6; } + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CXXFLAGS="$CXXFLAGSG -Werror" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +# include + +int +main () +{ +; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } + CXXFLAGSG="$CXXFLAGSG -Wno-missing-braces" + CXXFLAGSO="$CXXFLAGSO -Wno-missing-braces" + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ;; + *) + ;; + esac + fi + + @@ -8082,6 +8089,80 @@ sed 's/^/| /' conftest.$ac_ext >&5 { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + + { echo "$as_me:$LINENO: checking for const member deduction bug" >&5 +echo $ECHO_N "checking for const member deduction bug... 
$ECHO_C" >&6; } + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + CXXFLAGS="$CXXFLAGSG" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + + template struct identity { typedef T type; }; + + template void new_thread (void (C::*fun_ptr)(), + typename identity::type &c); + template void new_thread (void (C::*fun_ptr)() const, + const typename identity::type &c); + struct X { void f() const; }; + +int +main () +{ + + X x; + new_thread (&X::f, x); + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + + { echo "$as_me:$LINENO: result: no" >&5 +echo "${ECHO_T}no" >&6; } + +else + echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + + { echo "$as_me:$LINENO: result: yes" >&5 +echo "${ECHO_T}yes" >&6; } + +cat >>confdefs.h <<\_ACEOF +#define DEAL_II_CONST_MEMBER_DEDUCTION_BUG 1 +_ACEOF + + fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext @@ -12214,7 +12295,7 @@ if test "x$enableshared" = "xyes" ; then OLD_LDFLAGS="$LDFLAGS" - LDFLAGS="-Wl,-soname,libbase.so.6.2.1 $LDFLAGS $SHLIBFLAGS" + LDFLAGS="-Wl,-soname,libbase.so.6.2.1 $LDFLAGS $LDFLAGSPIC $SHLIBFLAGS" { echo "$as_me:$LINENO: checking whether compiler understands option -Wl,-soname" >&5 echo $ECHO_N "checking whether compiler understands option -Wl,-soname... 
$ECHO_C" >&6; } cat >conftest.$ac_ext <<_ACEOF @@ -17536,8 +17617,7 @@ SHLIBLD!$SHLIBLD$ac_delim SHLIBFLAGS!$SHLIBFLAGS$ac_delim CC_VERSION!$CC_VERSION$ac_delim CFLAGSPIC!$CFLAGSPIC$ac_delim -enablemultithreading!$enablemultithreading$ac_delim -withmultithreading!$withmultithreading$ac_delim +enablethreads!$enablethreads$ac_delim CXXCPP!$CXXCPP$ac_delim GREP!$GREP$ac_delim EGREP!$EGREP$ac_delim @@ -17560,6 +17640,7 @@ DEAL_II_DISABLE_PARSER!$DEAL_II_DISABLE_PARSER$ac_delim DEAL_II_EXPAND_PETSC_VECTOR!$DEAL_II_EXPAND_PETSC_VECTOR$ac_delim DEAL_II_EXPAND_PETSC_BLOCKVECTOR!$DEAL_II_EXPAND_PETSC_BLOCKVECTOR$ac_delim USE_CONTRIB_PETSC!$USE_CONTRIB_PETSC$ac_delim +DEAL_II_PETSC_DIR!$DEAL_II_PETSC_DIR$ac_delim _ACEOF if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then @@ -17601,7 +17682,6 @@ _ACEOF ac_delim='%!_!# ' for ac_last_try in false false false false false :; do cat >conf$$subs.sed <<_ACEOF -DEAL_II_PETSC_DIR!$DEAL_II_PETSC_DIR$ac_delim DEAL_II_PETSC_ARCH!$DEAL_II_PETSC_ARCH$ac_delim DEAL_II_PETSC_VERSION_MAJOR!$DEAL_II_PETSC_VERSION_MAJOR$ac_delim DEAL_II_PETSC_VERSION_MINOR!$DEAL_II_PETSC_VERSION_MINOR$ac_delim @@ -17647,7 +17727,7 @@ LIBOBJS!$LIBOBJS$ac_delim LTLIBOBJS!$LTLIBOBJS$ac_delim _ACEOF - if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 44; then + if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 43; then break elif $ac_last_try; then { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 diff --git a/deal.II/configure.in b/deal.II/configure.in index e958c5f49e..484e7dcef3 100644 --- a/deal.II/configure.in +++ b/deal.II/configure.in @@ -145,13 +145,8 @@ dnl Test whether multithreading support is requested. This dnl does not tell deal.II to actually use it, but the dnl compiler flags are set to allow for it. DEAL_II_CHECK_MULTITHREADING -DEAL_II_SET_MULTITHREADING_FLAGS DEAL_II_CHECK_PARTLY_BRACKETED_INITIALIZER -AC_SUBST(enablemultithreading) - -dnl Next also check whether the MT code shall be used through POSIX -DEAL_II_CHECK_USE_MT -AC_SUBST(withmultithreading) +AC_SUBST(enablethreads) @@ -192,6 +187,7 @@ DEAL_II_CHECK_SFINAE_BUG DEAL_II_CHECK_TEMPL_OP_DISAMBIGUATION_BUG DEAL_II_CHECK_ARRAY_CONDITIONAL_DECAY_BUG DEAL_II_CHECK_EXPLICIT_CONSTRUCTOR_BUG +DEAL_II_CHECK_CONST_MEMBER_DEDUCTION_BUG DEAL_II_CHECK_TYPE_QUALIFIER_BUG DEAL_II_CHECK_WSYNTH_AND_STD_COMPLEX DEAL_II_CHECK_CTOR_DTOR_PRIVACY diff --git a/deal.II/contrib/Makefile.in b/deal.II/contrib/Makefile.in index 92d11eb998..8772b62aab 100644 --- a/deal.II/contrib/Makefile.in +++ b/deal.II/contrib/Makefile.in @@ -7,7 +7,7 @@ include ../common/Make.global_options # by default make sublibs. 
the functionparser is not part of the # default make list, since it needs to be built *before* the rest of # this stuff as it is needed by libbase -default: hsl umfpack +default: hsl umfpack functionparser tbb ifeq ($(USE_CONTRIB_HSL),yes) @@ -38,8 +38,19 @@ functionparser: endif +ifeq ($(enable-threads),yes) +tbb: + @cd tbb ; $(MAKE) + +tbb-clean: + @cd tbb ; $(MAKE) clean +else +tbb tbb-clean: +endif + + # generic targets -clean: hsl-clean - @-rm -f ../lib/contrib/*/*.o +clean: hsl-clean tbb-clean + @-rm -f ../lib/contrib/*/*.o -.PHONY: default hsl clean umfpack functionparser +.PHONY: default hsl clean hsl-clean umfpack tbb tbb-clean functionparser diff --git a/deal.II/contrib/functionparser/Makefile b/deal.II/contrib/functionparser/Makefile index c289e91a78..8b83d5d999 100644 --- a/deal.II/contrib/functionparser/Makefile +++ b/deal.II/contrib/functionparser/Makefile @@ -1,19 +1,11 @@ # $Id$ -# Copyright (C) 2005 by Wolfgang Bangerth +# Copyright (C) 2005, 2008 by Wolfgang Bangerth D = ../.. include $D/common/Make.global_options -ifneq ($(with-multithreading),no) - MT = MT -else - MT = == -endif - - - # rules $(LIBDIR)/contrib/functionparser/fparser.$(OBJEXT): fparser.cc fparser.h @echo =====function parser==optimized==$(MT)== $( > &accessor, const InputVector &values, - Vector &local_values) + dealii::Vector &local_values) { typedef dealii::DoFAccessor > @@ -2114,7 +2114,7 @@ namespace internal void get_dof_values (const DoFCellAccessor > &accessor, const InputVector &values, - Vector &local_values) + dealii::Vector &local_values) { // no caching for hp::DoFHandler // implemented @@ -2137,7 +2137,7 @@ namespace internal static void set_dof_values (const DoFCellAccessor > &accessor, - const Vector &local_values, + const dealii::Vector &local_values, OutputVector &values) { typedef @@ -2167,7 +2167,7 @@ namespace internal static void set_dof_values (const DoFCellAccessor > &accessor, - const Vector &local_values, + const dealii::Vector &local_values, OutputVector &values) { // no caching for hp::DoFHandler diff --git a/deal.II/deal.II/include/grid/filtered_iterator.h b/deal.II/deal.II/include/grid/filtered_iterator.h index d13a3e6b19..effe6f6dd1 100644 --- a/deal.II/deal.II/include/grid/filtered_iterator.h +++ b/deal.II/deal.II/include/grid/filtered_iterator.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 by the deal.II authors +// Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -406,9 +406,28 @@ class FilteredIterator : public BaseIterator * Constructor. Use the given * predicate for filtering and * initialize the iterator with - * the given value. This initial - * value has to satisfy the - * predicate. + * the given value. + * + * If the initial value @p bi does + * not satisfy the predicate @p p + * then it is advanced until we + * either hit the the + * past-the-end iterator, or the + * predicate is satisfied. This + * allows, for example, to write + * code like + * @code + * FilteredIterator::active_cell_iterator> + * cell (IteratorFilters::SubdomainEqualTo(13), + * triangulation.begin_active()); + * @endcode + * + * If the cell + * triangulation.begin_active() + * does not have a subdomain_id + * equal to 13, then the iterator + * will automatically be advanced + * to the first cell that has. 
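A sketch of the loop idiom that the relaxed FilteredIterator constructor above enables, assuming a Triangulation<2> whose cells carry subdomain ids (do_something_on is a placeholder). A past-the-end iterator is left where it is by the advancing logic, so a begin/end pair can be built the same way:

    typedef FilteredIterator<Triangulation<2>::active_cell_iterator> FI;
    const IteratorFilters::SubdomainEqualTo predicate (13);

    FI cell (predicate, triangulation.begin_active());
    FI endc (predicate, triangulation.end());

    // operator++ advances to the next cell satisfying the predicate,
    // so only cells with subdomain id 13 are visited
    for (; cell != endc; ++cell)
      do_something_on (cell);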
*/ template FilteredIterator (Predicate p, @@ -595,7 +614,7 @@ class FilteredIterator : public BaseIterator BaseIterator, << "The element " << arg1 << " with which you want to compare or which you want to" - << " assign is invalid since it does not satisfy the predicate."); + << " assign from is invalid since it does not satisfy the predicate."); private: @@ -712,6 +731,28 @@ class FilteredIterator : public BaseIterator +/** + * Create an object of type FilteredIterator given the base iterator + * and predicate. This function makes the creation of temporary + * objects (for example as function arguments) a lot simpler because + * one does not have to explicitly specify the type of the base + * iterator by hand -- it is deduced automatically here. + * + * @author Wolfgang Bangerth + * @relates FilteredIterator + */ +template +FilteredIterator +make_filtered_iterator (const BaseIterator &i, + const Predicate &p) +{ + FilteredIterator fi(p); + fi.set_to_next_positive (i); + return fi; +} + + + /* ------------------ Inline functions and templates ------------ */ @@ -736,8 +777,9 @@ FilteredIterator (Predicate p, BaseIterator (bi), predicate (new PredicateTemplate(p)) { - Assert ((this->state() != IteratorState::valid) || (*predicate) (*this), - ExcInvalidElement(bi)); + if ((this->state() == IteratorState::valid) && + ! (*predicate) (*this)) + set_to_next_positive (bi); } diff --git a/deal.II/deal.II/include/hp/fe_values.h b/deal.II/deal.II/include/hp/fe_values.h index d4cc487e70..0e91c643f7 100644 --- a/deal.II/deal.II/include/hp/fe_values.h +++ b/deal.II/deal.II/include/hp/fe_values.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2003, 2004, 2006, 2007 by the deal.II authors +// Copyright (C) 2003, 2004, 2006, 2007, 2008 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -81,7 +81,15 @@ namespace internal FEValuesBase (const dealii::hp::FECollection &fe_collection, const dealii::hp::QCollection &q_collection, const UpdateFlags update_flags); - + + /** + * Get a reference to the + * collection of finite + * element objects used here. + */ + const dealii::hp::FECollection & + get_fe_collection () const; + /** * Return a reference to the @p * FEValues object selected by the @@ -1088,6 +1096,16 @@ namespace internal { return *fe_values_table(present_fe_values_index); } + + + + template + inline + const dealii::hp::FECollection & + FEValuesBase::get_fe_collection () const + { + return *fe_collection; + } } } diff --git a/deal.II/deal.II/include/numerics/data_out.h b/deal.II/deal.II/include/numerics/data_out.h index fbff012754..cf4ce13da2 100644 --- a/deal.II/deal.II/include/numerics/data_out.h +++ b/deal.II/deal.II/include/numerics/data_out.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by the deal.II authors +// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -17,10 +17,13 @@ #include #include -#include #include #include #include +#include +#include +#include +#include #include #include @@ -42,45 +45,85 @@ namespace internal * structures are created * globally rather than on each * cell to avoid allocation of - * memory in the threads. + * memory in the threads. 
This is + * a base class for the + * AdditionalData kind of data + * structure discussed in the + * documentation of the + * WorkStream class. * - * The #cell_to_patch_index_map is - * an array that stores for index + * The + * cell_to_patch_index_map + * is an array that stores for index * [i][j] the number of the - * patch that associated with the - * cell with index @p j on level - * @p i. This information is set - * up prior to generation of the - * patches, and is needed to - * generate neighborship + * patch that associated with the cell + * with index @p j on level @p i. This + * information is set up prior to + * generation of the patches, and is + * needed to generate neighborship * information. * * This structure is used by * several of the DataOut* - * classes, not all of which use - * all fields. + * classes, which derived their + * own ParallelData classes from + * it for additional fields. */ template - struct ParallelData + struct ParallelDataBase { - unsigned int n_threads; - unsigned int this_thread; - unsigned int n_components; - unsigned int n_datasets; - unsigned int n_subdivisions; - unsigned int n_patches_per_circle; - SmartPointer > mapping; - std::vector patch_values; - std::vector > patch_values_system; - std::vector > patch_gradients; - std::vector > > patch_gradients_system; - std::vector > patch_hessians; - std::vector > > patch_hessians_system; - std::vector > dummy_normals; - std::vector > patch_normals; - std::vector > > postprocessed_values; - - std::vector > *cell_to_patch_index_map; + template + ParallelDataBase (const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const unsigned int n_q_points, + const std::vector &n_postprocessor_outputs, + const FE &finite_elements); + + const unsigned int n_components; + const unsigned int n_datasets; + const unsigned int n_subdivisions; + + std::vector patch_values; + std::vector > patch_values_system; + std::vector > patch_gradients; + std::vector > > patch_gradients_system; + std::vector > patch_hessians; + std::vector > > patch_hessians_system; + std::vector > > postprocessed_values; + + const dealii::hp::FECollection fe_collection; + }; + + + /** + * A derived class for use in the + * DataOut class. This is + * a class for the + * AdditionalData kind of data + * structure discussed in the + * documentation of the + * WorkStream context. + */ + template + struct ParallelData : public ParallelDataBase + { + template + ParallelData (const Quadrature &quadrature, + const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const std::vector &n_postprocessor_outputs, + const Mapping &mapping, + const std::vector > &cell_to_patch_index_map, + const FE &finite_elements, + const UpdateFlags update_flags); + + const dealii::hp::QCollection q_collection; + const dealii::hp::MappingCollection mapping_collection; + dealii::hp::FEValues x_fe_values; + + const std::vector > *cell_to_patch_index_map; }; } } @@ -1199,17 +1242,8 @@ class DataOut : public DataOut_DoFData * stored in * DataOutInterface::default_subdivisions * is to be used. - * - * The function supports - * multithreading, if deal.II is - * compiled in multithreading - * mode. The default number of - * threads to be used to build - * the patches is set to - * multithread_info.n_default_threads. 
*/ - virtual void build_patches (const unsigned int n_subdivisions = 0, - const unsigned int n_threads = multithread_info.n_default_threads); + virtual void build_patches (const unsigned int n_subdivisions = 0); /** * Same as above, except that the @@ -1261,9 +1295,8 @@ class DataOut : public DataOut_DoFData * replaced by a hp::MappingCollection in * case of a hp::DoFHandler. */ - virtual void build_patches (const Mapping &mapping, + virtual void build_patches (const Mapping &mapping, const unsigned int n_subdivisions = 0, - const unsigned int n_threads = multithread_info.n_default_threads, const CurvedCellRegion curved_region = curved_boundary); /** @@ -1309,26 +1342,64 @@ class DataOut : public DataOut_DoFData private: /** - * Builds every @p n_threads's - * patch. This function may be - * called in parallel. - * If multithreading is not - * used, the function is called - * once and generates all patches. - */ - void build_some_patches (internal::DataOut::ParallelData &data); - - /** - * Store in which region cells shall be - * curved, if a Mapping is provided. + * Build one patch. This function + * is called in a WorkStream + * context. */ - CurvedCellRegion curved_cell_region; + void build_one_patch (const std::pair *cell_and_index, + internal::DataOut::ParallelData &data, + DataOutBase::Patch &patch, + const CurvedCellRegion curved_cell_region); }; // -------------------- template and inline functions ------------------------ + +namespace internal +{ + namespace DataOut + { + template + template + ParallelDataBase:: + ParallelDataBase (const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const unsigned int n_q_points, + const std::vector &n_postprocessor_outputs, + const FE &finite_elements) + : + n_components (n_components), + n_datasets (n_datasets), + n_subdivisions (n_subdivisions), + patch_values (n_q_points), + patch_values_system (n_q_points), + patch_gradients (n_q_points), + patch_gradients_system (n_q_points), + patch_hessians (n_q_points), + patch_hessians_system (n_q_points), + postprocessed_values (n_postprocessor_outputs.size()), + fe_collection (finite_elements) + { + for (unsigned int k=0; k(n_postprocessor_outputs[dataset])); + } + } +} + + template template void diff --git a/deal.II/deal.II/include/numerics/data_out_faces.h b/deal.II/deal.II/include/numerics/data_out_faces.h index a7ea418032..c889a67ed7 100644 --- a/deal.II/deal.II/include/numerics/data_out_faces.h +++ b/deal.II/deal.II/include/numerics/data_out_faces.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008 by the deal.II authors +// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -23,6 +23,40 @@ DEAL_II_NAMESPACE_OPEN +namespace internal +{ + namespace DataOutFaces + { + /** + * A derived class for use in the + * DataOutFaces class. This is + * a class for the + * AdditionalData kind of data + * structure discussed in the + * documentation of the + * WorkStream context. 
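For users, the build_patches() signature change above amounts to dropping the trailing n_threads argument; a sketch of the otherwise unchanged output sequence, with dof_handler and solution assumed from context and includes omitted:

    DataOut<2> data_out;
    data_out.attach_dof_handler (dof_handler);
    data_out.add_data_vector (solution, "solution");

    // previously: data_out.build_patches (n_subdivisions, n_threads);
    // the degree of concurrency is now determined internally
    data_out.build_patches ();

    std::ofstream output ("solution.vtk");
    data_out.write_vtk (output);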
+ */ + template + struct ParallelData : public internal::DataOut::ParallelDataBase + { + template + ParallelData (const Quadrature &quadrature, + const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const std::vector &n_postprocessor_outputs, + const FE &finite_elements, + const UpdateFlags update_flags); + + const dealii::hp::QCollection q_collection; + dealii::hp::FEFaceValues x_fe_values; + + std::vector > patch_normals; + }; + } +} + + /** * This class generates output from faces of a triangulation rather * than from cells, as do for example the DataOut and @@ -110,8 +144,7 @@ class DataOutFaces : public DataOut_DoFDatamultithread_info.n_default_threads. */ virtual void - build_patches (const unsigned int n_subdivisions = 0, - const unsigned int n_threads = multithread_info.n_default_threads); + build_patches (const unsigned int n_subdivisions = 0); /** * Declare a way to describe a @@ -182,14 +215,13 @@ class DataOutFaces : public DataOut_DoFData &data); + void build_one_patch (const FaceDescriptor *cell_and_face, + internal::DataOutFaces::ParallelData &data, + DataOutBase::Patch &patch); }; diff --git a/deal.II/deal.II/include/numerics/data_out_rotation.h b/deal.II/deal.II/include/numerics/data_out_rotation.h index 65b1565a92..af8d501a9e 100644 --- a/deal.II/deal.II/include/numerics/data_out_rotation.h +++ b/deal.II/deal.II/include/numerics/data_out_rotation.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008 by the deal.II authors +// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -23,6 +23,42 @@ DEAL_II_NAMESPACE_OPEN +namespace internal +{ + namespace DataOutRotation + { + /** + * A derived class for use in the + * DataOutFaces class. This is + * a class for the + * AdditionalData kind of data + * structure discussed in the + * documentation of the + * WorkStream class. + */ + template + struct ParallelData : public internal::DataOut::ParallelDataBase + { + template + ParallelData (const Quadrature &quadrature, + const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const unsigned int n_patches_per_circle, + const std::vector &n_postprocessor_outputs, + const FE &finite_elements, + const UpdateFlags update_flags); + + const unsigned int n_patches_per_circle; + + const dealii::hp::QCollection q_collection; + dealii::hp::FEValues x_fe_values; + }; + } +} + + + /** * This class generates output in the full domain of computations that * were done using rotational symmetry of domain and solution. In @@ -119,8 +155,7 @@ class DataOutRotation : public DataOut_DoFData * multithread_info.n_default_threads. */ virtual void build_patches (const unsigned int n_patches_per_circle, - const unsigned int n_subdivisions = 0, - const unsigned int n_threads = multithread_info.n_default_threads); + const unsigned int n_subdivisions = 0); /** * Return the first cell which we @@ -177,7 +212,10 @@ class DataOutRotation : public DataOut_DoFData * used, the function is called * once and generates all patches. 
*/ - void build_some_patches (internal::DataOut::ParallelData &data); + void + build_one_patch (const cell_iterator *cell, + internal::DataOutRotation::ParallelData &data, + std::vector > &patches); }; diff --git a/deal.II/deal.II/include/numerics/error_estimator.h b/deal.II/deal.II/include/numerics/error_estimator.h index 71c0333dae..46ae47d84f 100644 --- a/deal.II/deal.II/include/numerics/error_estimator.h +++ b/deal.II/deal.II/include/numerics/error_estimator.h @@ -32,9 +32,6 @@ namespace hp { template class DoFHandler; template class QCollection; - template class MappingCollection; - template class FEFaceValues; - template class FESubfaceValues; } @@ -256,19 +253,6 @@ class KellyErrorEstimator * entries, or an empty * bit-vector. * - * The estimator supports - * multithreading and splits the - * cells to - * multithread_info.n_default_threads - * (default) threads. The number - * of threads to be used in - * multithreaded mode can be set - * with the last parameter of the - * error estimator. - * Multithreading is only - * implemented in two and three - * dimensions. - * * The @p subdomain_id parameter * indicates whether we shall compute * indicators for all cells (in case its @@ -307,6 +291,16 @@ class KellyErrorEstimator * indicators will only be computed for * cells with this particular material * id. + * + * The @p n_threads parameter used to + * indicate the number of threads to be + * used to compute the error + * estimator. This parameter is now + * ignored, with the number of threads + * determined automatically. The + * parameter is retained for + * compatibility with old versions of the + * library. */ template static void estimate (const Mapping &mapping, @@ -505,280 +499,6 @@ class KellyErrorEstimator * Exception */ DeclException0 (ExcNoSolutions); - - private: - - - /** - * Declare a data type to - * represent the mapping between - * faces and integrated jumps of - * gradients of each of the - * solution vectors. See the - * general documentation of this - * class for more information. - */ - template - struct FaceIntegrals - { - typedef - std::map > - type; - }; - - - /** - * All small temporary data - * objects that are needed once - * per thread by the several - * functions of the error - * estimator are gathered in this - * struct. The reason for this - * structure is mainly that we - * have a number of functions - * that operate on cells or faces - * and need a number of small - * temporary data objects. Since - * these functions may run in - * parallel, we cannot make these - * objects member variables of - * the enclosing class. On the - * other hand, declaring them - * locally in each of these - * functions would require their - * reallocating every time we - * visit the next cell or face, - * which we found can take a - * significant amount of time if - * it happens often even in the - * single threaded case (10-20 - * per cent in our measurements); - * however, most importantly, - * memory allocation requires - * synchronisation in - * multithreaded mode. While that - * is done by the C++ library and - * has not to be handcoded, it - * nevertheless seriously damages - * the ability to efficiently run - * the functions of this class in - * parallel, since they are quite - * often blocked by these - * synchronisation points, - * slowing everything down by a - * factor of two or three. - * - * Thus, every thread gets an - * instance of this class to work - * with and needs not allocate - * memory itself, or synchronise - * with other threads. 
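As noted in the estimate() documentation above, the n_threads parameter is now ignored, so existing calls keep compiling and working unchanged. A sketch of the usual call, with triangulation, dof_handler and solution assumed from context:

    Vector<float> estimated_error_per_cell (triangulation.n_active_cells());

    KellyErrorEstimator<2>::estimate (dof_handler,
                                      QGauss<1>(3),
                                      FunctionMap<2>::type(),   // no Neumann boundary data
                                      solution,
                                      estimated_error_per_cell);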
- * - * The sizes of the arrays are - * initialized with the maximal number of - * entries necessary for the hp - * case. Within the loop over individual - * cells, we then resize the arrays as - * necessary. Since for std::vector - * resizing to a smaller size doesn't - * imply memory allocation, this is fast. - */ - struct PerThreadData - { - /** - * A vector to store the jump - * of the normal vectors in - * the quadrature points for - * each of the solution - * vectors (i.e. a temporary - * value). This vector is not - * allocated inside the - * functions that use it, but - * rather globally, since - * memory allocation is slow, - * in particular in presence - * of multiple threads where - * synchronisation makes - * things even slower. - */ - std::vector > > phi; - - /** - * A vector for the gradients of - * the finite element function - * on one cell - * - * Let psi be a short name - * for a grad u_h, where - * the third index be the - * component of the finite - * element, and the second - * index the number of the - * quadrature point. The - * first index denotes the - * index of the solution - * vector. - */ - std::vector > > > psi; - - /** - * The same vector for a neighbor cell - */ - std::vector > > > neighbor_psi; - - /** - * The normal vectors of the finite - * element function on one face - */ - std::vector > normal_vectors; - - /** - * Two arrays needed for the - * values of coefficients in - * the jumps, if they are - * given. - */ - std::vector coefficient_values1; - std::vector > coefficient_values; - - /** - * Array for the products of - * Jacobian determinants and - * weights of quadraturs - * points. - */ - std::vector JxW_values; - - /** - * The subdomain id we are to care - * for. - */ - const unsigned int subdomain_id; - /** - * The material id we are to care - * for. - */ - const unsigned int material_id; - - /** - * Constructor. - */ - PerThreadData (const unsigned int n_solution_vectors, - const unsigned int n_components, - const unsigned int max_n_q_points, - const unsigned int subdomain_id, - const unsigned int material_id); - - /** - * Constructor. - */ - void resize (const unsigned int n_components, - const unsigned int max_n_q_points); - }; - - - /** - * Computates the error on all cells - * of the domain with the number n, - * satisfying - * n=this_thread (mod n_threads) - * This enumeration is chosen to - * generate a random distribution - * of all cells. - * - * This function is only needed - * in two or three dimensions. - * The error estimator in one - * dimension is implemented - * seperatly. - */ - template - static void estimate_some (const hp::MappingCollection &mapping, - const DH &dof_handler, - const hp::QCollection &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - const std::pair*,const Function*> &component_mask_and_coefficients, - const std::pair this_thread, - typename FaceIntegrals::type &face_integrals, - PerThreadData &per_thread_data); - - /** - * Actually do the computation on - * a face which has no hanging - * nodes (it is regular), i.e. - * either on the other side there - * is nirvana (face is at - * boundary), or the other side's - * refinement level is the same - * as that of this side, then - * handle the integration of - * these both cases together. - * - * The meaning of the parameters - * becomes clear when looking at - * the source code. This function - * is only externalized from - * @p estimate_error to avoid - * ending up with a function of - * 500 lines of code. 
- */ - template - static - void - integrate_over_regular_face (const DH &dof_handler, - const hp::QCollection &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - const std::vector &component_mask, - const Function *coefficients, - typename FaceIntegrals::type &face_integrals, - PerThreadData &per_thread_data, - const typename DH::active_cell_iterator &cell, - const unsigned int face_no, - hp::FEFaceValues &fe_face_values_cell, - hp::FEFaceValues &fe_face_values_neighbor); - - - /** - * The same applies as for the - * function above, except that - * integration is over face - * @p face_no of @p cell, where - * the respective neighbor is - * refined, so that the - * integration is a bit more - * complex. - */ - template - static - void - integrate_over_irregular_face (const DH &dof_handler, - const hp::QCollection &quadrature, - const std::vector &solutions, - const std::vector &component_mask, - const Function *coefficients, - typename FaceIntegrals::type &face_integrals, - PerThreadData &per_thread_data, - const typename DH::active_cell_iterator &cell, - const unsigned int face_no, - hp::FEFaceValues &fe_face_values, - hp::FESubfaceValues &fe_subface_values); - - /** - * By the resolution of Defect - * Report 45 to the ISO C++ 1998 - * standard, nested classes - * automatically have access to - * members of the enclosing - * class. Nevertheless, some - * compilers don't implement this - * resolution yet, so we have to - * make them @p friend, which - * doesn't hurt on the other - * compilers as well. - */ - friend class PerThreadData; }; diff --git a/deal.II/deal.II/include/numerics/matrices.h b/deal.II/deal.II/include/numerics/matrices.h index a19accf838..4374632304 100644 --- a/deal.II/deal.II/include/numerics/matrices.h +++ b/deal.II/deal.II/include/numerics/matrices.h @@ -629,160 +629,6 @@ class MatrixCreator active_cell_iterator first, second; }; - - /** - * Version of the same function - * (without suffix @p _1) with - * the same argument list that - * operates only on an interval - * of iterators. Used for - * parallelization. The mutex is - * used to synchronise access to - * the matrix. - */ - template - static - void create_mass_matrix_1 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - - /** - * Version of the same function - * (without suffix @p _2) with - * the same argument list that - * operates only on an interval - * of iterators. Used for - * parallelization. The mutex is - * used to synchronise access to - * the matrix. - */ - template - static - void create_mass_matrix_2 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - - /** - * Same function as above, but for hp - * objects. 
- */ - template - static - void create_mass_matrix_1 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - - template - static - void create_mass_matrix_2 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - - -/** - * Version of the same function - * (without suffix @p _1) with - * the same argument list that - * operates only on an interval - * of iterators. Used for - * parallelization. The mutex is - * used to synchronise access to - * the matrix. - */ - template - static - void create_laplace_matrix_1 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - - /** - * Version of the same function - * (without suffix @p _2) with - * the same argument list that - * operates only on an interval - * of iterators. Used for - * parallelization. The mutex is - * used to synchronise access to - * the matrix. - */ - template - static - void create_laplace_matrix_2 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - - /** - * Version of the same function - * (without suffix @p _1) with - * the same argument list that - * operates only on an interval - * of iterators. Used for - * parallelization. The mutex is - * used to synchronise access to - * the matrix. - */ - template - static - void create_laplace_matrix_1 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - - /** - * Version of the same function - * (without suffix @p _2) with - * the same argument list that - * operates only on an interval - * of iterators. Used for - * parallelization. The mutex is - * used to synchronise access to - * the matrix. 
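For context, the helpers removed here all shared roughly the following synchronisation pattern, which the WorkStream approach introduced by this patch replaces with a sequentially executed copier (variable names such as mutex, dof_indices and cell_matrix are illustrative):

    // inside the per-iterator-range worker of the old scheme: assemble
    // cell_matrix locally, then serialise the write into the global matrix
    {
      Threads::ThreadMutex::ScopedLock lock (mutex);
      for (unsigned int i=0; i<dofs_per_cell; ++i)
        for (unsigned int j=0; j<dofs_per_cell; ++j)
          matrix.add (dof_indices[i], dof_indices[j], cell_matrix(i,j));
    }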
- */ - template - static - void create_laplace_matrix_2 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const a, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - /** * Version of the same function * (without suffix @p _1) with diff --git a/deal.II/deal.II/include/numerics/vectors.templates.h b/deal.II/deal.II/include/numerics/vectors.templates.h index 1a883eb954..b934d2f490 100644 --- a/deal.II/deal.II/include/numerics/vectors.templates.h +++ b/deal.II/deal.II/include/numerics/vectors.templates.h @@ -3507,18 +3507,18 @@ namespace internal const unsigned int max_n_q_points = q.max_n_quadrature_points (); - std::vector< Vector > - function_values (max_n_q_points, Vector(n_components)); + std::vector< dealii::Vector > + function_values (max_n_q_points, dealii::Vector(n_components)); std::vector > > function_grads (max_n_q_points, std::vector >(n_components)); std::vector weight_values (max_n_q_points); - std::vector > - weight_vectors (max_n_q_points, Vector(n_components)); + std::vector > + weight_vectors (max_n_q_points, dealii::Vector(n_components)); - std::vector > - psi_values (max_n_q_points, Vector(n_components)); + std::vector > + psi_values (max_n_q_points, dealii::Vector(n_components)); std::vector > > psi_grads (max_n_q_points, std::vector >(n_components)); std::vector @@ -3547,16 +3547,16 @@ namespace internal // quadrature points we use // for the present cell function_values.resize (n_q_points, - Vector(n_components)); + dealii::Vector(n_components)); function_grads.resize (n_q_points, std::vector >(n_components)); weight_values.resize (n_q_points); weight_vectors.resize (n_q_points, - Vector(n_components)); + dealii::Vector(n_components)); psi_values.resize (n_q_points, - Vector(n_components)); + dealii::Vector(n_components)); psi_grads.resize (n_q_points, std::vector >(n_components)); psi_scalar.resize (n_q_points); diff --git a/deal.II/deal.II/source/dofs/dof_renumbering.cc b/deal.II/deal.II/source/dofs/dof_renumbering.cc index 67d3c39526..a79a813d00 100644 --- a/deal.II/deal.II/source/dofs/dof_renumbering.cc +++ b/deal.II/deal.II/source/dofs/dof_renumbering.cc @@ -563,7 +563,7 @@ namespace DoFRenumbering Assert (result == dof_handler.n_dofs(level), ExcRenumberingIncomplete()); - + if (renumbering.size()!=0) dof_handler.renumber_dofs (level, renumbering); } @@ -575,19 +575,23 @@ namespace DoFRenumbering component_wise (MGDoFHandler &dof_handler, const std::vector &component_order_arg) { - Threads::ThreadGroup<> threads; - + // renumber the non-MG part of + // the DoFHandler in parallel to + // the MG part. 
Because + // MGDoFHandler::renumber_dofs + // uses the user flags we can't + // run renumbering on individual + // levels in parallel to the + // other levels void (*non_mg_part) (DoFHandler &, const std::vector &) = &component_wise; - void (*mg_part) (MGDoFHandler &, unsigned int, const std::vector &) - = &component_wise; - - threads += Threads::spawn (non_mg_part) (static_cast&> (dof_handler), - component_order_arg); + Threads::Task<> + task = Threads::new_task (non_mg_part, dof_handler, component_order_arg); + for (unsigned int level=0; level > component_select (n_components, std::vector(n_components, false)); - Threads::ThreadGroup<> threads; + Threads::TaskGroup<> tasks; for (unsigned int i=0; i &, @@ -4008,10 +4008,11 @@ DoFTools::count_dofs_per_component ( bool) = &DoFTools::template extract_dofs; component_select[i][i] = true; - threads += Threads::spawn (fun_ptr)(dof_handler, component_select[i], - dofs_in_component[i], false); - }; - threads.join_all (); + tasks += Threads::new_task (fun_ptr, + dof_handler, component_select[i], + dofs_in_component[i], false); + } + tasks.join_all (); // next count what we got unsigned int component = 0; @@ -4099,7 +4100,7 @@ DoFTools::count_dofs_per_block ( dofs_in_block (n_blocks, std::vector(dof_handler.n_dofs(), false)); std::vector > block_select (n_blocks, std::vector(n_blocks, false)); - Threads::ThreadGroup<> threads; + Threads::TaskGroup<> tasks; for (unsigned int i=0; i &, @@ -4108,10 +4109,11 @@ DoFTools::count_dofs_per_block ( bool) = &DoFTools::template extract_dofs; block_select[i][i] = true; - threads += Threads::spawn (fun_ptr)(dof_handler, block_select[i], - dofs_in_block[i], true); + tasks += Threads::new_task (fun_ptr, + dof_handler, block_select[i], + dofs_in_block[i], true); }; - threads.join_all (); + tasks.join_all (); // next count what we got for (unsigned int block=0;block > parameter_dofs (coarse_dofs_per_cell_component, - Vector(fine_dofs_per_cell)); + std::vector > + parameter_dofs (coarse_dofs_per_cell_component, + dealii::Vector(fine_dofs_per_cell)); // for each coarse dof: find its // position within the fine element // and set this value to one in the @@ -4399,7 +4402,7 @@ namespace internal const dealii::DoFHandler &coarse_grid, const unsigned int coarse_component, const InterGridMap > &coarse_to_fine_grid_map, - const std::vector > ¶meter_dofs, + const std::vector > ¶meter_dofs, const std::vector &weight_mapping, std::vector > &weights, const typename dealii::DoFHandler::active_cell_iterator &begin, @@ -4479,7 +4482,7 @@ namespace internal // coarse grid (for the selected fe) // on the fine grid const unsigned int n_fine_dofs = weight_mapping.size(); - Vector global_parameter_representation (n_fine_dofs); + dealii::Vector global_parameter_representation (n_fine_dofs); typename dealii::DoFHandler::active_cell_iterator cell; std::vector parameter_dof_indices (coarse_fe.dofs_per_cell); @@ -4590,7 +4593,7 @@ namespace internal const dealii::DoFHandler &coarse_grid, const unsigned int coarse_component, const InterGridMap > &coarse_to_fine_grid_map, - const std::vector > ¶meter_dofs, + const std::vector > ¶meter_dofs, const std::vector &weight_mapping, std::vector > &weights) { @@ -4602,25 +4605,27 @@ namespace internal coarse_grid.end(), multithread_info.n_default_threads); - Threads::ThreadGroup<> threads; +//TODO: use WorkStream here + Threads::TaskGroup<> tasks; void (*fun_ptr) (const dealii::DoFHandler &, const unsigned int , const InterGridMap > &, - const std::vector > &, + const std::vector > &, const 
std::vector &, std::vector > &, const typename dealii::DoFHandler::active_cell_iterator &, const typename dealii::DoFHandler::active_cell_iterator &) = &compute_intergrid_weights_3; for (unsigned int i=0; i &tria, us[i].reinit (dof_handler.n_dofs()); // solve linear systems in parallel - Threads::ThreadGroup<> threads; + Threads::TaskGroup<> tasks; for (unsigned int i=0; i &accessor, const int level, - const Vector &values, - Vector &dof_values) + const dealii::Vector &values, + dealii::Vector &dof_values) { const unsigned int dofs_per_vertex = accessor.get_dof_handler().get_fe().dofs_per_vertex, dofs_per_line = accessor.get_dof_handler().get_fe().dofs_per_line; - typename Vector::iterator next_dof_value=dof_values.begin(); + typename dealii::Vector::iterator next_dof_value=dof_values.begin(); for (unsigned int vertex=0; vertex<2; ++vertex) for (unsigned int d=0; d &accessor, const int level, - const Vector &values, - Vector &dof_values) + const dealii::Vector &values, + dealii::Vector &dof_values) { const unsigned int dofs_per_vertex = accessor.get_dof_handler().get_fe().dofs_per_vertex, dofs_per_line = accessor.get_dof_handler().get_fe().dofs_per_line, dofs_per_quad = accessor.get_dof_handler().get_fe().dofs_per_quad; - typename Vector::iterator next_dof_value=dof_values.begin(); + typename dealii::Vector::iterator next_dof_value=dof_values.begin(); for (unsigned int vertex=0; vertex<4; ++vertex) for (unsigned int d=0; d &accessor, const int level, - const Vector &values, - Vector &dof_values) + const dealii::Vector &values, + dealii::Vector &dof_values) { const unsigned int dofs_per_vertex = accessor.get_dof_handler().get_fe().dofs_per_vertex, dofs_per_line = accessor.get_dof_handler().get_fe().dofs_per_line, dofs_per_quad = accessor.get_dof_handler().get_fe().dofs_per_quad, dofs_per_hex = accessor.get_dof_handler().get_fe().dofs_per_hex; - typename Vector::iterator next_dof_value=dof_values.begin(); + typename dealii::Vector::iterator next_dof_value=dof_values.begin(); for (unsigned int vertex=0; vertex<8; ++vertex) for (unsigned int d=0; d > component_select (n_components, std::vector(n_components, false)); - Threads::ThreadGroup<> threads; + Threads::TaskGroup<> tasks; for (unsigned int i=0; i; component_select[i][i] = true; - threads += Threads::spawn (fun_ptr)(l, dof_handler, component_select[i], - dofs_in_component[i], false); + tasks += Threads::new_task (fun_ptr, + l, dof_handler, + component_select[i], + dofs_in_component[i], false); } - threads.join_all(); + tasks.join_all(); // next count what we got unsigned int component = 0; @@ -1067,7 +1069,7 @@ MGTools::count_dofs_per_block ( dofs_in_block (n_blocks, std::vector(dof_handler.n_dofs(l), false)); std::vector > block_select (n_blocks, std::vector(n_blocks, false)); - Threads::ThreadGroup<> threads; + Threads::TaskGroup<> tasks; for (unsigned int i=0; i; block_select[i][i] = true; - threads += Threads::spawn (fun_ptr)(l, dof_handler, block_select[i], - dofs_in_block[i], true); + tasks += Threads::new_task (fun_ptr, + l, dof_handler, block_select[i], + dofs_in_block[i], true); }; - threads.join_all (); + tasks.join_all (); // next count what we got for (unsigned int block=0;block +#include #include -#include #include #include #include @@ -35,6 +35,59 @@ DEAL_II_NAMESPACE_OPEN +namespace internal +{ + namespace DataOut + { + template + template + ParallelData:: + ParallelData (const Quadrature &quadrature, + const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const 
std::vector &n_postprocessor_outputs, + const Mapping &mapping, + const std::vector > &cell_to_patch_index_map, + const FE &finite_elements, + const UpdateFlags update_flags) + : + ParallelDataBase (n_components, + n_datasets, + n_subdivisions, + quadrature.n_quadrature_points, + n_postprocessor_outputs, + finite_elements), + q_collection (quadrature), + mapping_collection (mapping), + x_fe_values (this->mapping_collection, + this->fe_collection, + q_collection, + update_flags), + cell_to_patch_index_map (&cell_to_patch_index_map) + {} + + + + + /** + * In a WorkStream context, use + * this function to append the + * patch computed by the parallel + * stage to the array of patches. + */ + template + void + append_patch_to_list (const DataOutBase::Patch &patch, + std::vector > &patches) + { + patches.push_back (patch); + patches.back().patch_index = patches.size()-1; + } + } +} + + template DataOut_DoFData:: DataEntryBase::DataEntryBase (const std::vector &names_in, @@ -618,380 +671,272 @@ DataOut_DoFData::memory_consumption () const /* ---------------------------------------------------------------------- */ - template -void DataOut::build_some_patches (internal::DataOut::ParallelData &data) -{ - // Check consistency of redundant - // template parameter - Assert (dim==DH::dimension, ExcDimensionMismatch(dim, DH::dimension)); - - QTrapez<1> q_trapez; - QIterated patch_points (q_trapez, data.n_subdivisions); - -// We use the mapping to transform the vertex coordinates and the shape -// functions (necessary for example for Raviart-Thomas elements). On the -// boundary, general mappings do not reduce to a MappingQ1, therefore the mapped -// (quadrature) points are stored in the patch, whereas for cells in the -// interior of the domain these points are obtained by a dim-linear mapping and -// can be recovered from the vertices later on, thus they need not to be stored. - - // create collection objects from - // single quadratures, - // mappings and finite elements. if we have - // an hp DoFHandler, - // dof_handler.get_fe() returns a - // collection of which we do a - // shallow copy instead - const hp::QCollection q_collection (patch_points); - const hp::FECollection fe_collection(this->dofs->get_fe()); - const hp::MappingCollection mapping_collection(*(data.mapping)); - - UpdateFlags update_flags=update_values; - if (curved_cell_region != no_curved_cells) - update_flags |= update_quadrature_points; - - for (unsigned int i=0; idof_data.size(); ++i) - if (this->dof_data[i]->postprocessor) - update_flags |= this->dof_data[i]->postprocessor->get_needed_update_flags(); - // perhaps update_normal_vectors is present, - // which would only be useful on faces, but - // we may not use it here. - Assert (!(update_flags & update_normal_vectors), - ExcMessage("The update of normal vectors may not be requested for evaluation of data on cells via DataPostprocessor.")); - - hp::FEValues x_fe_patch_values (mapping_collection, - fe_collection, - q_collection, - update_flags); - - const unsigned int n_q_points = patch_points.n_quadrature_points; - - typename std::vector< dealii::DataOutBase::Patch >::iterator - patch = this->patches.begin(); - cell_iterator cell=first_cell(); - - // keep track of the index of the - // current cell so we can - // efficiently evaluate cell-based - // data (as opposed to DoF-based - // data). we do so only if - // this->cell_data.size() != 0 - unsigned int cell_index = (this->cell_data.size() != 0 - ? 
- std::distance (this->dofs->begin_active(), - active_cell_iterator (cell)) - : - numbers::invalid_unsigned_int); - - - // get first cell in this thread - for (unsigned int i=0; (idofs->end()); ++i) - { - ++patch; - - const cell_iterator old_cell = cell; +void +DataOut:: +build_one_patch (const std::pair *cell_and_index, + internal::DataOut::ParallelData &data, + DataOutBase::Patch &patch, + const CurvedCellRegion curved_cell_region) +{ + // use ucd_to_deal map as patch + // vertices are in the old, + // unnatural ordering + for (unsigned int vertex=0; vertex::vertices_per_cell; ++vertex) + patch.vertices[vertex] = data.mapping_collection[0].transform_unit_to_real_cell + (cell_and_index->first, + GeometryInfo::unit_cell_vertex (vertex)); - cell = next_cell(cell); - - if (this->cell_data.size() != 0) - cell_index += std::distance (active_cell_iterator(old_cell), - active_cell_iterator(cell)); - } - - // now loop over all cells and - // actually create the patches - for (; cell != this->dofs->end();) + if (data.n_datasets > 0) { - Assert (patch != this->patches.end(), ExcInternalError()); - - // use ucd_to_deal map as patch - // vertices are in the old, - // unnatural ordering - for (unsigned int vertex=0; vertex::vertices_per_cell; ++vertex) - patch->vertices[vertex] = data.mapping->transform_unit_to_real_cell - (cell, GeometryInfo::unit_cell_vertex (vertex)); + data.x_fe_values.reinit (cell_and_index->first); + const FEValues &fe_patch_values + = data.x_fe_values.get_present_fe_values (); + + const unsigned int n_q_points = fe_patch_values.n_quadrature_points; - if (data.n_datasets > 0) + // depending on the requested output + // of curved cells, if necessary + // append the quadrature points to + // the last rows of the patch.data + // member. THis is the case if we + // want to produce curved cells at + // the boundary and this cell + // actually is at the boundary, or + // else if we want to produce curved + // cells everywhere + if (curved_cell_region==curved_inner_cells || + (curved_cell_region==curved_boundary && cell_and_index->first->at_boundary())) { - x_fe_patch_values.reinit (cell); - const FEValues &fe_patch_values - = x_fe_patch_values.get_present_fe_values (); - - // depending on the requested output - // of curved cells, if necessary - // append the quadrature points to - // the last rows of the patch->data - // member. 
THis is the case if we - // want to produce curved cells at - // the boundary and this cell - // actually is at the boundary, or - // else if we want to produce curved - // cells everywhere - if (curved_cell_region==curved_inner_cells || - (curved_cell_region==curved_boundary && cell->at_boundary())) - { - Assert(patch->space_dim==dim, ExcInternalError()); - const std::vector > & q_points=fe_patch_values.get_quadrature_points(); - // resize the patch->data member - // in order to have enough memory - // for the quadrature points as - // well - patch->data.reinit(patch->data.size(0)+dim,patch->data.size(1)); - // set the flag indicating that - // for this cell the points are - // explicitly given - patch->points_are_available=true; - // copy points to patch->data - for (unsigned int i=0; idata(patch->data.size(0)-dim+i,q)=q_points[q][i]; - } + Assert(patch.space_dim==dim, ExcInternalError()); + const std::vector > & q_points=fe_patch_values.get_quadrature_points(); + // resize the patch.data member + // in order to have enough memory + // for the quadrature points as + // well + patch.data.reinit (data.n_datasets+DH::space_dimension, n_q_points); + // set the flag indicating that + // for this cell the points are + // explicitly given + patch.points_are_available=true; + // copy points to patch.data + for (unsigned int i=0; idof_data.size(); ++dataset) - { - const DataPostprocessor *postprocessor=this->dof_data[dataset]->postprocessor; + // first fill dof_data + for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + { + const DataPostprocessor *postprocessor=this->dof_data[dataset]->postprocessor; - if (postprocessor != 0) + if (postprocessor != 0) + { + // we have to postprocess the + // data, so determine, which + // fields have to be updated + const UpdateFlags update_flags=postprocessor->get_needed_update_flags(); + if (data.n_components == 1) { - // we have to postprocess the - // data, so determine, which - // fields have to be updated - const UpdateFlags update_flags=postprocessor->get_needed_update_flags(); - if (data.n_components == 1) - { - // at each point there is - // only one component of - // value, gradient etc. - if (update_flags & update_values) - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values); - if (update_flags & update_gradients) - this->dof_data[dataset]->get_function_gradients (fe_patch_values, - data.patch_gradients); - if (update_flags & update_hessians) - this->dof_data[dataset]->get_function_hessians (fe_patch_values, - data.patch_hessians); - postprocessor-> - compute_derived_quantities_scalar(data.patch_values, - data.patch_gradients, - data.patch_hessians, - data.dummy_normals, - data.postprocessed_values[dataset]); - } - else - { - // at each point there is - // a vector valued - // function and its - // derivative... 
- if (update_flags & update_values) - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values_system); - if (update_flags & update_gradients) - this->dof_data[dataset]->get_function_gradients (fe_patch_values, - data.patch_gradients_system); - if (update_flags & update_hessians) - this->dof_data[dataset]->get_function_hessians (fe_patch_values, - data.patch_hessians_system); - postprocessor-> - compute_derived_quantities_vector(data.patch_values_system, - data.patch_gradients_system, - data.patch_hessians_system, - data.dummy_normals, - data.postprocessed_values[dataset]); - } - - for (unsigned int q=0; qdof_data[dataset]->n_output_variables;++component) - patch->data(offset+component,q)= data.postprocessed_values[dataset][q](component); - } - else - // now we use the given data - // vector without - // modifications. again, we - // treat single component - // functions separately for - // efficiency reasons. - if (data.n_components == 1) - { + // at each point there is + // only one component of + // value, gradient etc. + if (update_flags & update_values) this->dof_data[dataset]->get_function_values (fe_patch_values, data.patch_values); - for (unsigned int q=0; qdata(offset,q) = data.patch_values[q]; - } - else - { + if (update_flags & update_gradients) + this->dof_data[dataset]->get_function_gradients (fe_patch_values, + data.patch_gradients); + if (update_flags & update_hessians) + this->dof_data[dataset]->get_function_hessians (fe_patch_values, + data.patch_hessians); + std::vector > dummy_normals; + postprocessor-> + compute_derived_quantities_scalar(data.patch_values, + data.patch_gradients, + data.patch_hessians, + dummy_normals, + data.postprocessed_values[dataset]); + } + else + { + // at each point there is + // a vector valued + // function and its + // derivative... + if (update_flags & update_values) this->dof_data[dataset]->get_function_values (fe_patch_values, data.patch_values_system); - for (unsigned int component=0; componentdata(offset+component,q) = - data.patch_values_system[q](component); - } - // increment the counter for the - // actual data record - offset+=this->dof_data[dataset]->n_output_variables; + if (update_flags & update_gradients) + this->dof_data[dataset]->get_function_gradients (fe_patch_values, + data.patch_gradients_system); + if (update_flags & update_hessians) + this->dof_data[dataset]->get_function_hessians (fe_patch_values, + data.patch_hessians_system); + std::vector > dummy_normals; + postprocessor-> + compute_derived_quantities_vector(data.patch_values_system, + data.patch_gradients_system, + data.patch_hessians_system, + dummy_normals, + data.postprocessed_values[dataset]); + } + + for (unsigned int q=0; qdof_data[dataset]->n_output_variables; + ++component) + patch.data(offset+component,q) + = data.postprocessed_values[dataset][q](component); } + else + // now we use the given data + // vector without + // modifications. again, we + // treat single component + // functions separately for + // efficiency reasons. + if (data.n_components == 1) + { + this->dof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values); + for (unsigned int q=0; qdof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values_system); + for (unsigned int component=0; componentdof_data[dataset]->n_output_variables; + } - // then do the cell data. 
only - // compute the number of a cell if - // needed; also make sure that we - // only access cell data if the - // first_cell/next_cell functions - // only return active cells - if (this->cell_data.size() != 0) - { - Assert (!cell->has_children(), ExcNotImplemented()); + // then do the cell data. only + // compute the number of a cell if + // needed; also make sure that we + // only access cell data if the + // first_cell/next_cell functions + // only return active cells + if (this->cell_data.size() != 0) + { + Assert (!cell_and_index->first->has_children(), ExcNotImplemented()); - for (unsigned int dataset=0; datasetcell_data.size(); ++dataset) - { - const double value - = this->cell_data[dataset]->get_cell_data_value (cell_index); - for (unsigned int q=0; qdata(offset+dataset,q) = - value; - } - } + for (unsigned int dataset=0; datasetcell_data.size(); ++dataset) + { + const double value + = this->cell_data[dataset]->get_cell_data_value (cell_and_index->second); + for (unsigned int q=0; q::faces_per_cell; ++f) - { - // let's look up whether - // the neighbor behind that - // face is noted in the - // table of cells which we - // treat. this can only - // happen if the neighbor - // exists, and is on the - // same level as this cell, - // but it may also happen - // that the neighbor is not - // a member of the range of - // cells over which we - // loop, in which case the - // respective entry in the - // cell_to_patch_index_map - // will have the value - // no_neighbor. (note that - // since we allocated only - // as much space in this - // array as the maximum - // index of the cells we - // loop over, not every - // neighbor may have its - // space in it, so we have - // to assume that it is - // extended by values - // no_neighbor) - if (cell->at_boundary(f) - || - (cell->neighbor(f)->level() != cell->level())) - continue; - - const cell_iterator neighbor = cell->neighbor(f); - Assert (static_cast(neighbor->level()) < - data.cell_to_patch_index_map->size(), - ExcInternalError()); - if ((static_cast(neighbor->index()) >= - (*data.cell_to_patch_index_map)[neighbor->level()].size()) - || - ((*data.cell_to_patch_index_map)[neighbor->level()][neighbor->index()] - == - dealii::DataOutBase::Patch::no_neighbor)) - continue; - - // now, there is a - // neighbor, so get its - // patch number and set it - // for the neighbor index - patch->neighbors[f] = this->patches[(*data.cell_to_patch_index_map) - [neighbor->level()][neighbor->index()]].patch_index; - } - - // next cell (patch) in this - // thread - for (unsigned int i=0; - (idofs->end()); ++i) + for (unsigned int f=0; f::faces_per_cell; ++f) + { + // let's look up whether + // the neighbor behind that + // face is noted in the + // table of cells which we + // treat. this can only + // happen if the neighbor + // exists, and is on the + // same level as this cell, + // but it may also happen + // that the neighbor is not + // a member of the range of + // cells over which we + // loop, in which case the + // respective entry in the + // cell_to_patch_index_map + // will have the value + // no_neighbor. 
(note that + // since we allocated only + // as much space in this + // array as the maximum + // index of the cells we + // loop over, not every + // neighbor may have its + // space in it, so we have + // to assume that it is + // extended by values + // no_neighbor) + if (cell_and_index->first->at_boundary(f) + || + (cell_and_index->first->neighbor(f)->level() != cell_and_index->first->level())) { - ++patch; - - const cell_iterator old_cell = cell; - - cell = next_cell(cell); + patch.neighbors[f] = numbers::invalid_unsigned_int; + continue; + } - if (this->cell_data.size() != 0) - cell_index += std::distance (active_cell_iterator(old_cell), - active_cell_iterator(cell)); + const cell_iterator neighbor = cell_and_index->first->neighbor(f); + Assert (static_cast(neighbor->level()) < + data.cell_to_patch_index_map->size(), + ExcInternalError()); + if ((static_cast(neighbor->index()) >= + (*data.cell_to_patch_index_map)[neighbor->level()].size()) + || + ((*data.cell_to_patch_index_map)[neighbor->level()][neighbor->index()] + == + dealii::DataOutBase::Patch::no_neighbor)) + { + patch.neighbors[f] = numbers::invalid_unsigned_int; + continue; } + + // now, there is a + // neighbor, so get its + // patch number and set it + // for the neighbor index + patch.neighbors[f] + = (*data.cell_to_patch_index_map)[neighbor->level()][neighbor->index()]; } } template -void DataOut::build_patches (const unsigned int n_subdivisions, - const unsigned int n_threads_) +void DataOut::build_patches (const unsigned int n_subdivisions) { build_patches (StaticMappingQ1::mapping, - n_subdivisions, n_threads_, no_curved_cells); + n_subdivisions, no_curved_cells); } template void DataOut::build_patches (const Mapping &mapping, const unsigned int nnnn_subdivisions, - const unsigned int n_threads_, const CurvedCellRegion curved_region) { - unsigned int n_subdivisions = (nnnn_subdivisions != 0) - ? nnnn_subdivisions - : this->default_subdivisions; - // store the region in which cells shall be - // curved. If only one subdivision is - // requested then there is no need to do this - // at all - curved_cell_region=curved_region; - if (n_subdivisions<2) - curved_cell_region=no_curved_cells; + // Check consistency of redundant + // template parameter + Assert (dim==DH::dimension, ExcDimensionMismatch(dim, DH::dimension)); - Assert (n_subdivisions >= 1, - ExcInvalidNumberOfSubdivisions(n_subdivisions)); - typedef DataOut_DoFData BaseClass; Assert (this->dofs != 0, typename BaseClass::ExcNoDoFHandlerSelected()); - - Assert (!DEAL_II_USE_MT || (n_threads_ >= 1), - ExcMessage ("Must run with at least one thread!")); - - const unsigned int n_threads = (DEAL_II_USE_MT ? n_threads_ : 1); - - // before we start the loop: - // create a quadrature rule that - // actually has the points on this - // patch - QTrapez<1> q_trapez; - QIterated patch_points (q_trapez, n_subdivisions); - - const unsigned int n_q_points = patch_points.n_quadrature_points; - const unsigned int n_components = this->dofs->get_fe().n_components(); - unsigned int n_datasets=this->cell_data.size(); - for (unsigned int i=0; idof_data.size(); ++i) - n_datasets+= this->dof_data[i]->n_output_variables; - - // clear the patches array - if (true) - { - std::vector< dealii::DataOutBase::Patch > dummy; - this->patches.swap (dummy); - }; + const unsigned int n_subdivisions = (nnnn_subdivisions != 0) + ? 
nnnn_subdivisions + : this->default_subdivisions; + Assert (n_subdivisions >= 1, + ExcInvalidNumberOfSubdivisions(n_subdivisions)); + // first count the cells we want to // create patches of. also fill the // object that maps the cell @@ -1012,79 +957,91 @@ void DataOut::build_patches (const Mapping::no_neighbor); - }; + } - unsigned int n_patches = 0; - for (cell_iterator cell=first_cell(); cell != this->dofs->end(); - cell = next_cell(cell)) - { - Assert (static_cast(cell->level()) < - cell_to_patch_index_map.size(), - ExcInternalError()); - Assert (static_cast(cell->index()) < - cell_to_patch_index_map[cell->level()].size(), - ExcInternalError()); - - cell_to_patch_index_map[cell->level()][cell->index()] = n_patches; - ++n_patches; - }; - - // create the patches with default - // values. allocate as many patches - // as are needed, as this reduces - // expensive copying when push_back - // or similar operations are used - // which would regularly overflow - // the allocated amount of memory - // - // then number the patches - // consecutively - dealii::DataOutBase::Patch default_patch; - default_patch.n_subdivisions = n_subdivisions; - default_patch.data.reinit (n_datasets, n_q_points); - this->patches.insert (this->patches.end(), n_patches, default_patch); - - for (unsigned int i=0; ipatches.size(); ++i) - this->patches[i].patch_index = i; + std::vector > all_cells; + { + unsigned int index = 0; + for (cell_iterator cell=first_cell(); cell != this->dofs->end(); + cell = next_cell(cell), ++index) + { + Assert (static_cast(cell->level()) < + cell_to_patch_index_map.size(), + ExcInternalError()); + Assert (static_cast(cell->index()) < + cell_to_patch_index_map[cell->level()].size(), + ExcInternalError()); + + cell_to_patch_index_map[cell->level()][cell->index()] = all_cells.size(); + + all_cells.push_back (std::make_pair(cell, index)); + } + } - - // init data for the threads - std::vector > thread_data(n_threads); - for (unsigned int i=0;idof_data.size()); - thread_data[i].mapping = &mapping; - - thread_data[i].cell_to_patch_index_map = &cell_to_patch_index_map; - - for (unsigned int k=0; kdof_data.size(); ++dataset) - if (this->dof_data[dataset]->postprocessor) - thread_data[i].postprocessed_values[dataset].resize(n_q_points,Vector(this->dof_data[dataset]->n_output_variables)); - } - - Threads::ThreadGroup<> threads; - for (unsigned int l=0; l::build_some_patches)(thread_data[l]); - threads.join_all(); + this->patches.clear (); + this->patches.reserve (all_cells.size()); + Assert (this->patches.size() == 0, ExcInternalError()); + + // now create a default patch and a + // default object for the + // WorkStream object to work with + const QTrapez<1> q_trapez; + const QIterated patch_points (q_trapez, n_subdivisions); + + const unsigned int n_components = this->dofs->get_fe().n_components(); + unsigned int n_datasets=this->cell_data.size(); + for (unsigned int i=0; idof_data.size(); ++i) + n_datasets += this->dof_data[i]->n_output_variables; + + std::vector n_postprocessor_outputs (this->dof_data.size()); + for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + if (this->dof_data[dataset]->postprocessor) + n_postprocessor_outputs[dataset] = this->dof_data[dataset]->n_output_variables; + else + n_postprocessor_outputs[dataset] = 0; + + const CurvedCellRegion curved_cell_region + = (n_subdivisions<2 ? 
no_curved_cells : curved_region); + + UpdateFlags update_flags = update_values; + if (curved_cell_region != no_curved_cells) + update_flags |= update_quadrature_points; + + for (unsigned int i=0; idof_data.size(); ++i) + if (this->dof_data[i]->postprocessor) + update_flags |= this->dof_data[i]->postprocessor->get_needed_update_flags(); + // perhaps update_normal_vectors is present, + // which would only be useful on faces, but + // we may not use it here. + Assert (!(update_flags & update_normal_vectors), + ExcMessage("The update of normal vectors may not be requested for evaluation of " + "data on cells via DataPostprocessor.")); + + internal::DataOut::ParallelData + thread_data (patch_points, + n_components, n_datasets, n_subdivisions, + n_postprocessor_outputs, + mapping, + cell_to_patch_index_map, + this->dofs->get_fe(), + update_flags); + + DataOutBase::Patch sample_patch; + sample_patch.n_subdivisions = n_subdivisions; + sample_patch.data.reinit (n_datasets, patch_points.n_quadrature_points); + + + + // now build the patches in parallel + WorkStream::run (&all_cells[0], + &all_cells[0]+all_cells.size(), + std_cxx1x::bind(&DataOut::build_one_patch, + *this, _1, _2, _3, + curved_cell_region), + std_cxx1x::bind(&internal::DataOut::append_patch_to_list, + _1, std_cxx1x::ref(this->patches)), + thread_data, + sample_patch); } @@ -1177,6 +1134,7 @@ INSTANTIATE_VECTORS(hp::DoFHandler,deal_II_dimension,deal_II_dimension,deal_II_d template class DataOut >; + #if deal_II_dimension == 2 || deal_II_dimension ==1 // now do actual instantiations, first for DoFHandler... template class DataOut_DoFData,deal_II_dimension, deal_II_dimension+1>; diff --git a/deal.II/deal.II/source/numerics/data_out_faces.cc b/deal.II/deal.II/source/numerics/data_out_faces.cc index b4e7865109..399a7af9b8 100644 --- a/deal.II/deal.II/source/numerics/data_out_faces.cc +++ b/deal.II/deal.II/source/numerics/data_out_faces.cc @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by the deal.II authors +// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -13,7 +13,7 @@ #include -#include +#include #include #include #include @@ -29,202 +29,198 @@ DEAL_II_NAMESPACE_OPEN - -template -void DataOutFaces::build_some_patches (internal::DataOut::ParallelData &data) +namespace internal { - // Check consistency of redundant - // template parameter - Assert (dim==DH::dimension, ExcDimensionMismatch(dim, DH::dimension)); - - QTrapez<1> q_trapez; - QIterated patch_points (q_trapez, data.n_subdivisions); - -//TODO[?]: This is strange -- Data has a member 'mapping' that should -//be used here, but it isn't. Rather, up until version 1.94, we were -//actually initializing a local mapping object and used that... While -//we use the mapping to transform the vertex coordinates, we do not -//use the mapping to transform the shape functions (necessary for -//example for Raviart-Thomas elements). This could lead to trouble -//when someone tries to use MappingEulerian with such elements - - // create collection objects from - // single quadratures, - // and finite elements. 
if we have - // an hp DoFHandler, - // dof_handler.get_fe() returns a - // collection of which we do a - // shallow copy instead - const hp::QCollection q_collection (patch_points); - const hp::FECollection fe_collection(this->dofs->get_fe()); - - UpdateFlags update_flags=update_values; - for (unsigned int i=0; idof_data.size(); ++i) - if (this->dof_data[i]->postprocessor) - update_flags |= this->dof_data[i]->postprocessor->get_needed_update_flags(); - - hp::FEFaceValues x_fe_patch_values (fe_collection, q_collection, - update_flags); + namespace DataOutFaces + { + template + template + ParallelData:: + ParallelData (const Quadrature &quadrature, + const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const std::vector &n_postprocessor_outputs, + const FE &finite_elements, + const UpdateFlags update_flags) + : + internal::DataOut:: + ParallelDataBase (n_components, + n_datasets, + n_subdivisions, + quadrature.n_quadrature_points, + n_postprocessor_outputs, + finite_elements), + q_collection (quadrature), + x_fe_values (this->fe_collection, + q_collection, + update_flags) + {} + + + /** + * In a WorkStream context, use + * this function to append the + * patch computed by the parallel + * stage to the array of patches. + */ + template + void + append_patch_to_list (const DataOutBase::Patch &patch, + std::vector > &patches) + { + patches.push_back (patch); + patches.back().patch_index = patches.size()-1; + } + } +} - const unsigned int n_q_points = patch_points.n_quadrature_points; - - typename std::vector< dealii::DataOutBase::Patch >::iterator patch = this->patches.begin(); - FaceDescriptor face=first_face(); - // get first face in this thread - for (unsigned int i=0; (idofs->end()); ++i) - { - ++patch; - face=next_face(face); - } - // now loop over all cells and - // actually create the patches - for (; face.first != this->dofs->end();) - { - Assert (patch != this->patches.end(), ExcInternalError()); - - for (unsigned int vertex=0; vertex::vertices_per_cell; ++vertex) - patch->vertices[vertex] = face.first->face(face.second)->vertex(vertex); +template +void +DataOutFaces:: +build_one_patch (const FaceDescriptor *cell_and_face, + internal::DataOutFaces::ParallelData &data, + DataOutBase::Patch &patch) +{ + for (unsigned int vertex=0; vertex::vertices_per_cell; ++vertex) + patch.vertices[vertex] = cell_and_face->first->face(cell_and_face->second)->vertex(vertex); - if (data.n_datasets > 0) - { - x_fe_patch_values.reinit (face.first, face.second); - const FEFaceValues &fe_patch_values - = x_fe_patch_values.get_present_fe_values (); + if (data.n_datasets > 0) + { + data.x_fe_values.reinit (cell_and_face->first, cell_and_face->second); + const FEFaceValues &fe_patch_values + = data.x_fe_values.get_present_fe_values (); + + const unsigned int n_q_points = fe_patch_values.n_quadrature_points; + - // counter for data records - unsigned int offset=0; + // counter for data records + unsigned int offset=0; - // first fill dof_data - for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + // first fill dof_data + for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + { + const DataPostprocessor *postprocessor=this->dof_data[dataset]->postprocessor; + if (postprocessor != 0) { - const DataPostprocessor *postprocessor=this->dof_data[dataset]->postprocessor; - if (postprocessor != 0) - { - // we have to postprocess the - // data, so determine, which - // fields have to be updated - const UpdateFlags 
update_flags=postprocessor->get_needed_update_flags(); - - // get normals, if - // needed. this is a - // geometrical information - // and thus does not depend - // on the number of - // components of the data - // vector - if (update_flags & update_normal_vectors) - data.patch_normals=fe_patch_values.get_normal_vectors(); - - if (data.n_components == 1) - { - // at each point there is - // only one component of - // value, gradient etc. - if (update_flags & update_values) - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values); - if (update_flags & update_gradients) - this->dof_data[dataset]->get_function_gradients (fe_patch_values, - data.patch_gradients); - if (update_flags & update_hessians) - this->dof_data[dataset]->get_function_hessians (fe_patch_values, - data.patch_hessians); - postprocessor-> - compute_derived_quantities_scalar(data.patch_values, - data.patch_gradients, - data.patch_hessians, - data.patch_normals, - data.postprocessed_values[dataset]); - } - else - { - // at each point there is - // a vector valued - // function and its - // derivative... - if (update_flags & update_values) - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values_system); - if (update_flags & update_gradients) - this->dof_data[dataset]->get_function_gradients (fe_patch_values, - data.patch_gradients_system); - if (update_flags & update_hessians) - this->dof_data[dataset]->get_function_hessians (fe_patch_values, - data.patch_hessians_system); - postprocessor-> - compute_derived_quantities_vector(data.patch_values_system, - data.patch_gradients_system, - data.patch_hessians_system, - data.patch_normals, - data.postprocessed_values[dataset]); - } + // we have to postprocess the + // data, so determine, which + // fields have to be updated + const UpdateFlags update_flags=postprocessor->get_needed_update_flags(); - for (unsigned int q=0; qdof_data[dataset]->n_output_variables;++component) - patch->data(offset+component,q)= data.postprocessed_values[dataset][q](component); - } - else - // now we use the given data - // vector without - // modifications. again, we - // treat single component - // functions separately for - // efficiency reasons. - if (data.n_components == 1) - { + // get normals, if + // needed. this is a + // geometrical information + // and thus does not depend + // on the number of + // components of the data + // vector + if (update_flags & update_normal_vectors) + data.patch_normals=fe_patch_values.get_normal_vectors(); + + if (data.n_components == 1) + { + // at each point there is + // only one component of + // value, gradient etc. + if (update_flags & update_values) this->dof_data[dataset]->get_function_values (fe_patch_values, data.patch_values); - for (unsigned int q=0; qdata(offset,q) = data.patch_values[q]; - } - else - { + if (update_flags & update_gradients) + this->dof_data[dataset]->get_function_gradients (fe_patch_values, + data.patch_gradients); + if (update_flags & update_hessians) + this->dof_data[dataset]->get_function_hessians (fe_patch_values, + data.patch_hessians); + postprocessor-> + compute_derived_quantities_scalar(data.patch_values, + data.patch_gradients, + data.patch_hessians, + data.patch_normals, + data.postprocessed_values[dataset]); + } + else + { + // at each point there is + // a vector valued + // function and its + // derivative... 
+ if (update_flags & update_values) this->dof_data[dataset]->get_function_values (fe_patch_values, data.patch_values_system); - for (unsigned int component=0; componentdata(offset+component,q) = - data.patch_values_system[q](component); - } - // increment the counter for the - // actual data record - offset+=this->dof_data[dataset]->n_output_variables; - } - - // then do the cell data - for (unsigned int dataset=0; datasetcell_data.size(); ++dataset) - { - // we need to get at - // the number of the - // cell to which this - // face belongs in - // order to access the - // cell data. this is - // not readily - // available, so choose - // the following rather - // inefficient way: - Assert (face.first->active(), ExcCellNotActiveForCellData()); - const unsigned int cell_number - = std::distance (this->dofs->begin_active(), - typename DH::active_cell_iterator(face.first)); - - const double value - = this->cell_data[dataset]->get_cell_data_value (cell_number); - for (unsigned int q=0; qdata(dataset+offset,q) = - value; + if (update_flags & update_gradients) + this->dof_data[dataset]->get_function_gradients (fe_patch_values, + data.patch_gradients_system); + if (update_flags & update_hessians) + this->dof_data[dataset]->get_function_hessians (fe_patch_values, + data.patch_hessians_system); + postprocessor-> + compute_derived_quantities_vector(data.patch_values_system, + data.patch_gradients_system, + data.patch_hessians_system, + data.patch_normals, + data.postprocessed_values[dataset]); + } + + for (unsigned int q=0; qdof_data[dataset]->n_output_variables;++component) + patch.data(offset+component,q) + = data.postprocessed_values[dataset][q](component); } + else + // now we use the given data + // vector without + // modifications. again, we + // treat single component + // functions separately for + // efficiency reasons. + if (data.n_components == 1) + { + this->dof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values); + for (unsigned int q=0; qdof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values_system); + for (unsigned int component=0; componentdof_data[dataset]->n_output_variables; } - // next cell (patch) in this thread - for (unsigned int i=0; - (idofs->end()); ++i) + + // then do the cell data + for (unsigned int dataset=0; datasetcell_data.size(); ++dataset) { - ++patch; - face=next_face(face); + // we need to get at + // the number of the + // cell to which this + // face belongs in + // order to access the + // cell data. this is + // not readily + // available, so choose + // the following rather + // inefficient way: + Assert (cell_and_face->first->active(), ExcCellNotActiveForCellData()); + const unsigned int cell_number + = std::distance (this->dofs->begin_active(), + typename DH::active_cell_iterator(cell_and_face->first)); + + const double value + = this->cell_data[dataset]->get_cell_data_value (cell_number); + for (unsigned int q=0; q::build_some_patches (internal::DataOut::ParallelData -void DataOutFaces::build_patches (const unsigned int nnnn_subdivisions, - const unsigned int n_threads_) +void DataOutFaces::build_patches (const unsigned int n_subdivisions_) { - unsigned int n_subdivisions = (nnnn_subdivisions != 0) - ? nnnn_subdivisions - : this->default_subdivisions; + // Check consistency of redundant + // template parameter + Assert (dim==DH::dimension, ExcDimensionMismatch(dim, DH::dimension)); + + const unsigned int n_subdivisions = (n_subdivisions_ != 0) + ? 
n_subdivisions_ + : this->default_subdivisions; Assert (n_subdivisions >= 1, ExcInvalidNumberOfSubdivisions(n_subdivisions)); @@ -246,89 +245,68 @@ void DataOutFaces::build_patches (const unsigned int nnnn_subdivisions, typedef DataOut_DoFData BaseClass; Assert (this->dofs != 0, typename BaseClass::ExcNoDoFHandlerSelected()); - Assert (!DEAL_II_USE_MT || (n_threads_ >= 1), - ExcMessage ("Must run with at least one thread!")); - const unsigned int n_threads = (DEAL_II_USE_MT ? n_threads_ : 1); - // before we start the loop: // create a quadrature rule that // actually has the points on this // patch - QTrapez<1> q_trapez; - QIterated patch_points (q_trapez, n_subdivisions); + const QTrapez<1> q_trapez; + const QIterated patch_points (q_trapez, n_subdivisions); - const unsigned int n_q_points = patch_points.n_quadrature_points; const unsigned int n_components = this->dofs->get_fe().n_components(); + unsigned int n_datasets = this->cell_data.size(); for (unsigned int i=0; idof_data.size(); ++i) - n_datasets+= this->dof_data[i]->n_output_variables; - - // clear the patches array - if (true) - { - std::vector< dealii::DataOutBase::Patch > dummy; - this->patches.swap (dummy); - }; + n_datasets += this->dof_data[i]->n_output_variables; // first count the cells we want to // create patches of and make sure // there is enough memory for that - unsigned int n_patches = 0; + std::vector all_faces; for (FaceDescriptor face=first_face(); face.first != this->dofs->end(); face = next_face(face)) - ++n_patches; - - std::vector > thread_data(n_threads); - - // init data for the threads - for (unsigned int i=0;idof_data.size()); - - for (unsigned int k=0; kdof_data.size(); ++dataset) - if (this->dof_data[dataset]->postprocessor) - thread_data[i].postprocessed_values[dataset].resize(n_q_points,Vector(this->dof_data[dataset]->n_output_variables)); - } - - // create the patches with default - // values. 
note that the evaluation - // points on the face have to be - // repeated in angular direction - dealii::DataOutBase::Patch default_patch; - default_patch.n_subdivisions = n_subdivisions; - default_patch.data.reinit (n_datasets, n_q_points); - this->patches.insert (this->patches.end(), n_patches, default_patch); + all_faces.push_back (face); + + // clear the patches array and + // allocate the right number of + // elements + this->patches.clear (); + this->patches.reserve (all_faces.size()); + Assert (this->patches.size() == 0, ExcInternalError()); + + + std::vector n_postprocessor_outputs (this->dof_data.size()); + for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + if (this->dof_data[dataset]->postprocessor) + n_postprocessor_outputs[dataset] = this->dof_data[dataset]->n_output_variables; + else + n_postprocessor_outputs[dataset] = 0; - if (DEAL_II_USE_MT) - { - Threads::ThreadGroup<> threads; - for (unsigned int l=0;l::build_some_patches)(thread_data[l]); - threads.join_all(); - } - else - // just one thread - build_some_patches(thread_data[0]); + UpdateFlags update_flags=update_values; + for (unsigned int i=0; idof_data.size(); ++i) + if (this->dof_data[i]->postprocessor) + update_flags |= this->dof_data[i]->postprocessor->get_needed_update_flags(); + + internal::DataOutFaces::ParallelData + thread_data (patch_points, n_components, n_datasets, + n_subdivisions, + n_postprocessor_outputs, this->dofs->get_fe(), + update_flags); + DataOutBase::Patch sample_patch; + sample_patch.n_subdivisions = n_subdivisions; + sample_patch.data.reinit (n_datasets, + patch_points.n_quadrature_points); + + // now build the patches in parallel + WorkStream::run (&all_faces[0], + &all_faces[0]+all_faces.size(), + std_cxx1x::bind(&DataOutFaces::build_one_patch, + *this, _1, _2, _3), + std_cxx1x::bind(&internal::DataOutFaces:: + append_patch_to_list, + _1, std_cxx1x::ref(this->patches)), + thread_data, + sample_patch); } diff --git a/deal.II/deal.II/source/numerics/data_out_rotation.cc b/deal.II/deal.II/source/numerics/data_out_rotation.cc index 44d2202ec1..d456b165d0 100644 --- a/deal.II/deal.II/source/numerics/data_out_rotation.cc +++ b/deal.II/deal.II/source/numerics/data_out_rotation.cc @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by the deal.II authors +// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. 
Please refer @@ -13,7 +13,7 @@ #include -#include +#include #include #include #include @@ -41,42 +41,68 @@ DEAL_II_NAMESPACE_OPEN // Not implemented for 3D + +namespace internal +{ + namespace DataOutRotation + { + template + template + ParallelData:: + ParallelData (const Quadrature &quadrature, + const unsigned int n_components, + const unsigned int n_datasets, + const unsigned int n_subdivisions, + const unsigned int n_patches_per_circle, + const std::vector &n_postprocessor_outputs, + const FE &finite_elements, + const UpdateFlags update_flags) + : + internal::DataOut:: + ParallelDataBase (n_components, + n_datasets, + n_subdivisions, + quadrature.n_quadrature_points, + n_postprocessor_outputs, + finite_elements), + n_patches_per_circle (n_patches_per_circle), + q_collection (quadrature), + x_fe_values (this->fe_collection, + q_collection, + update_flags) + {} + + + /** + * In a WorkStream context, use + * this function to append the + * patch computed by the parallel + * stage to the array of patches. + */ + template + void + append_patch_to_list (const std::vector > &new_patches, + std::vector > &patches) + { + for (unsigned int i=0; i -void DataOutRotation::build_some_patches (internal::DataOut::ParallelData &data) +void +DataOutRotation:: +build_one_patch (const cell_iterator *cell, + internal::DataOutRotation::ParallelData &data, + std::vector > &patches) { - QTrapez<1> q_trapez; - QIterated patch_points (q_trapez, data.n_subdivisions); - - // create collection objects from - // single quadratures, - // and finite elements. if we have - // an hp DoFHandler, - // dof_handler.get_fe() returns a - // collection of which we do a - // shallow copy instead - // - // since most output formats can't - // handle cells that are not - // transformed using a Q1 mapping, - // we don't support anything else - // as well - const hp::QCollection q_collection (patch_points); - const hp::FECollection fe_collection(this->dofs->get_fe()); - - UpdateFlags update_flags=update_values | update_quadrature_points; - for (unsigned int i=0; idof_data.size(); ++i) - if (this->dof_data[i]->postprocessor) - update_flags |= this->dof_data[i]->postprocessor->get_needed_update_flags(); - // perhaps update_normal_vectors is present, - // which would only be useful on faces, but - // we may not use it here. - Assert (!(update_flags & update_normal_vectors), - ExcMessage("The update of normal vectors may not be requested for evaluation of data on cells via DataPostprocessor.")); - - hp::FEValues x_fe_patch_values (fe_collection, q_collection, - update_flags); - const unsigned int n_patches_per_circle = data.n_patches_per_circle; // another abbreviation denoting @@ -100,303 +126,278 @@ void DataOutRotation::build_some_patches (internal::DataOut::ParallelDat i/n_patches_per_circle); angle_directions[i][DH::dimension] = std::sin(2*numbers::PI * i/n_patches_per_circle); - }; - - - unsigned int cell_number = 0; - typename std::vector< dealii::DataOutBase::Patch >::iterator - patch = this->patches.begin(); - cell_iterator cell=first_cell(); - - // get first cell in this thread - for (unsigned int i=0; (idofs->end()); ++i) - { - std::advance (patch, n_patches_per_circle); - ++cell_number; - cell=next_cell(cell); } - - // now loop over all cells and - // actually create the patches - for (; cell != this->dofs->end(); ) + + for (unsigned int angle=0; anglepatches.end(), ExcInternalError()); - - - // first compute the - // vertices of the - // patch. 
note that they - // will have to be computed - // from the vertices of the - // cell, which has one - // dimension less, however. - switch (DH::dimension) - { - case 1: - { - const double r1 = cell->vertex(0)(0), - r2 = cell->vertex(1)(0); - Assert (r1 >= 0, ExcRadialVariableHasNegativeValues(r1)); - Assert (r2 >= 0, ExcRadialVariableHasNegativeValues(r2)); + case 1: + { + const double r1 = (*cell)->vertex(0)(0), + r2 = (*cell)->vertex(1)(0); + Assert (r1 >= 0, ExcRadialVariableHasNegativeValues(r1)); + Assert (r2 >= 0, ExcRadialVariableHasNegativeValues(r2)); - patch->vertices[0] = r1*angle_directions[angle]; - patch->vertices[1] = r2*angle_directions[angle]; - patch->vertices[2] = r1*angle_directions[angle+1]; - patch->vertices[3] = r2*angle_directions[angle+1]; + patches[angle].vertices[0] = r1*angle_directions[angle]; + patches[angle].vertices[1] = r2*angle_directions[angle]; + patches[angle].vertices[2] = r1*angle_directions[angle+1]; + patches[angle].vertices[3] = r2*angle_directions[angle+1]; - break; - }; + break; + }; - case 2: + case 2: + { + for (unsigned int vertex=0; + vertex::vertices_per_cell; + ++vertex) { - for (unsigned int vertex=0; - vertex::vertices_per_cell; - ++vertex) - { - const Point v = cell->vertex(vertex); + const Point v = (*cell)->vertex(vertex); - // make sure that the - // radial variable does - // attain negative - // values - Assert (v(0) >= 0, ExcRadialVariableHasNegativeValues(v(0))); + // make sure that the + // radial variable does + // attain negative + // values + Assert (v(0) >= 0, ExcRadialVariableHasNegativeValues(v(0))); - // now set the vertices - // of the patch - patch->vertices[vertex] = v(0) * angle_directions[angle]; - patch->vertices[vertex][0] = v(1); + // now set the vertices + // of the patch + patches[angle].vertices[vertex] = v(0) * angle_directions[angle]; + patches[angle].vertices[vertex][0] = v(1); - patch->vertices[vertex+GeometryInfo::vertices_per_cell] - = v(0) * angle_directions[angle+1]; - patch->vertices[vertex+GeometryInfo::vertices_per_cell][0] - = v(1); - }; - - break; + patches[angle].vertices[vertex+GeometryInfo::vertices_per_cell] + = v(0) * angle_directions[angle+1]; + patches[angle].vertices[vertex+GeometryInfo::vertices_per_cell][0] + = v(1); }; + + break; + }; - default: - Assert (false, ExcNotImplemented()); - }; + default: + Assert (false, ExcNotImplemented()); + }; - unsigned int offset=0; + unsigned int offset=0; - // then fill in data - if (data.n_datasets > 0) - { - x_fe_patch_values.reinit (cell); - const FEValues &fe_patch_values - = x_fe_patch_values.get_present_fe_values (); + // then fill in data + if (data.n_datasets > 0) + { + data.x_fe_values.reinit (*cell); + const FEValues &fe_patch_values + = data.x_fe_values.get_present_fe_values (); - // first fill dof_data - for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + // first fill dof_data + for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + { + const DataPostprocessor *postprocessor=this->dof_data[dataset]->postprocessor; + if (postprocessor != 0) { - const DataPostprocessor *postprocessor=this->dof_data[dataset]->postprocessor; - if (postprocessor != 0) - { - // we have to postprocess the - // data, so determine, which - // fields have to be updated - const UpdateFlags update_flags=postprocessor->get_needed_update_flags(); + // we have to postprocess the + // data, so determine, which + // fields have to be updated + const UpdateFlags update_flags=postprocessor->get_needed_update_flags(); - if (data.n_components == 1) - { 
- // at each point there is - // only one component of - // value, gradient etc. - if (update_flags & update_values) - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values); - if (update_flags & update_gradients) - this->dof_data[dataset]->get_function_gradients (fe_patch_values, - data.patch_gradients); - if (update_flags & update_hessians) - this->dof_data[dataset]->get_function_hessians (fe_patch_values, - data.patch_hessians); - postprocessor-> - compute_derived_quantities_scalar(data.patch_values, - data.patch_gradients, - data.patch_hessians, - data.dummy_normals, - data.postprocessed_values[dataset]); - } - else - { - // at each point there is - // a vector valued - // function and its - // derivative... - if (update_flags & update_values) - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values_system); - if (update_flags & update_gradients) - this->dof_data[dataset]->get_function_gradients (fe_patch_values, - data.patch_gradients_system); - if (update_flags & update_hessians) - this->dof_data[dataset]->get_function_hessians (fe_patch_values, - data.patch_hessians_system); - postprocessor-> - compute_derived_quantities_vector(data.patch_values_system, - data.patch_gradients_system, - data.patch_hessians_system, - data.dummy_normals, - data.postprocessed_values[dataset]); - } + if (data.n_components == 1) + { + // at each point there is + // only one component of + // value, gradient etc. + if (update_flags & update_values) + this->dof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values); + if (update_flags & update_gradients) + this->dof_data[dataset]->get_function_gradients (fe_patch_values, + data.patch_gradients); + if (update_flags & update_hessians) + this->dof_data[dataset]->get_function_hessians (fe_patch_values, + data.patch_hessians); + + std::vector > dummy_normals; + postprocessor-> + compute_derived_quantities_scalar(data.patch_values, + data.patch_gradients, + data.patch_hessians, + dummy_normals, + data.postprocessed_values[dataset]); + } + else + { + // at each point there is + // a vector valued + // function and its + // derivative... 
+ if (update_flags & update_values) + this->dof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values_system); + if (update_flags & update_gradients) + this->dof_data[dataset]->get_function_gradients (fe_patch_values, + data.patch_gradients_system); + if (update_flags & update_hessians) + this->dof_data[dataset]->get_function_hessians (fe_patch_values, + data.patch_hessians_system); + std::vector > dummy_normals; + postprocessor-> + compute_derived_quantities_vector(data.patch_values_system, + data.patch_gradients_system, + data.patch_hessians_system, + dummy_normals, + data.postprocessed_values[dataset]); + } - for (unsigned int component=0; - componentdof_data[dataset]->n_output_variables; - ++component) + for (unsigned int component=0; + componentdof_data[dataset]->n_output_variables; + ++component) + { + switch (DH::dimension) { - switch (DH::dimension) - { - case 1: - for (unsigned int x=0; xdata(offset+component, - x*n_points + y) - = data.postprocessed_values[dataset][x](component); - break; + case 1: + for (unsigned int x=0; xdata(offset+component, - x*n_points*n_points + - y*n_points + - z) - = data.postprocessed_values[dataset][x*n_points+z](component); - break; + case 2: + for (unsigned int x=0; xdof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values); + + switch (DH::dimension) { - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values); + case 1: + for (unsigned int x=0; xdof_data[dataset]->get_function_values (fe_patch_values, + data.patch_values_system); + for (unsigned int component=0; componentdata(offset, + patches[angle].data(offset+component, x*n_points + y) - = data.patch_values[x]; + = data.patch_values_system[x](component); break; - + case 2: for (unsigned int x=0; xdata(offset, + patches[angle].data(offset+component, x*n_points*n_points + - y + - z*n_points) - = data.patch_values[x*n_points+z]; + y*n_points + + z) + = data.patch_values_system[x*n_points+z](component); break; - + default: Assert (false, ExcNotImplemented()); } } - else - // system of components - { - this->dof_data[dataset]->get_function_values (fe_patch_values, - data.patch_values_system); - - for (unsigned int component=0; componentdata(offset+component, - x*n_points + y) - = data.patch_values_system[x](component); - break; - - case 2: - for (unsigned int x=0; xdata(offset+component, - x*n_points*n_points + - y*n_points + - z) - = data.patch_values_system[x*n_points+z](component); - break; - - default: - Assert (false, ExcNotImplemented()); - } - } - } - offset+=this->dof_data[dataset]->n_output_variables; - } + } + offset+=this->dof_data[dataset]->n_output_variables; + } - // then do the cell data - for (unsigned int dataset=0; datasetcell_data.size(); ++dataset) + // then do the cell data + for (unsigned int dataset=0; datasetcell_data.size(); ++dataset) + { + // we need to get at + // the number of the + // cell to which this + // face belongs in + // order to access the + // cell data. 
this is + // not readily + // available, so choose + // the following rather + // inefficient way: + Assert ((*cell)->active(), + ExcMessage("Cell must be active for cell data")); + const unsigned int cell_number + = std::distance (this->dofs->begin_active(), + typename DH::active_cell_iterator(*cell)); + const double value + = this->cell_data[dataset]->get_cell_data_value (cell_number); + switch (DH::dimension) { - const double value - = this->cell_data[dataset]->get_cell_data_value (cell_number); - switch (DH::dimension) - { - case 1: - for (unsigned int x=0; xdata(dataset+offset, - x*n_points + - y) - = value; - break; + case 1: + for (unsigned int x=0; xdata(dataset+offset, - x*n_points*n_points + - y*n_points + - z) - = value; - break; - - default: - Assert (false, ExcNotImplemented()); - } + case 2: + for (unsigned int x=0; x(data.n_threads)-1; - for (int i=0; (idofs->end()); ++i) - std::advance (patch, n_patches_per_circle); - - // however, cell and cell - // number have not yet been - // increased - for (unsigned int i=0; (idofs->end()); ++i) - { - ++cell_number; - cell=next_cell(cell); } } } @@ -406,7 +407,11 @@ void DataOutRotation::build_some_patches (internal::DataOut::ParallelDat #else template -void DataOutRotation::build_some_patches (internal::DataOut::ParallelData &) +void +DataOutRotation:: +build_one_patch (const cell_iterator *, + internal::DataOutRotation::ParallelData &, + std::vector > &) { // would this function make any // sense after all? who would want @@ -420,118 +425,89 @@ void DataOutRotation::build_some_patches (internal::DataOut::ParallelDat template -void DataOutRotation::build_patches ( - const unsigned int n_patches_per_circle, - const unsigned int nnnn_subdivisions, - const unsigned int n_threads_) +void DataOutRotation::build_patches (const unsigned int n_patches_per_circle, + const unsigned int nnnn_subdivisions) { // Check consistency of redundant // template parameter Assert (dim==DH::dimension, ExcDimensionMismatch(dim, DH::dimension)); - - unsigned int n_subdivisions = (nnnn_subdivisions != 0) - ? nnnn_subdivisions - : this->default_subdivisions; - - Assert (n_subdivisions >= 1, - ExcInvalidNumberOfSubdivisions(n_subdivisions)); - typedef DataOut_DoFData BaseClass; Assert (this->dofs != 0, typename BaseClass::ExcNoDoFHandlerSelected()); - Assert (!DEAL_II_USE_MT || (n_threads_ >= 1), - ExcMessage ("Must run with at least one thread!")); - const unsigned int n_threads = (DEAL_II_USE_MT ? n_threads_ : 1); - - // before we start the loop: - // create a quadrature rule that - // actually has the points on this - // patch - QTrapez<1> q_trapez; - QIterated patch_points (q_trapez, n_subdivisions); + const unsigned int n_subdivisions = (nnnn_subdivisions != 0) + ? 
nnnn_subdivisions + : this->default_subdivisions; + Assert (n_subdivisions >= 1, + ExcInvalidNumberOfSubdivisions(n_subdivisions)); - const unsigned int n_q_points = patch_points.n_quadrature_points; + const QTrapez<1> q_trapez; + const QIterated patch_points (q_trapez, n_subdivisions); + const unsigned int n_components = this->dofs->get_fe().n_components(); unsigned int n_datasets=this->cell_data.size(); for (unsigned int i=0; idof_data.size(); ++i) n_datasets+= this->dof_data[i]->n_output_variables; - - // clear the patches array - if (true) - { - std::vector< dealii::DataOutBase::Patch > dummy; - this->patches.swap (dummy); - }; + + UpdateFlags update_flags=update_values | update_quadrature_points; + for (unsigned int i=0; idof_data.size(); ++i) + if (this->dof_data[i]->postprocessor) + update_flags |= this->dof_data[i]->postprocessor->get_needed_update_flags(); + // perhaps update_normal_vectors is present, + // which would only be useful on faces, but + // we may not use it here. + Assert (!(update_flags & update_normal_vectors), + ExcMessage("The update of normal vectors may not be requested for " + "evaluation of data on cells via DataPostprocessor.")); // first count the cells we want to // create patches of and make sure // there is enough memory for that - unsigned int n_patches = 0; + std::vector all_cells; for (cell_iterator cell=first_cell(); cell != this->dofs->end(); cell = next_cell(cell)) - ++n_patches; + all_cells.push_back (cell); + // then also take into account that // we want more than one patch to // come out of every cell, as they // are repeated around the axis of // rotation - n_patches *= n_patches_per_circle; - - std::vector > thread_data(n_threads); - - // init data for the threads - for (unsigned int i=0;ipatches.clear(); + this->patches.reserve (all_cells.size() * n_patches_per_circle); + + + std::vector n_postprocessor_outputs (this->dof_data.size()); + for (unsigned int dataset=0; datasetdof_data.size(); ++dataset) + if (this->dof_data[dataset]->postprocessor) + n_postprocessor_outputs[dataset] = this->dof_data[dataset]->n_output_variables; + else + n_postprocessor_outputs[dataset] = 0; + + internal::DataOutRotation::ParallelData + thread_data (patch_points, n_components, n_datasets, + n_subdivisions, n_patches_per_circle, + n_postprocessor_outputs, this->dofs->get_fe(), + update_flags); + std::vector > + new_patches (n_patches_per_circle); + for (unsigned int i=0; idof_data.size()); - - for (unsigned int k=0; kdof_data.size(); ++dataset) - if (this->dof_data[dataset]->postprocessor) - thread_data[i].postprocessed_values[dataset].resize(n_q_points,Vector(this->dof_data[dataset]->n_output_variables)); + new_patches[i].n_subdivisions = n_subdivisions; + new_patches[i].data.reinit (n_datasets, + patch_points.n_quadrature_points + * (n_subdivisions+1)); } - // create the patches with default - // values. 
note that the evaluation - // points on the cell have to be - // repeated in angular direction - dealii::DataOutBase::Patch default_patch; - default_patch.n_subdivisions = n_subdivisions; - default_patch.data.reinit (n_datasets, - n_q_points * (n_subdivisions+1)); - this->patches.insert (this->patches.end(), n_patches, default_patch); - - if (DEAL_II_USE_MT) - { - void (DataOutRotation::*p) (internal::DataOut::ParallelData &) - = &DataOutRotation::build_some_patches; - - Threads::ThreadGroup<> threads; - for (unsigned int l=0;l::build_one_patch, + *this, _1, _2, _3), + std_cxx1x::bind(&internal::DataOutRotation + ::append_patch_to_list, + _1, std_cxx1x::ref(this->patches)), + thread_data, + new_patches); } diff --git a/deal.II/deal.II/source/numerics/derivative_approximation.cc b/deal.II/deal.II/source/numerics/derivative_approximation.cc index 3cae0d0705..8aeaaa50b0 100644 --- a/deal.II/deal.II/source/numerics/derivative_approximation.cc +++ b/deal.II/deal.II/source/numerics/derivative_approximation.cc @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by the deal.II authors +// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -636,8 +636,6 @@ approximate_derivative (const Mapping &mapping, = Threads::split_interval (0, dof_handler.get_tria().n_active_cells(), n_threads); - Threads::ThreadGroup<> threads; - typedef void (*FunPtr) (const Mapping &, const DH &, const InputVector &, @@ -647,13 +645,16 @@ approximate_derivative (const Mapping &mapping, FunPtr fun_ptr = &DerivativeApproximation:: template approximate; - + +//TODO: Use WorkStream here + Threads::TaskGroup<> tasks; for (unsigned int i=0; i #include #include +#include #include #include #include @@ -34,6 +35,8 @@ #include #include +#include + #include #include #include @@ -42,33 +45,960 @@ DEAL_II_NAMESPACE_OPEN - -static -inline -double sqr (const double x) +namespace { - return x*x; + inline + double sqr (const double x) + { + return x*x; + } + + + template + inline + void advance_by_n (CellIterator &cell, + const unsigned int n) + { + // store a pointer to the end + // iterator, since we can't get at + // it any more once cell is already + // the end iterator (in that case + // dereferencing cell-> triggers an + // assertion) + const CellIterator endc = cell->get_dof_handler().end(); + for (unsigned int t=0; ((t -inline -void advance_by_n (CellIterator &cell, - const unsigned int n) +namespace internal { - // store a pointer to the end - // iterator, since we can't get at - // it any more once cell is already - // the end iterator (in that case - // dereferencing cell-> triggers an - // assertion) - const CellIterator endc = cell->get_dof_handler().end(); - for (unsigned int t=0; ((t + struct ParallelData + { + static const unsigned int dim = DH::dimension; + static const unsigned int spacedim = DH::space_dimension; + + /** + * The finite element to be used. + */ + const dealii::hp::FECollection finite_element; + + /** + * The quadrature formulas to be used for + * the faces. + */ + const dealii::hp::QCollection face_quadratures; + + /** + * FEFaceValues objects to integrate over + * the faces of the current and + * potentially of neighbor cells. 
+ */ + dealii::hp::FEFaceValues fe_face_values_cell; + dealii::hp::FEFaceValues fe_face_values_neighbor; + dealii::hp::FESubfaceValues fe_subface_values; + + /** + * A vector to store the jump + * of the normal vectors in + * the quadrature points for + * each of the solution + * vectors (i.e. a temporary + * value). This vector is not + * allocated inside the + * functions that use it, but + * rather globally, since + * memory allocation is slow, + * in particular in presence + * of multiple threads where + * synchronisation makes + * things even slower. + */ + std::vector > > phi; + + /** + * A vector for the gradients of + * the finite element function + * on one cell + * + * Let psi be a short name + * for a grad u_h, where + * the third index be the + * component of the finite + * element, and the second + * index the number of the + * quadrature point. The + * first index denotes the + * index of the solution + * vector. + */ + std::vector > > > psi; + + /** + * The same vector for a neighbor cell + */ + std::vector > > > neighbor_psi; + + /** + * The normal vectors of the finite + * element function on one face + */ + std::vector > normal_vectors; + + /** + * Two arrays needed for the + * values of coefficients in + * the jumps, if they are + * given. + */ + std::vector coefficient_values1; + std::vector > coefficient_values; + + /** + * Array for the products of + * Jacobian determinants and + * weights of quadraturs + * points. + */ + std::vector JxW_values; + + /** + * The subdomain id we are to care + * for. + */ + const unsigned int subdomain_id; + /** + * The material id we are to care + * for. + */ + const unsigned int material_id; + + /** + * Some more references to input data to + * the KellyErrorEstimator::estimate() + * function. + */ + const typename FunctionMap::type *neumann_bc; + const std::vector component_mask; + const Function *coefficients; + + /** + * Constructor. + */ + template + ParallelData (const FE &fe, + const dealii::hp::QCollection &face_quadratures, + const dealii::hp::MappingCollection &mapping, + const bool need_quadrature_points, + const unsigned int n_solution_vectors, + const unsigned int subdomain_id, + const unsigned int material_id, + const typename FunctionMap::type *neumann_bc, + const std::vector component_mask, + const Function *coefficients); + + /** + * Resize the arrays so that they fit the + * number of quadrature points associated + * with the given finite element index + * into the hp collections. + */ + void resize (const unsigned int active_fe_index); + }; + + + template + template + ParallelData:: + ParallelData (const FE &fe, + const dealii::hp::QCollection &face_quadratures, + const dealii::hp::MappingCollection &mapping, + const bool need_quadrature_points, + const unsigned int n_solution_vectors, + const unsigned int subdomain_id, + const unsigned int material_id, + const typename FunctionMap::type *neumann_bc, + const std::vector component_mask, + const Function *coefficients) + : + finite_element (fe), + face_quadratures (face_quadratures), + fe_face_values_cell (mapping, + finite_element, + face_quadratures, + update_gradients | + update_JxW_values | + (need_quadrature_points ? 
+ update_quadrature_points : + UpdateFlags()) | + update_normal_vectors), + fe_face_values_neighbor (mapping, + finite_element, + face_quadratures, + update_gradients), + fe_subface_values (mapping, + finite_element, + face_quadratures, + update_gradients), + phi (n_solution_vectors, + std::vector > + (face_quadratures.max_n_quadrature_points(), + std::vector (fe.n_components()))), + psi (n_solution_vectors, + std::vector > > + (face_quadratures.max_n_quadrature_points(), + std::vector > (fe.n_components()))), + neighbor_psi (n_solution_vectors, + std::vector > > + (face_quadratures.max_n_quadrature_points(), + std::vector > (fe.n_components()))), + normal_vectors (face_quadratures.max_n_quadrature_points()), + coefficient_values1 (face_quadratures.max_n_quadrature_points()), + coefficient_values (face_quadratures.max_n_quadrature_points(), + dealii::Vector (fe.n_components())), + JxW_values (face_quadratures.max_n_quadrature_points()), + subdomain_id (subdomain_id), + material_id (material_id), + neumann_bc (neumann_bc), + component_mask (component_mask), + coefficients (coefficients) + {} + + + + template + void + ParallelData::resize (const unsigned int active_fe_index) + { + const unsigned int n_q_points = face_quadratures[active_fe_index].n_quadrature_points; + const unsigned int n_components = finite_element.n_components(); + + normal_vectors.resize(n_q_points); + coefficient_values1.resize(n_q_points); + coefficient_values.resize(n_q_points); + JxW_values.resize(n_q_points); + + for (unsigned int i=0; i + void + copy_local_to_global (const std::map > &local_face_integrals, + std::map > &face_integrals) + { + + // now copy locally computed elements + // into the global map + for (typename std::map >::const_iterator + p=local_face_integrals.begin(); + p!=local_face_integrals.end(); + ++p) + { + // double check that the + // element does not already + // exists in the global map + Assert (face_integrals.find (p->first) == face_integrals.end(), + ExcInternalError()); + for (unsigned int i=0; isecond.size(); ++i) + Assert (p->second[i] >= 0, ExcInternalError()); + + face_integrals[p->first] = p->second; + } + } + + + /** + * Actually do the computation on + * a face which has no hanging + * nodes (it is regular), i.e. + * either on the other side there + * is nirvana (face is at + * boundary), or the other side's + * refinement level is the same + * as that of this side, then + * handle the integration of + * these both cases together. 
+ */ + template + void + integrate_over_regular_face (const std::vector &solutions, + ParallelData ¶llel_data, + std::map > &local_face_integrals, + const typename DH::active_cell_iterator &cell, + const unsigned int face_no, + dealii::hp::FEFaceValues &fe_face_values_cell, + dealii::hp::FEFaceValues &fe_face_values_neighbor) + { + const unsigned int dim = DH::dimension; + + const typename DH::face_iterator face = cell->face(face_no); + const unsigned int n_q_points = parallel_data.face_quadratures[cell->active_fe_index()] + .n_quadrature_points, + n_components = parallel_data.finite_element.n_components(), + n_solution_vectors = solutions.size(); + + + // initialize data of the restriction + // of this cell to the present face + fe_face_values_cell.reinit (cell, face_no, + cell->active_fe_index()); + + // get gradients of the finite element + // function on this cell + for (unsigned int n=0; nat_boundary() == false) + // internal face; integrate jump + // of gradient across this face + { + Assert (cell->neighbor(face_no).state() == IteratorState::valid, + ExcInternalError()); + + const typename DH::active_cell_iterator neighbor = cell->neighbor(face_no); + + // find which number the + // current face has relative to + // the neighboring cell + const unsigned int neighbor_neighbor + = cell->neighbor_of_neighbor (face_no); + Assert (neighbor_neighbor::faces_per_cell, + ExcInternalError()); + + // get restriction of finite element + // function of @p{neighbor} to the + // common face. in the hp case, use the + // quadrature formula that matches the + // one we would use for the present + // cell + fe_face_values_neighbor.reinit (neighbor, neighbor_neighbor, + cell->active_fe_index()); + + // get gradients on neighbor cell + for (unsigned int n=0; nn_components == 1) + { + + parallel_data.coefficients + ->value_list (fe_face_values_cell.get_present_fe_values() + .get_quadrature_points(), + parallel_data.coefficient_values1); + for (unsigned int n=0; nvector_value_list (fe_face_values_cell.get_present_fe_values() + .get_quadrature_points(), + parallel_data.coefficient_values); + for (unsigned int n=0; nat_boundary() == true) + // neumann boundary face. 
compute + // difference between normal + // derivative and boundary function + { + const unsigned char boundary_indicator = face->boundary_indicator(); + + Assert (parallel_data.neumann_bc->find(boundary_indicator) != + parallel_data.neumann_bc->end(), + ExcInternalError ()); + // get the values of the boundary + // function at the quadrature + // points + if (n_components == 1) + { + std::vector g(n_q_points); + parallel_data.neumann_bc->find(boundary_indicator)->second + ->value_list (fe_face_values_cell.get_present_fe_values() + .get_quadrature_points(), g); + + for (unsigned int n=0; n > + g(n_q_points, dealii::Vector(n_components)); + parallel_data.neumann_bc->find(boundary_indicator)->second + ->vector_value_list (fe_face_values_cell.get_present_fe_values() + .get_quadrature_points(), + g); + + for (unsigned int n=0; n face_integral (n_solution_vectors, 0); + for (unsigned int n=0; n + void + integrate_over_irregular_face (const std::vector &solutions, + ParallelData ¶llel_data, + std::map > &local_face_integrals, + const typename DH::active_cell_iterator &cell, + const unsigned int face_no, + dealii::hp::FEFaceValues &fe_face_values, + dealii::hp::FESubfaceValues &fe_subface_values) + { + const unsigned int dim = DH::dimension; + + const typename DH::cell_iterator neighbor = cell->neighbor(face_no); + const unsigned int n_q_points = parallel_data.face_quadratures[cell->active_fe_index()] + .n_quadrature_points, + n_components = parallel_data.finite_element.n_components(), + n_solution_vectors = solutions.size(); + const typename DH::face_iterator + face=cell->face(face_no); + + Assert (neighbor.state() == IteratorState::valid, ExcInternalError()); + Assert (face->has_children(), ExcInternalError()); + // set up a vector of the gradients + // of the finite element function + // on this cell at the quadrature + // points + // + // let psi be a short name for + // [a grad u_h], where the second + // index be the component of the + // finite element, and the first + // index the number of the + // quadrature point + + // store which number @p{cell} has + // in the list of neighbors of + // @p{neighbor} + const unsigned int neighbor_neighbor + = cell->neighbor_of_neighbor (face_no); + Assert (neighbor_neighbor::faces_per_cell, + ExcInternalError()); + + // loop over all subfaces + for (unsigned int subface_no=0; subface_non_children(); ++subface_no) + { + // get an iterator pointing to the + // cell behind the present subface + const typename DH::active_cell_iterator neighbor_child + = cell->neighbor_child_on_subface (face_no, subface_no); + Assert (!neighbor_child->has_children(), + ExcInternalError()); + + // restrict the finite element + // on the present cell to the + // subface + fe_subface_values.reinit (cell, face_no, subface_no, + cell->active_fe_index()); + + // restrict the finite element + // on the neighbor cell to the + // common @p{subface}. 
+ fe_face_values.reinit (neighbor_child, neighbor_neighbor, + cell->active_fe_index()); + + // store the gradient of the + // solution in psi + for (unsigned int n=0; nn_components == 1) + { + parallel_data.coefficients + ->value_list (fe_face_values.get_present_fe_values() + .get_quadrature_points(), + parallel_data.coefficient_values1); + for (unsigned int n=0; nvector_value_list (fe_face_values.get_present_fe_values() + .get_quadrature_points(), + parallel_data.coefficient_values); + for (unsigned int n=0; n face_integral (n_solution_vectors, 0); + for (unsigned int n=0; nface(neighbor_neighbor)] + = face_integral; + } + + + // finally loop over all subfaces to + // collect the contributions of the + // subfaces and store them with the + // mother face + std::vector sum (n_solution_vectors, 0); + for (unsigned int subface_no=0; subface_non_children(); ++subface_no) + { + Assert (local_face_integrals.find(face->child(subface_no)) != + local_face_integrals.end(), + ExcInternalError()); + Assert (local_face_integrals[face->child(subface_no)][0] >= 0, + ExcInternalError()); + + for (unsigned int n=0; nchild(subface_no)][n]; + } + + local_face_integrals[face] = sum; + } + + + /** + * Computate the error on the faces of a + * single cell. + * + * This function is only needed + * in two or three dimensions. + * The error estimator in one + * dimension is implemented + * seperatly. + */ + template + void + estimate_one_cell (const typename DH::active_cell_iterator &cell, + ParallelData ¶llel_data, + std::map > &local_face_integrals, + const std::vector &solutions) + { + const unsigned int n_solution_vectors = solutions.size(); + + const unsigned int subdomain_id = parallel_data.subdomain_id; + const unsigned int material_id = parallel_data.material_id; + + // empty our own copy of the local face + // integrals + local_face_integrals.clear(); + + // loop over all faces of this cell + for (unsigned int face_no=0; + face_no::faces_per_cell; ++face_no) + { + const typename DH::face_iterator + face=cell->face(face_no); + + // make sure we do work + // only once: this face + // may either be regular + // or irregular. if it is + // regular and has a + // neighbor, then we + // visit the face twice, + // once from every + // side. let the one with + // the lower index do the + // work. if it is at the + // boundary, or if the + // face is irregular, + // then do the work below + if ((face->has_children() == false) && + !cell->at_boundary(face_no) && + (!cell->neighbor_is_coarser(face_no) && + (cell->neighbor(face_no)->index() < cell->index() || + (cell->neighbor(face_no)->index() == cell->index() && + cell->neighbor(face_no)->level() < cell->level())))) + continue; + + // if the neighboring cell is less + // refined than the present one, + // then do nothing since we + // integrate over the subfaces when + // we visit the coarse cells. + if (cell->at_boundary(face_no) == false) + if (cell->neighbor_is_coarser(face_no)) + continue; + + // if this face is part of the + // boundary but not of the neumann + // boundary -> nothing to + // do. However, to make things + // easier when summing up the + // contributions of the faces of + // cells, we enter this face into + // the list of faces with + // contribution zero. 
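These explicit zero entries keep the final reduction uniform: once WorkStream has filled the global face_integrals map, estimate() only has to sum the stored values over the faces of each cell. As a rough restatement, with w_F standing for whatever scaling the summation loop in estimate() applies to a face contribution:

  \eta_{K,n} = \Bigl( \sum_{F \subset \partial K} w_F \,\mathrm{face\_integral}_F[n] \Bigr)^{1/2} ,

so boundary faces that carry no Neumann data simply contribute their stored zero and need no special casing at that stage.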
+ const unsigned char boundary_indicator + = face->boundary_indicator(); + if (face->at_boundary() + && + (parallel_data.neumann_bc->find(boundary_indicator) == + parallel_data.neumann_bc->end())) + { + local_face_integrals[face] + = std::vector (n_solution_vectors, 0.); + continue; + } + + // finally: note that we only have + // to do something if either the + // present cell is on the subdomain + // we care for (and the same for + // material_id), or if one of the + // neighbors behind the face is on + // the subdomain we care for + if ( ! ( ((subdomain_id == numbers::invalid_unsigned_int) + || + (cell->subdomain_id() == subdomain_id)) + && + ((material_id == numbers::invalid_unsigned_int) + || + (cell->material_id() == material_id))) ) + { + // ok, cell is unwanted, but + // maybe its neighbor behind + // the face we presently work + // on? oh is there a face at + // all? + if (face->at_boundary()) + continue; + + bool care_for_cell = false; + if (face->has_children() == false) + care_for_cell |= ((cell->neighbor(face_no)->subdomain_id() + == subdomain_id) || + (subdomain_id == numbers::invalid_unsigned_int)) + && + ((cell->neighbor(face_no)->material_id() + == material_id) || + (material_id == numbers::invalid_unsigned_int)); + else + { + for (unsigned int sf=0; sfn_children(); ++sf) + if (((cell->neighbor_child_on_subface(face_no,sf) + ->subdomain_id() == subdomain_id) + && + (material_id == + numbers::invalid_unsigned_int)) + || + ((cell->neighbor_child_on_subface(face_no,sf) + ->material_id() == material_id) + && + (subdomain_id == + numbers::invalid_unsigned_int))) + { + care_for_cell = true; + break; + } + } + + // so if none of the neighbors + // cares for this subdomain or + // material either, then try + // next face + if (care_for_cell == false) + continue; + } + + // so now we know that we care for + // this face, let's do something + // about it. first re-size the + // arrays we may use to the correct + // size: + parallel_data.resize (cell->active_fe_index()); + + + // then do the actual integration + if (face->has_children() == false) + // if the face is a regular one, + // i.e. either on the other side + // there is nirvana (face is at + // boundary), or the other side's + // refinement level is the same + // as that of this side, then + // handle the integration of + // these both cases together + integrate_over_regular_face (solutions, + parallel_data, + local_face_integrals, + cell, face_no, + parallel_data.fe_face_values_cell, + parallel_data.fe_face_values_neighbor); + + else + // otherwise we need to do some + // special computations which do + // not fit into the framework of + // the above function + integrate_over_irregular_face (solutions, + parallel_data, + local_face_integrals, + cell, face_no, + parallel_data.fe_face_values_cell, + parallel_data.fe_subface_values); + } + } + } } + + #if deal_II_dimension == 1 @@ -469,90 +1399,12 @@ estimate (const Mapping<1,spacedim> &mapping, #else // #if deal_II_dimension !=1 -template -KellyErrorEstimator::PerThreadData:: -PerThreadData (const unsigned int n_solution_vectors, - const unsigned int n_components, - const unsigned int max_n_q_points, - const unsigned int subdomain_id, - const unsigned int material_id) - : - subdomain_id (subdomain_id), - material_id (material_id) -{ - // Init the size of a lot of vectors needed - // in the calculations once per thread. 
we - // will later resize them as necessary, but - // for the moment just reserve the maximal - // memory necessary to avoid later - // re-allocation - normal_vectors.resize(max_n_q_points); - coefficient_values1.resize(max_n_q_points); - coefficient_values.resize(max_n_q_points); - JxW_values.resize(max_n_q_points); - - phi.resize(n_solution_vectors); - psi.resize(n_solution_vectors); - neighbor_psi.resize(n_solution_vectors); - - for (unsigned int i=0; i -void -KellyErrorEstimator::PerThreadData:: -resize (const unsigned int n_components, - const unsigned int n_q_points) -{ - const unsigned int n_solution_vectors = phi.size(); - - normal_vectors.resize(n_q_points); - coefficient_values1.resize(n_q_points); - coefficient_values.resize(n_q_points); - JxW_values.resize(n_q_points); - - for (unsigned int i=0; i -template +template void KellyErrorEstimator:: estimate (const Mapping &mapping, @@ -644,274 +1496,6 @@ estimate (const DH &dof_handler, -template -template -void KellyErrorEstimator:: -estimate_some (const hp::MappingCollection &mapping, - const DH &dof_handler, - const hp::QCollection &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - const std::pair*,const Function*> &component_mask_and_coefficients, - const std::pair this_thread, - typename FaceIntegrals::type &face_integrals, - PerThreadData &per_thread_data) -{ - const std::vector &component_mask = *component_mask_and_coefficients.first; - const Function *coefficients = component_mask_and_coefficients.second; - - const unsigned int n_solution_vectors = solutions.size(); - const unsigned int n_components = dof_handler.get_fe().n_components(); - - const unsigned int subdomain_id = per_thread_data.subdomain_id; - const unsigned int material_id = per_thread_data.material_id; - - const hp::FECollection fe (dof_handler.get_fe()); - - // make up a fe face values object - // for the restriction of the - // finite element function to a - // face, for the present cell and - // its neighbor. In principle we - // would only need one at a time, - // but this way we can have more - // fine grained access to what - // values really need to be - // computed (we need not compute - // all values on the neighbor - // cells, so using two objects - // gives us a performance gain). - // - // in debug mode, make sure that - // some data matches, so compute - // quadrature points always - hp::FEFaceValues fe_face_values_cell (mapping, - fe, - quadrature, - UpdateFlags( - update_gradients | - update_JxW_values | - ((!neumann_bc.empty() || - (coefficients != 0)) ? - update_quadrature_points : 0) | - update_normal_vectors)); - hp::FEFaceValues fe_face_values_neighbor (mapping, - fe, - quadrature, - update_gradients); - hp::FESubfaceValues fe_subface_values (mapping, - fe, - quadrature, - update_gradients); - - - typename DH::active_cell_iterator cell = dof_handler.begin_active(); - - // calculate the start cell for - // this thread. note that this way - // the threads work interleaved on - // successive cells, rather than on - // blocks of cells. the reason is - // that it takes vastly more time - // to work on cells with hanging - // nodes than on regular cells, but - // such cells are not evenly - // distributed across the range of - // cell iterators, so in order to - // have the different threads do - // approximately the same amount of - // work, we have to let them work - // interleaved to the effect of a - // pseudorandom distribution of the - // `hard' cells to the different - // threads. 
- for (unsigned int t=0; (t::faces_per_cell; ++face_no) - { - const typename DH::face_iterator - face=cell->face(face_no); - - // make sure we do work - // only once: this face - // may either be regular - // or irregular. if it is - // regular and has a - // neighbor, then we - // visit the face twice, - // once from every - // side. let the one with - // the lower index do the - // work. if it is at the - // boundary, or if the - // face is irregular, - // then do the work below - if ((face->has_children() == false) && - !cell->at_boundary(face_no) && - (!cell->neighbor_is_coarser(face_no) && - (cell->neighbor(face_no)->index() < cell->index() || - (cell->neighbor(face_no)->index() == cell->index() && - cell->neighbor(face_no)->level() < cell->level())))) - continue; - - // if we already visited - // this face: do - // nothing. only check - // component for first - // solution vector, as we - // treat them all at the - // same time - if (face_integrals[face][0] >=0) - continue; - - - // if the neighboring cell is less - // refined than the present one, - // then do nothing since we - // integrate over the subfaces when - // we visit the coarse cells. - if (cell->at_boundary(face_no) == false) - if (cell->neighbor_is_coarser(face_no)) - continue; - - // if this face is part of the - // boundary but not of the neumann - // boundary -> nothing to - // do. However, to make things - // easier when summing up the - // contributions of the faces of - // cells, we enter this face into - // the list of faces with - // contribution zero. - const unsigned char boundary_indicator - = face->boundary_indicator(); - if (face->at_boundary() - && - neumann_bc.find(boundary_indicator)==neumann_bc.end()) - { - for (unsigned int n=0; nsubdomain_id() == subdomain_id)) - && - ((material_id == numbers::invalid_unsigned_int) - || - (cell->material_id() == material_id))) ) - { - // ok, cell is unwanted, but - // maybe its neighbor behind - // the face we presently work - // on? oh is there a face at - // all? - if (face->at_boundary()) - continue; - - bool care_for_cell = false; - if (face->has_children() == false) - care_for_cell |= ((cell->neighbor(face_no)->subdomain_id() - == subdomain_id) || - (subdomain_id == numbers::invalid_unsigned_int)) - && - ((cell->neighbor(face_no)->material_id() - == material_id) || - (material_id == numbers::invalid_unsigned_int)); - else - { - for (unsigned int sf=0; sfn_children(); ++sf) - if (((cell->neighbor_child_on_subface(face_no,sf) - ->subdomain_id() == subdomain_id) - && - (material_id == - numbers::invalid_unsigned_int)) - || - ((cell->neighbor_child_on_subface(face_no,sf) - ->material_id() == material_id) - && - (subdomain_id == - numbers::invalid_unsigned_int))) - { - care_for_cell = true; - break; - } - } - - // so if none of the neighbors - // cares for this subdomain or - // material either, then try - // next face - if (care_for_cell == false) - continue; - } - - - // so now we know that we care for - // this face, let's do something - // about it. first re-size the - // arrays we may use to the correct - // size: - per_thread_data.resize (n_components, - quadrature[cell->active_fe_index()] - .n_quadrature_points); - - - // then do the actual integration - if (face->has_children() == false) - // if the face is a regular one, - // i.e. 
either on the other side - // there is nirvana (face is at - // boundary), or the other side's - // refinement level is the same - // as that of this side, then - // handle the integration of - // these both cases together - integrate_over_regular_face (dof_handler, quadrature, - neumann_bc, solutions, component_mask, - coefficients, - face_integrals, - per_thread_data, - cell, face_no, - fe_face_values_cell, - fe_face_values_neighbor); - - else - // otherwise we need to do some - // special computations which do - // not fit into the framework of - // the above function - integrate_over_irregular_face (dof_handler, quadrature, - solutions, component_mask, - coefficients, - face_integrals, - per_thread_data, - cell, face_no, - fe_face_values_cell, - fe_subface_values); - } - } -} - - template template @@ -925,7 +1509,7 @@ estimate (const Mapping &mapping, std::vector*> &errors, const std::vector &component_mask_, const Function *coefficients, - const unsigned int n_threads_, + const unsigned int , const unsigned int subdomain_id, const unsigned int material_id) { @@ -958,8 +1542,6 @@ estimate (const Mapping &mapping, Assert (solutions[n]->size() == dof_handler.n_dofs(), ExcInvalidSolutionVector()); - Assert (n_threads_ > 0, ExcInternalError()); - // if no mask given: treat all components std::vector component_mask ((component_mask_.size() == 0) ? std::vector(n_components, true) : @@ -968,10 +1550,6 @@ estimate (const Mapping &mapping, Assert (std::count(component_mask.begin(), component_mask.end(), true) > 0, ExcInvalidComponentMask()); - // if NOT multithreaded, set - // n_threads to one - const unsigned int n_threads = (DEAL_II_USE_MT ? n_threads_ : 1); - const unsigned int n_solution_vectors = solutions.size(); // Map of integrals indexed by @@ -982,84 +1560,35 @@ estimate (const Mapping &mapping, // loop over the cells and collect the // contributions of the different faces // of the cell. - // - // The initial values for all faces - // are set to -10e20. It would cost - // a lot of time to synchronise the - // initialisation (i.e. the - // creation of new keys) of the map - // in multithreaded mode. Negative - // value indicates that the face - // has not yet been processed. 
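With the WorkStream-based driver below, that sentinel initialisation is no longer needed: each cell produces its own local_face_integrals map inside estimate_one_cell, and only the serialized copier, copy_local_to_global defined earlier in this patch, ever writes to the global face_integrals map; the global map is attached to the copier by std_cxx1x::ref in the bind expression of the WorkStream::run call. A small, self-contained illustration of that merge step (plain C++ with hypothetical names; the deal.II version uses face iterators as keys and additionally checks the stored values):

#include <cassert>
#include <map>
#include <vector>

typedef std::map<int, std::vector<double> > IntegralMap;

// what the copier does: move one cell's results into the global map;
// the ownership rule in estimate_one_cell guarantees that no face is
// ever entered twice, which the assertion double-checks
void merge_local_into_global (const IntegralMap &local,
                              IntegralMap       &global)
{
  for (IntegralMap::const_iterator p = local.begin(); p != local.end(); ++p)
    {
      assert (global.find (p->first) == global.end());
      global[p->first] = p->second;
    }
}

Since WorkStream calls the copier for one item at a time, this insertion needs no mutex even though std::map is not thread-safe for concurrent writes.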
- const double invalid_double = -10e20; - std::vector default_face_integrals (n_solution_vectors, - invalid_double); - typename FaceIntegrals::type face_integrals; - for (typename DH::active_cell_iterator cell=dof_handler.begin_active(); - cell!=dof_handler.end(); ++cell) - for (unsigned int face_no=0; - face_no::faces_per_cell; - ++face_no) - face_integrals[cell->face(face_no)] = default_face_integrals; - + std::map > face_integrals; // all the data needed in the error // estimator by each of the threads // is gathered in the following // stuctures - // - // note that if no component mask - // was given, then treat all - // components - std::vector data_structures (n_threads); - for (unsigned int i=0; i threads; - void (*estimate_some_ptr) (const hp::MappingCollection &, - const DH &, - const hp::QCollection &, - const typename FunctionMap::type &, - const std::vector &, - const std::pair*,const Function*> &, - const std::pair, - typename FaceIntegrals::type &, - PerThreadData &) - = &KellyErrorEstimator::template estimate_some; - - hp::MappingCollection mapping_collection; - mapping_collection.push_back (mapping); - - for (unsigned int i=0; i mapping_collection(mapping); + const internal::ParallelData + parallel_data (dof_handler.get_fe(), + face_quadratures, + mapping_collection, + (!neumann_bc.empty() || (coefficients != 0)), + solutions.size(), + subdomain_id, + material_id, + &neumann_bc, + component_mask, + coefficients); + std::map > sample_local_face_integrals; + + // now let's work on all those cells: + WorkStream::run (dof_handler.begin_active(), + static_cast(dof_handler.end()), + std_cxx1x::bind (&internal::estimate_one_cell, + _1, _2, _3, std_cxx1x::ref(solutions)), + std_cxx1x::bind (&internal::copy_local_to_global, + _1, std_cxx1x::ref(face_integrals)), + parallel_data, + sample_local_face_integrals); // finally add up the contributions of the // faces for each cell @@ -1069,16 +1598,15 @@ estimate (const Mapping &mapping, for (unsigned int n=0; n::estimate (const DH -template -template -void KellyErrorEstimator:: -integrate_over_regular_face (const DH &dof_handler, - const hp::QCollection &quadrature, - const typename FunctionMap::type &neumann_bc, - const std::vector &solutions, - const std::vector &component_mask, - const Function *coefficients, - typename FaceIntegrals::type &face_integrals, - PerThreadData &per_thread_data, - const typename DH::active_cell_iterator &cell, - const unsigned int face_no, - hp::FEFaceValues &fe_face_values_cell, - hp::FEFaceValues &fe_face_values_neighbor) -{ - const typename DH::face_iterator face = cell->face(face_no); - const unsigned int n_q_points = quadrature[cell->active_fe_index()].n_quadrature_points, - n_components = dof_handler.get_fe().n_components(), - n_solution_vectors = solutions.size(); - - - // initialize data of the restriction - // of this cell to the present face - fe_face_values_cell.reinit (cell, face_no, - cell->active_fe_index()); - - // get gradients of the finite element - // function on this cell - for (unsigned int n=0; nat_boundary() == false) - // internal face; integrate jump - // of gradient across this face - { - Assert (cell->neighbor(face_no).state() == IteratorState::valid, - ExcInternalError()); - - const typename DH::active_cell_iterator neighbor = cell->neighbor(face_no); - - // find which number the - // current face has relative to - // the neighboring cell - const unsigned int neighbor_neighbor - = cell->neighbor_of_neighbor (face_no); - Assert (neighbor_neighbor::faces_per_cell, - 
ExcInternalError()); - - // get restriction of finite element - // function of @p{neighbor} to the - // common face. in the hp case, use the - // quadrature formula that matches the - // one we would use for the present - // cell - fe_face_values_neighbor.reinit (neighbor, neighbor_neighbor, - cell->active_fe_index()); - - // get gradients on neighbor cell - for (unsigned int n=0; nn_components == 1) - { - - coefficients->value_list (fe_face_values_cell.get_present_fe_values() - .get_quadrature_points(), - per_thread_data.coefficient_values1); - for (unsigned int n=0; nvector_value_list (fe_face_values_cell.get_present_fe_values() - .get_quadrature_points(), - per_thread_data.coefficient_values); - for (unsigned int n=0; nat_boundary() == true) - // neumann boundary face. compute - // difference between normal - // derivative and boundary function - { - const unsigned char boundary_indicator = face->boundary_indicator(); - - Assert (neumann_bc.find(boundary_indicator) != neumann_bc.end(), - ExcInternalError ()); - // get the values of the boundary - // function at the quadrature - // points - if (n_components == 1) - { - std::vector g(n_q_points); - neumann_bc.find(boundary_indicator)->second - ->value_list (fe_face_values_cell.get_present_fe_values() - .get_quadrature_points(), g); - - for (unsigned int n=0; n > g(n_q_points, Vector(n_components)); - neumann_bc.find(boundary_indicator)->second - ->vector_value_list (fe_face_values_cell.get_present_fe_values() - .get_quadrature_points(), - g); - - for (unsigned int n=0; n face_integral (n_solution_vectors, 0); - for (unsigned int n=0; n -template -void KellyErrorEstimator:: -integrate_over_irregular_face (const DH &dof_handler, - const hp::QCollection &quadrature, - const std::vector &solutions, - const std::vector &component_mask, - const Function *coefficients, - typename FaceIntegrals::type &face_integrals, - PerThreadData &per_thread_data, - const typename DH::active_cell_iterator &cell, - const unsigned int face_no, - hp::FEFaceValues &fe_face_values, - hp::FESubfaceValues &fe_subface_values) -{ - const typename DH::cell_iterator neighbor = cell->neighbor(face_no); - const unsigned int n_q_points = quadrature[cell->active_fe_index()].n_quadrature_points, - n_components = dof_handler.get_fe().n_components(), - n_solution_vectors = solutions.size(); - const typename DH::face_iterator - face=cell->face(face_no); - - Assert (neighbor.state() == IteratorState::valid, ExcInternalError()); - Assert (face->has_children(), ExcInternalError()); - // set up a vector of the gradients - // of the finite element function - // on this cell at the quadrature - // points - // - // let psi be a short name for - // [a grad u_h], where the second - // index be the component of the - // finite element, and the first - // index the number of the - // quadrature point - - // store which number @p{cell} has - // in the list of neighbors of - // @p{neighbor} - const unsigned int neighbor_neighbor - = cell->neighbor_of_neighbor (face_no); - Assert (neighbor_neighbor::faces_per_cell, - ExcInternalError()); - - // loop over all subfaces - for (unsigned int subface_no=0; subface_non_children(); ++subface_no) - { - // get an iterator pointing to the - // cell behind the present subface - const typename DH::active_cell_iterator neighbor_child - = cell->neighbor_child_on_subface (face_no, subface_no); - Assert (!neighbor_child->has_children(), - ExcInternalError()); - - // restrict the finite element - // on the present cell to the - // subface - 
fe_subface_values.reinit (cell, face_no, subface_no, - cell->active_fe_index()); - - // restrict the finite element - // on the neighbor cell to the - // common @p{subface}. - fe_face_values.reinit (neighbor_child, neighbor_neighbor, - cell->active_fe_index()); - - // store the gradient of the - // solution in psi - for (unsigned int n=0; nn_components == 1) - { - coefficients->value_list (fe_face_values.get_present_fe_values() - .get_quadrature_points(), - per_thread_data.coefficient_values1); - for (unsigned int n=0; nvector_value_list (fe_face_values.get_present_fe_values() - .get_quadrature_points(), - per_thread_data.coefficient_values); - for (unsigned int n=0; n face_integral (n_solution_vectors, 0); - for (unsigned int n=0; nface(neighbor_neighbor)] = face_integral; - } - - - // finally loop over all subfaces to - // collect the contributions of the - // subfaces and store them with the - // mother face - std::vector sum (n_solution_vectors, 0); - for (unsigned int subface_no=0; subface_non_children(); ++subface_no) - { - Assert (face_integrals.find(face->child(subface_no)) != - face_integrals.end(), - ExcInternalError()); - Assert (face_integrals[face->child(subface_no)][0] >= 0, - ExcInternalError()); - - for (unsigned int n=0; nchild(subface_no)][n]; - } - - face_integrals[face] = sum; -} - #endif diff --git a/deal.II/deal.II/source/numerics/matrices.cc b/deal.II/deal.II/source/numerics/matrices.cc index 38368e0167..cc47a687d4 100644 --- a/deal.II/deal.II/source/numerics/matrices.cc +++ b/deal.II/deal.II/source/numerics/matrices.cc @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -42,540 +43,737 @@ DEAL_II_NAMESPACE_OPEN -template -inline -MatrixCreator::IteratorRange:: -IteratorRange (const active_cell_iterator &first, - const active_cell_iterator &second) - : - first (first), - second (second) -{} - - -template -inline -MatrixCreator::IteratorRange::IteratorRange (const iterator_pair &ip) - : - first (ip.first), - second (ip.second) -{} - - - - -template -void MatrixCreator::create_mass_matrix (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient) +namespace internal { - Assert (matrix.m() == dof.n_dofs(), - ExcDimensionMismatch (matrix.m(), dof.n_dofs())); - Assert (matrix.n() == dof.n_dofs(), - ExcDimensionMismatch (matrix.n(), dof.n_dofs())); - - const unsigned int n_threads = multithread_info.n_default_threads; - Threads::ThreadGroup<> threads; - - // define starting and end point - // for each thread - typedef typename DoFHandler::active_cell_iterator active_cell_iterator; - const std::vector > - thread_ranges = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_mass_matrix_1_t) (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_mass_matrix_1_t p = &MatrixCreator::template create_mass_matrix_1; - for (unsigned int thread=0; thread + struct Scratch + { + Scratch (const FiniteElement &fe, + const UpdateFlags update_flags, + const Function *coefficient, + const Function *rhs_function, + const Quadrature &quadrature, + const Mapping &mapping) + : + fe_collection (fe), + quadrature_collection (quadrature), + mapping_collection (mapping), + x_fe_values 
(mapping_collection, + fe_collection, + quadrature_collection, + update_flags), + coefficient_values(quadrature_collection.max_n_quadrature_points()), + coefficient_vector_values (quadrature_collection.max_n_quadrature_points(), + dealii::Vector (fe_collection.n_components())), + rhs_values(quadrature_collection.max_n_quadrature_points()), + rhs_vector_values (quadrature_collection.max_n_quadrature_points(), + dealii::Vector (fe_collection.n_components())), + coefficient (coefficient), + rhs_function (rhs_function), + update_flags (update_flags) + {} + + Scratch (const ::dealii::hp::FECollection &fe, + const UpdateFlags update_flags, + const Function *coefficient, + const Function *rhs_function, + const ::dealii::hp::QCollection &quadrature, + const ::dealii::hp::MappingCollection &mapping) + : + fe_collection (fe), + quadrature_collection (quadrature), + mapping_collection (mapping), + x_fe_values (mapping_collection, + fe_collection, + quadrature_collection, + update_flags), + coefficient_values(quadrature_collection.max_n_quadrature_points()), + coefficient_vector_values (quadrature_collection.max_n_quadrature_points(), + dealii::Vector (fe_collection.n_components())), + rhs_values(quadrature_collection.max_n_quadrature_points()), + rhs_vector_values(quadrature_collection.max_n_quadrature_points(), + dealii::Vector (fe_collection.n_components())), + coefficient (coefficient), + rhs_function (rhs_function), + update_flags (update_flags) + {} + + Scratch (const Scratch &data) + : + fe_collection (data.fe_collection), + quadrature_collection (data.quadrature_collection), + mapping_collection (data.mapping_collection), + x_fe_values (mapping_collection, + fe_collection, + quadrature_collection, + data.update_flags), + coefficient_values (data.coefficient_values), + coefficient_vector_values (data.coefficient_vector_values), + rhs_values (data.rhs_values), + rhs_vector_values (data.rhs_vector_values), + coefficient (data.coefficient), + rhs_function (data.rhs_function), + update_flags (data.update_flags) + {} + + const ::dealii::hp::FECollection fe_collection; + const ::dealii::hp::QCollection quadrature_collection; + const ::dealii::hp::MappingCollection mapping_collection; + + ::dealii::hp::FEValues x_fe_values; + + std::vector coefficient_values; + std::vector > coefficient_vector_values; + std::vector rhs_values; + std::vector > rhs_vector_values; + + const Function *coefficient; + const Function *rhs_function; + + const UpdateFlags update_flags; + }; + + + struct CopyData + { + std::vector dof_indices; + FullMatrix cell_matrix; + dealii::Vector cell_rhs; + }; + -template -void MatrixCreator::create_mass_matrix_1 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) -{ - UpdateFlags update_flags = UpdateFlags(update_values | update_JxW_values); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); - - FEValues fe_values (mapping, dof.get_fe(), q, update_flags); + + } - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - const unsigned int n_components = fe.n_components(); - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - std::vector coefficient_values (n_q_points); 
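Both the new mass_assembler above and the old create_mass_matrix_1 it replaces compute the same local objects; in the simplest case of a scalar element they are, as a rough restatement with a the optional coefficient (taken as 1 if none is given) and f the optional right hand side function:

  cell_matrix(i,j) = \sum_q a(x_q)\,\varphi_i(x_q)\,\varphi_j(x_q)\,\mathrm{JxW}_q ,
  cell_rhs(i)      = \sum_q f(x_q)\,\varphi_i(x_q)\,\mathrm{JxW}_q .

The loops that follow additionally handle vector-valued and non-primitive elements by restricting the products to matching components. The accompanying copy_local_to_global further below then scatter-adds cell_matrix into the global sparse matrix via matrix->add (dof_indices, cell_matrix) and, if a right hand side vector was requested, adds cell_rhs into it, again serialized by WorkStream so that no locking is required.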
- std::vector > coefficient_vector_values (n_q_points, - Vector (n_components)); - - std::vector dof_indices (dofs_per_cell); - - typename DoFHandler::active_cell_iterator cell = range.first; - for (; cell!=range.second; ++cell) + template + void mass_assembler (const CellIterator &cell, + internal::MatrixCreator::AssemblerData::Scratch &data, + internal::MatrixCreator::AssemblerData::CopyData ©_data) { - fe_values.reinit (cell); + data.x_fe_values.reinit (cell); + const FEValues &fe_values = data.x_fe_values.get_present_fe_values (); - cell_matrix = 0; - cell->get_dof_indices (dof_indices); + const unsigned int dofs_per_cell = fe_values.dofs_per_cell, + n_q_points = fe_values.n_quadrature_points; + const FiniteElement &fe = fe_values.get_fe(); + const unsigned int n_components = fe.n_components(); + + Assert(data.rhs_function == 0 || + data.rhs_function->n_components==1 || + data.rhs_function->n_components==n_components, + ::dealii::MatrixCreator::ExcComponentMismatch()); + Assert(data.coefficient == 0 || + data.coefficient->n_components==1 || + data.coefficient->n_components==n_components, + ::dealii::MatrixCreator::ExcComponentMismatch()); + + copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell); + copy_data.cell_matrix = 0; + + copy_data.cell_rhs.reinit (dofs_per_cell); + copy_data.cell_rhs = 0; - if (coefficient != 0) + copy_data.dof_indices.resize (dofs_per_cell); + cell->get_dof_indices (copy_data.dof_indices); + + if (data.rhs_function != 0) { - if (coefficient->n_components==1) + if (data.rhs_function->n_components==1) + { + data.rhs_values.resize (n_q_points); + data.rhs_function->value_list (fe_values.get_quadrature_points(), + data.rhs_values); + } + else { - // Version for variable coefficient with 1 component - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; point(n_components)); + data.rhs_function->vector_value_list (fe_values.get_quadrature_points(), + data.rhs_vector_values); + } + } + + if (data.coefficient != 0) + { + if (data.coefficient->n_components==1) + { + data.coefficient_values.resize (n_q_points); + data.coefficient->value_list (fe_values.get_quadrature_points(), + data.coefficient_values); + } + else + { + data.coefficient_vector_values.resize (n_q_points, + dealii::Vector(n_components)); + data.coefficient->vector_value_list (fe_values.get_quadrature_points(), + data.coefficient_vector_values); + } + } + + + if (data.coefficient != 0) + { + if (data.coefficient->n_components==1) + { + for (unsigned int i=0; in_components==1) + for (unsigned int point=0; pointvector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); if (fe.is_primitive ()) { - // Version for variable coefficient with multiple components - for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; point -void MatrixCreator::create_mass_matrix (const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient) -{ - Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_mass_matrix(StaticMappingQ1::mapping, dof, q, matrix, coefficient); -} - - -template -void MatrixCreator::create_mass_matrix (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient) -{ - 
Assert (matrix.m() == dof.n_dofs(), - ExcDimensionMismatch (matrix.m(), dof.n_dofs())); - Assert (matrix.n() == dof.n_dofs(), - ExcDimensionMismatch (matrix.n(), dof.n_dofs())); - - const unsigned int n_threads = multithread_info.n_default_threads; - Threads::ThreadGroup<> threads; - - // define starting and end point - // for each thread - typedef typename DoFHandler::active_cell_iterator active_cell_iterator; - std::vector > thread_ranges - = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_mass_matrix_2_t) (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_mass_matrix_2_t p = &MatrixCreator::template create_mass_matrix_2; - for (unsigned int thread=0; thread -void -MatrixCreator::create_mass_matrix_2 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) -{ - UpdateFlags update_flags = UpdateFlags(update_values | - update_quadrature_points | - update_JxW_values); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); + } - FEValues fe_values (mapping, dof.get_fe(), q, update_flags); - - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - const unsigned int n_components = fe.n_components(); - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); - Assert (rhs.n_components == 1 || - rhs.n_components == n_components,ExcComponentMismatch()); - Assert (rhs_vector.size() == dof.n_dofs(), - ExcDimensionMismatch(rhs_vector.size(), dof.n_dofs())); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector local_rhs (dofs_per_cell); - std::vector coefficient_values (n_q_points); - std::vector > coefficient_vector_values (n_q_points, - Vector (n_components)); - std::vector rhs_values(n_q_points); - std::vector > rhs_vector_values(n_q_points, - Vector (n_components)); - std::vector dof_indices (dofs_per_cell); + template + void laplace_assembler (const CellIterator &cell, + internal::MatrixCreator::AssemblerData::Scratch &data, + internal::MatrixCreator::AssemblerData::CopyData ©_data) + { + data.x_fe_values.reinit (cell); + const FEValues &fe_values = data.x_fe_values.get_present_fe_values (); - typename DoFHandler::active_cell_iterator cell = range.first; + const unsigned int dofs_per_cell = fe_values.dofs_per_cell, + n_q_points = fe_values.n_quadrature_points; + const FiniteElement &fe = fe_values.get_fe(); + const unsigned int n_components = fe.n_components(); + + Assert(data.rhs_function == 0 || + data.rhs_function->n_components==1 || + data.rhs_function->n_components==n_components, + ::dealii::MatrixCreator::ExcComponentMismatch()); + Assert(data.coefficient == 0 || + data.coefficient->n_components==1 || + data.coefficient->n_components==n_components, + ::dealii::MatrixCreator::ExcComponentMismatch()); + + copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell); + copy_data.cell_matrix = 0; + + copy_data.cell_rhs.reinit 
(dofs_per_cell); + copy_data.cell_rhs = 0; + + copy_data.dof_indices.resize (dofs_per_cell); + cell->get_dof_indices (copy_data.dof_indices); - for (; cell!=range.second; ++cell) - { - fe_values.reinit (cell); - cell_matrix = 0; - local_rhs = 0; - cell->get_dof_indices (dof_indices); + if (data.rhs_function != 0) + { + if (data.rhs_function->n_components==1) + { + data.rhs_values.resize (n_q_points); + data.rhs_function->value_list (fe_values.get_quadrature_points(), + data.rhs_values); + } + else + { + data.rhs_vector_values.resize (n_q_points, + dealii::Vector(n_components)); + data.rhs_function->vector_value_list (fe_values.get_quadrature_points(), + data.rhs_vector_values); + } + } - // value_list for one component rhs, - // vector_value_list otherwise - if (rhs.n_components==1) - rhs.value_list (fe_values.get_quadrature_points(), rhs_values); - else - rhs.vector_value_list (fe_values.get_quadrature_points(), - rhs_vector_values); + if (data.coefficient != 0) + { + if (data.coefficient->n_components==1) + { + data.coefficient_values.resize (n_q_points); + data.coefficient->value_list (fe_values.get_quadrature_points(), + data.coefficient_values); + } + else + { + data.coefficient_vector_values.resize (n_q_points, + dealii::Vector(n_components)); + data.coefficient->vector_value_list (fe_values.get_quadrature_points(), + data.coefficient_vector_values); + } + } - // Case with coefficient - if (coefficient != 0) + + if (data.coefficient != 0) { - if (coefficient->n_components == 1) + if (data.coefficient->n_components==1) { - // Version for variable coefficient with 1 component - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; pointvector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); if (fe.is_primitive ()) { - // Version for variable coefficient with multiple components - for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; pointn_components==1) + for (unsigned int point=0; point + void copy_local_to_global (const AssemblerData::CopyData &data, + MatrixType *matrix, + VectorType *right_hand_side) + { + const unsigned int dofs_per_cell = data.dof_indices.size(); + + Assert (data.cell_matrix.m() == dofs_per_cell, + ExcInternalError()); + Assert (data.cell_matrix.n() == dofs_per_cell, + ExcInternalError()); + Assert ((right_hand_side == 0) + || + (data.cell_rhs.size() == dofs_per_cell), + ExcInternalError()); + + matrix->add (data.dof_indices, data.cell_matrix); + + if (right_hand_side != 0) + for (unsigned int i=0; i +inline +MatrixCreator::IteratorRange:: +IteratorRange (const active_cell_iterator &first, + const active_cell_iterator &second) + : + first (first), + second (second) +{} + + + +template +inline +MatrixCreator::IteratorRange::IteratorRange (const iterator_pair &ip) + : + first (ip.first), + second (ip.second) +{} + + + + +template +void MatrixCreator::create_mass_matrix (const Mapping &mapping, + const DoFHandler &dof, + const Quadrature &q, + SparseMatrix &matrix, + const Function * const coefficient) +{ + Assert (matrix.m() == dof.n_dofs(), + ExcDimensionMismatch (matrix.m(), dof.n_dofs())); + Assert (matrix.n() == dof.n_dofs(), + ExcDimensionMismatch (matrix.n(), dof.n_dofs())); + + internal::MatrixCreator::AssemblerData::Scratch + assembler_data (dof.get_fe(), + update_values | update_JxW_values | + 
(coefficient != 0 ? update_quadrature_points : UpdateFlags(0)), + coefficient, /*rhs_function=*/0, + q, mapping); + + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); + + WorkStream::run (dof.begin_active(), + static_cast::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::mass_assembler::active_cell_iterator>, + std_cxx1x::bind (&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, (Vector*)0), + assembler_data, + copy_data); +} + + + +template +void MatrixCreator::create_mass_matrix (const DoFHandler &dof, + const Quadrature &q, + SparseMatrix &matrix, + const Function * const coefficient) +{ + Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); + create_mass_matrix(StaticMappingQ1::mapping, dof, + q, matrix, coefficient); +} + + + +template +void MatrixCreator::create_mass_matrix (const Mapping &mapping, + const DoFHandler &dof, + const Quadrature &q, + SparseMatrix &matrix, + const Function &rhs, + Vector &rhs_vector, + const Function * const coefficient) +{ + Assert (matrix.m() == dof.n_dofs(), + ExcDimensionMismatch (matrix.m(), dof.n_dofs())); + Assert (matrix.n() == dof.n_dofs(), + ExcDimensionMismatch (matrix.n(), dof.n_dofs())); + + internal::MatrixCreator::AssemblerData::Scratch + assembler_data (dof.get_fe(), + update_values | + update_JxW_values | update_quadrature_points, + coefficient, &rhs, + q, mapping); + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); + + WorkStream::run (dof.begin_active(), + static_cast::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::mass_assembler::active_cell_iterator>, + std_cxx1x::bind(&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, &rhs_vector), + assembler_data, + copy_data); +} + + + template void MatrixCreator::create_mass_matrix (const DoFHandler &dof, const Quadrature &q, @@ -592,7 +790,7 @@ void MatrixCreator::create_mass_matrix (const DoFHandler &dof, template -void MatrixCreator::create_mass_matrix (const hp::MappingCollection &mapping, +void MatrixCreator::create_mass_matrix (const hp::MappingCollection &mapping, const hp::DoFHandler &dof, const hp::QCollection &q, SparseMatrix &matrix, @@ -603,164 +801,26 @@ void MatrixCreator::create_mass_matrix (const hp::MappingCollection threads; - - // define starting and end point - // for each thread - typedef typename hp::DoFHandler::active_cell_iterator active_cell_iterator; - std::vector > thread_ranges - = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_mass_matrix_1_t) (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_mass_matrix_1_t p = &MatrixCreator::template 
create_mass_matrix_1; - for (unsigned int thread=0; thread -void -MatrixCreator::create_mass_matrix_1 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) -{ - UpdateFlags update_flags = UpdateFlags(update_values | - update_JxW_values); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); - - hp::FEValues x_fe_values (mapping, dof.get_fe(), q, update_flags); - - const unsigned int n_components = dof.get_fe().n_components(); - - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); - - FullMatrix cell_matrix; - std::vector coefficient_values; - std::vector > coefficient_vector_values; - - std::vector dof_indices; + internal::MatrixCreator::AssemblerData::Scratch + assembler_data (dof.get_fe(), + update_values | update_JxW_values | + (coefficient != 0 ? update_quadrature_points : UpdateFlags(0)), + coefficient, /*rhs_function=*/0, + q, mapping); + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); - typename hp::DoFHandler::active_cell_iterator cell = range.first; - for (; cell!=range.second; ++cell) - { - x_fe_values.reinit (cell); - const FEValues &fe_values = x_fe_values.get_present_fe_values (); - - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - - cell_matrix.reinit (dofs_per_cell, dofs_per_cell); - coefficient_values.resize (n_q_points); - coefficient_vector_values.resize (n_q_points, - Vector (n_components)); - dof_indices.resize (dofs_per_cell); - - - cell_matrix = 0; - cell->get_dof_indices (dof_indices); - - if (coefficient != 0) - { - if (coefficient->n_components==1) - { - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; pointvector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); - for (unsigned int point=0; point::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::mass_assembler::active_cell_iterator>, + std_cxx1x::bind (&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, (Vector*)0), + assembler_data, + copy_data); } @@ -772,7 +832,7 @@ void MatrixCreator::create_mass_matrix (const hp::DoFHandler &d const Function * const coefficient) { Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_mass_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, matrix, coefficient); + create_mass_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, matrix, coefficient); } @@ -791,185 +851,26 @@ void MatrixCreator::create_mass_matrix (const hp::MappingCollection threads; - - // define starting and end point - // for each thread - typedef typename hp::DoFHandler::active_cell_iterator active_cell_iterator; - std::vector > thread_ranges - = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_mass_matrix_2_t) 
(const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_mass_matrix_2_t p = &MatrixCreator::template create_mass_matrix_2; - for (unsigned int thread=0; thread -void -MatrixCreator::create_mass_matrix_2 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) -{ - UpdateFlags update_flags = UpdateFlags(update_values | - update_quadrature_points | - update_JxW_values); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); - - hp::FEValues x_fe_values (mapping, dof.get_fe(), q, update_flags); - - const unsigned int n_components = dof.get_fe().n_components(); - - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); - - FullMatrix cell_matrix; - Vector local_rhs; - std::vector rhs_values; - std::vector coefficient_values; - std::vector > coefficient_vector_values; - - std::vector dof_indices; - - typename hp::DoFHandler::active_cell_iterator cell = range.first; - for (; cell!=range.second; ++cell) - { - x_fe_values.reinit (cell); - const FEValues &fe_values = x_fe_values.get_present_fe_values (); - - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - - cell_matrix.reinit (dofs_per_cell, dofs_per_cell); - local_rhs.reinit (dofs_per_cell); - rhs_values.resize (fe_values.n_quadrature_points); - coefficient_values.resize (n_q_points); - coefficient_vector_values.resize (n_q_points, - Vector (n_components)); - dof_indices.resize (dofs_per_cell); - - - cell_matrix = 0; - local_rhs = 0; - cell->get_dof_indices (dof_indices); - - rhs.value_list (fe_values.get_quadrature_points(), rhs_values); - - if (coefficient != 0) - { - if (coefficient->n_components==1) - { - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; pointvector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); - for (unsigned int point=0; point + assembler_data (dof.get_fe(), + update_values | + update_JxW_values | update_quadrature_points, + coefficient, &rhs, + q, mapping); + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); + + WorkStream::run (dof.begin_active(), + static_cast::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::mass_assembler::active_cell_iterator>, + std_cxx1x::bind (&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, &rhs_vector), + assembler_data, + copy_data); } @@ -983,7 +884,7 @@ void MatrixCreator::create_mass_matrix (const hp::DoFHandler &d const Function * const coefficient) { Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_mass_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, + 
create_mass_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, matrix, rhs, rhs_vector, coefficient); } @@ -1096,12 +997,15 @@ MatrixCreator::create_boundary_mass_matrix (const Mapping &mappi const IteratorRange > range, Threads::ThreadMutex &mutex); create_boundary_mass_matrix_1_t p = &MatrixCreator::template create_boundary_mass_matrix_1; + +//TODO: Use WorkStream here for (unsigned int thread=0; thread &, update_JxW_values | update_normal_vectors | update_quadrature_points); - FEFaceValues fe_values (mapping, fe, q, update_flags); + FEFaceValues fe_values (mapping, fe, q, update_flags); // two variables for the coefficient, // one for the two cases indicated in @@ -1449,7 +1353,7 @@ MatrixCreator::create_boundary_mass_matrix (const hp::MappingCollection * const coefficient, std::vector component_mapping) { - const hp::FECollection &fe_collection = dof.get_fe(); + const hp::FECollection &fe_collection = dof.get_fe(); const unsigned int n_components = fe_collection.n_components(); Assert (matrix.n() == dof.n_boundary_dofs(boundary_functions), @@ -1482,8 +1386,8 @@ MatrixCreator::create_boundary_mass_matrix (const hp::MappingCollection (dof.begin_active(), dof.end(), n_threads); - typedef std_cxx1x::tuple&, - const hp::DoFHandler&, + typedef std_cxx1x::tuple&, + const hp::DoFHandler&, const hp::QCollection&> Commons; // mutex to synchronise access to @@ -1502,12 +1406,15 @@ MatrixCreator::create_boundary_mass_matrix (const hp::MappingCollection > range, Threads::ThreadMutex &mutex); create_boundary_mass_matrix_1_t p = &MatrixCreator::template create_boundary_mass_matrix_1; + +//TODO: Use WorkStream here for (unsigned int thread=0; thread > range, Threads::ThreadMutex &mutex) { - const hp::MappingCollection& mapping = std_cxx1x::get<0>(commons); - const hp::DoFHandler& dof = std_cxx1x::get<1>(commons); + const hp::MappingCollection& mapping = std_cxx1x::get<0>(commons); + const hp::DoFHandler& dof = std_cxx1x::get<1>(commons); const hp::QCollection& q = std_cxx1x::get<2>(commons); const hp::FECollection &fe_collection = dof.get_fe(); const unsigned int n_components = fe_collection.n_components(); @@ -1587,7 +1494,7 @@ create_boundary_mass_matrix_1 (std_cxx1x::tuple &fe_values = x_fe_values.get_present_fe_values (); + const FEFaceValues &fe_values = x_fe_values.get_present_fe_values (); const FiniteElement &fe = cell->get_fe(); const unsigned int dofs_per_cell = fe.dofs_per_cell; @@ -1621,15 +1528,14 @@ create_boundary_mass_matrix_1 (std_cxx1x::tuple -void MatrixCreator::create_boundary_mass_matrix (const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const typename FunctionMap::type &rhs, - Vector &rhs_vector, - std::vector &dof_to_boundary_mapping, - const Function * const a, - std::vector component_mapping) -{ - Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_boundary_mass_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, - matrix,rhs, rhs_vector, dof_to_boundary_mapping, a, component_mapping); -} - - - -template -void MatrixCreator::create_laplace_matrix (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient) -{ - Assert (matrix.m() == dof.n_dofs(), - ExcDimensionMismatch (matrix.m(), dof.n_dofs())); - Assert (matrix.n() == dof.n_dofs(), - ExcDimensionMismatch (matrix.n(), dof.n_dofs())); - - const unsigned int n_threads = multithread_info.n_default_threads; - Threads::ThreadGroup<> threads; - - // define starting and end point - // for each thread 
- typedef typename DoFHandler::active_cell_iterator active_cell_iterator; - std::vector > thread_ranges - = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_laplace_matrix_1_t) (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_laplace_matrix_1_t p = &MatrixCreator::template create_laplace_matrix_1; - for (unsigned int thread=0; thread -void MatrixCreator::create_laplace_matrix_1 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) -{ - UpdateFlags update_flags = UpdateFlags(update_JxW_values | - update_gradients); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); - - FEValues fe_values (mapping, dof.get_fe(), q, update_flags); - - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - const unsigned int n_components = fe.n_components(); - - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); - - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - std::vector coefficient_values (n_q_points); - std::vector > coefficient_vector_values (n_q_points, - Vector (n_components)); - - std::vector dof_indices (dofs_per_cell); - - typename DoFHandler::active_cell_iterator cell = range.first; - for (; cell!=range.second; ++cell) - { - fe_values.reinit (cell); - - cell_matrix = 0; - cell->get_dof_indices (dof_indices); - - if (coefficient != 0) - { - if (coefficient->n_components==1) - { - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_values[point]); - } - } - } - } - else - { - coefficient->vector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - const unsigned int component_i= - fe.system_to_component_index(i).first; - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(j).first == component_i)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_vector_values[point](component_i)); - - } - } - } - } - } - else - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight); - } + // compare here for relative + // smallness + Assert (std::fabs(cell_vector(j)) <= 1e-10 * max_diag_entry, + ExcInternalError()); } - } - - // transfer everything into the - // global object. 
lock the - // matrix meanwhile - Threads::ThreadMutex::ScopedLock lock (mutex); - for (unsigned int i=0; i -void MatrixCreator::create_laplace_matrix (const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function * const coefficient) +void MatrixCreator::create_boundary_mass_matrix (const hp::DoFHandler &dof, + const hp::QCollection &q, + SparseMatrix &matrix, + const typename FunctionMap::type &rhs, + Vector &rhs_vector, + std::vector &dof_to_boundary_mapping, + const Function * const a, + std::vector component_mapping) { Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_laplace_matrix(StaticMappingQ1::mapping, dof, q, matrix, coefficient); + create_boundary_mass_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, + matrix,rhs, rhs_vector, dof_to_boundary_mapping, a, component_mapping); } @@ -2085,8 +1809,6 @@ void MatrixCreator::create_laplace_matrix (const Mapping &m const DoFHandler &dof, const Quadrature &q, SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, const Function * const coefficient) { Assert (matrix.m() == dof.n_dofs(), @@ -2094,176 +1816,75 @@ void MatrixCreator::create_laplace_matrix (const Mapping &m Assert (matrix.n() == dof.n_dofs(), ExcDimensionMismatch (matrix.n(), dof.n_dofs())); - const unsigned int n_threads = multithread_info.n_default_threads; - Threads::ThreadGroup<> threads; + internal::MatrixCreator::AssemblerData::Scratch + assembler_data (dof.get_fe(), + update_gradients | update_JxW_values | + (coefficient != 0 ? update_quadrature_points : UpdateFlags(0)), + coefficient, /*rhs_function=*/0, + q, mapping); + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); + + WorkStream::run (dof.begin_active(), + static_cast::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::laplace_assembler::active_cell_iterator>, + std_cxx1x::bind (&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, (Vector*)0), + assembler_data, + copy_data); +} - // define starting and end point - // for each thread - typedef typename DoFHandler::active_cell_iterator active_cell_iterator; - std::vector > thread_ranges - = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_laplace_matrix_2_t) (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_laplace_matrix_2_t p = &MatrixCreator::template create_laplace_matrix_2; - for (unsigned int thread=0; thread +void MatrixCreator::create_laplace_matrix (const DoFHandler &dof, + const Quadrature &q, + SparseMatrix &matrix, + const Function * const coefficient) +{ + Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); + create_laplace_matrix(StaticMappingQ1::mapping, dof, q, matrix, coefficient); } template -void -MatrixCreator::create_laplace_matrix_2 (const Mapping &mapping, - const DoFHandler &dof, - const Quadrature &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const 
Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) +void MatrixCreator::create_laplace_matrix (const Mapping &mapping, + const DoFHandler &dof, + const Quadrature &q, + SparseMatrix &matrix, + const Function &rhs, + Vector &rhs_vector, + const Function * const coefficient) { - UpdateFlags update_flags = UpdateFlags(update_values | - update_gradients | - update_quadrature_points | - update_JxW_values); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); - - FEValues fe_values (mapping, dof.get_fe(), q, update_flags); - - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - const unsigned int n_components = fe.n_components(); - - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); + Assert (matrix.m() == dof.n_dofs(), + ExcDimensionMismatch (matrix.m(), dof.n_dofs())); + Assert (matrix.n() == dof.n_dofs(), + ExcDimensionMismatch (matrix.n(), dof.n_dofs())); - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); - Vector local_rhs (dofs_per_cell); - std::vector rhs_values (fe_values.n_quadrature_points); - std::vector coefficient_values (n_q_points); - std::vector > coefficient_vector_values (n_q_points, - Vector (n_components)); - - std::vector dof_indices (dofs_per_cell); - - typename DoFHandler::active_cell_iterator cell = range.first; - for (; cell!=range.second; ++cell) - { - fe_values.reinit (cell); - - cell_matrix = 0; - local_rhs = 0; - cell->get_dof_indices (dof_indices); - - rhs.value_list (fe_values.get_quadrature_points(), rhs_values); - - if (coefficient != 0) - { - if (coefficient->n_components==1) - { - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_values[point]); - } - local_rhs(i) += v * rhs_values[point] * weight; - } - } - } - else - { - coefficient->vector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - const unsigned int component_i= - fe.system_to_component_index(i).first; - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(j).first == component_i)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_vector_values[point](component_i)); - } - local_rhs(i) += v * rhs_values[point] * weight; - } - } - } - } - else - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight); - } - local_rhs(i) += v * rhs_values[point] * weight; - } - } - - // transfer everything into the - // global object. 
lock the - // matrix meanwhile - Threads::ThreadMutex::ScopedLock lock (mutex); - for (unsigned int i=0; i + assembler_data (dof.get_fe(), + update_gradients | update_values | + update_JxW_values | update_quadrature_points, + coefficient, &rhs, + q, mapping); + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); + WorkStream::run (dof.begin_active(), + static_cast::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::laplace_assembler::active_cell_iterator>, + std_cxx1x::bind (&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, &rhs_vector), + assembler_data, + copy_data); } @@ -2277,7 +1898,7 @@ void MatrixCreator::create_laplace_matrix (const DoFHandler &do const Function * const coefficient) { Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_laplace_matrix(StaticMappingQ1::mapping, dof, q, + create_laplace_matrix(StaticMappingQ1::mapping, dof, q, matrix, rhs, rhs_vector, coefficient); } @@ -2295,164 +1916,26 @@ void MatrixCreator::create_laplace_matrix (const hp::MappingCollection threads; - - // define starting and end point - // for each thread - typedef typename hp::DoFHandler::active_cell_iterator active_cell_iterator; - std::vector > thread_ranges - = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_laplace_matrix_1_t) (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_laplace_matrix_1_t p = &MatrixCreator::template create_laplace_matrix_1; - for (unsigned int thread=0; thread -void -MatrixCreator::create_laplace_matrix_1 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) -{ - UpdateFlags update_flags = UpdateFlags(update_gradients | - update_JxW_values); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); - - hp::FEValues x_fe_values (mapping, dof.get_fe(), q, update_flags); - - const unsigned int n_components = dof.get_fe().n_components(); - - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); - - FullMatrix cell_matrix; - std::vector coefficient_values; - std::vector > coefficient_vector_values; - - std::vector dof_indices; - - typename hp::DoFHandler::active_cell_iterator cell = range.first; - for (; cell!=range.second; ++cell) - { - x_fe_values.reinit (cell); - const FEValues &fe_values = x_fe_values.get_present_fe_values (); - - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - - cell_matrix.reinit (dofs_per_cell, dofs_per_cell); - coefficient_values.resize (n_q_points); - coefficient_vector_values.resize (n_q_points, - Vector (n_components)); - dof_indices.resize (dofs_per_cell); - - - cell_matrix = 
0; - cell->get_dof_indices (dof_indices); - - if (coefficient != 0) - { - if (coefficient->n_components==1) - { - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_values[point]); - } - } - } - } - else - { - coefficient->vector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - const unsigned int component_i= - fe.system_to_component_index(i).first; - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(j).first == component_i)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_vector_values[point](component_i)); - } - } - } - } - } - else - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight); - } - } - } - - // transfer everything into the - // global object. lock the - // matrix meanwhile - Threads::ThreadMutex::ScopedLock lock (mutex); - for (unsigned int i=0; i + assembler_data (dof.get_fe(), + update_gradients | update_JxW_values | + (coefficient != 0 ? update_quadrature_points : UpdateFlags(0)), + coefficient, /*rhs_function=*/0, + q, mapping); + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); + + WorkStream::run (dof.begin_active(), + static_cast::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::laplace_assembler::active_cell_iterator>, + std_cxx1x::bind (&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, (Vector*)0), + assembler_data, + copy_data); } @@ -2464,7 +1947,7 @@ void MatrixCreator::create_laplace_matrix (const hp::DoFHandler const Function * const coefficient) { Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_laplace_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, matrix, coefficient); + create_laplace_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, matrix, coefficient); } @@ -2483,186 +1966,26 @@ void MatrixCreator::create_laplace_matrix (const hp::MappingCollection threads; - - // define starting and end point - // for each thread - typedef typename hp::DoFHandler::active_cell_iterator active_cell_iterator; - std::vector > thread_ranges - = Threads::split_range (dof.begin_active(), - dof.end(), n_threads); - - // mutex to synchronise access to - // the matrix - Threads::ThreadMutex mutex; - - // then assemble in parallel - typedef void (*create_laplace_matrix_2_t) (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex); - create_laplace_matrix_2_t p = 
&MatrixCreator::template create_laplace_matrix_2; - for (unsigned int thread=0; thread -void -MatrixCreator::create_laplace_matrix_2 (const hp::MappingCollection &mapping, - const hp::DoFHandler &dof, - const hp::QCollection &q, - SparseMatrix &matrix, - const Function &rhs, - Vector &rhs_vector, - const Function * const coefficient, - const IteratorRange > range, - Threads::ThreadMutex &mutex) -{ - UpdateFlags update_flags = UpdateFlags(update_values | - update_gradients | - update_quadrature_points | - update_JxW_values); - if (coefficient != 0) - update_flags = UpdateFlags (update_flags | update_quadrature_points); - - hp::FEValues x_fe_values (mapping, dof.get_fe(), q, update_flags); - - const unsigned int n_components = dof.get_fe().n_components(); - - Assert(coefficient == 0 || - coefficient->n_components==1 || - coefficient->n_components==n_components, ExcComponentMismatch()); - - FullMatrix cell_matrix; - Vector local_rhs; - std::vector rhs_values; - std::vector coefficient_values; - std::vector > coefficient_vector_values; - - std::vector dof_indices; + internal::MatrixCreator::AssemblerData::Scratch + assembler_data (dof.get_fe(), + update_gradients | update_values | + update_JxW_values | update_quadrature_points, + coefficient, &rhs, + q, mapping); + internal::MatrixCreator::AssemblerData::CopyData copy_data; + copy_data.cell_matrix.reinit (assembler_data.fe_collection.max_dofs_per_cell(), + assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.cell_rhs.reinit (assembler_data.fe_collection.max_dofs_per_cell()); + copy_data.dof_indices.resize (assembler_data.fe_collection.max_dofs_per_cell()); - typename hp::DoFHandler::active_cell_iterator cell = range.first; - for (; cell!=range.second; ++cell) - { - x_fe_values.reinit (cell); - const FEValues &fe_values = x_fe_values.get_present_fe_values (); - - const unsigned int dofs_per_cell = fe_values.dofs_per_cell, - n_q_points = fe_values.n_quadrature_points; - const FiniteElement &fe = fe_values.get_fe(); - - cell_matrix.reinit (dofs_per_cell, dofs_per_cell); - local_rhs.reinit (dofs_per_cell); - rhs_values.resize (fe_values.n_quadrature_points); - coefficient_values.resize (n_q_points); - coefficient_vector_values.resize (n_q_points, - Vector (n_components)); - dof_indices.resize (dofs_per_cell); - - - cell_matrix = 0; - local_rhs = 0; - cell->get_dof_indices (dof_indices); - - rhs.value_list (fe_values.get_quadrature_points(), rhs_values); - - if (coefficient != 0) - { - if (coefficient->n_components==1) - { - coefficient->value_list (fe_values.get_quadrature_points(), - coefficient_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_values[point]); - } - local_rhs(i) += v * rhs_values[point] * weight; - } - } - } - else - { - coefficient->vector_value_list (fe_values.get_quadrature_points(), - coefficient_vector_values); - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - const unsigned int component_i= - fe.system_to_component_index(i).first; - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(j).first == component_i)) - cell_matrix(i,j) += (Du * Dv * weight * - coefficient_vector_values[point](component_i)); - } - local_rhs(i) += v * rhs_values[point] * weight; - } - } - } 
- } - else - for (unsigned int point=0; point& Dv = fe_values.shape_grad(i,point); - for (unsigned int j=0; j& Du = fe_values.shape_grad(j,point); - if ((n_components==1) || - (fe.system_to_component_index(i).first == - fe.system_to_component_index(j).first)) - cell_matrix(i,j) += (Du * Dv * weight); - } - local_rhs(i) += v * rhs_values[point] * weight; - } - } - - // transfer everything into the - // global object. lock the - // matrix meanwhile - Threads::ThreadMutex::ScopedLock lock (mutex); - for (unsigned int i=0; i::active_cell_iterator>(dof.end()), + &internal::MatrixCreator::laplace_assembler::active_cell_iterator>, + std_cxx1x::bind (&internal::MatrixCreator:: + copy_local_to_global, Vector >, + _1, &matrix, &rhs_vector), + assembler_data, + copy_data); } @@ -2676,7 +1999,7 @@ void MatrixCreator::create_laplace_matrix (const hp::DoFHandler const Function * const coefficient) { Assert (DEAL_II_COMPAT_MAPPING, ExcCompatibility("mapping")); - create_laplace_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, + create_laplace_matrix(hp::StaticMappingQ1::mapping_collection, dof, q, matrix, rhs, rhs_vector, coefficient); } diff --git a/deal.II/deal.II/source/numerics/time_dependent.cc b/deal.II/deal.II/source/numerics/time_dependent.cc index 64404ff0cc..534c589adc 100644 --- a/deal.II/deal.II/source/numerics/time_dependent.cc +++ b/deal.II/deal.II/source/numerics/time_dependent.cc @@ -212,10 +212,10 @@ void TimeDependent::end_sweep (const unsigned int n_threads) void (TimeDependent::*p) (const unsigned int, const unsigned int) = &TimeDependent::end_sweep; for (unsigned int i=0; iF77LIBS=$(F77LIBS)' >> $@ @echo '
  • ACE_ROOT=$(ACE_ROOT)' >> $@ @echo '
  • lib-ACE=$(lib-ACE)' >> $@ - @echo '
  • with-multithreading=$(with-multithreading)' >> $@ + @echo '
  • enable-threads=$(enable-threads)' >> $@ @echo '
  • TECIO_INCLUDE=$(TECIO_INCLUDE)' >> $@ @echo '
  • TECIO_LIBRARY=$(TECIO_LIBRARY)' >> $@ @cat makefiles.2.html >> $@ diff --git a/deal.II/doc/development/Makefile.large b/deal.II/doc/development/Makefile.large index 316d049f4b..1c813c474e 100644 --- a/deal.II/doc/development/Makefile.large +++ b/deal.II/doc/development/Makefile.large @@ -64,17 +64,6 @@ libs.o = $(lib-deal2-$(deal_II_dimension)d.o) \ -# Define a nifty string to indicate in the output of the compile -# commands whether the program is compiled in multithread mode or not: -ifneq ($(with-multithreading),no) - MT = MT -else - MT = == -endif - - - - # Now use the information from above to define the set of libraries to # link with and the flags to be passed to the compiler: ifeq ($(debug-mode),on) diff --git a/deal.II/doc/development/makefiles.1.html b/deal.II/doc/development/makefiles.1.html index 68668cab36..ece06b10e5 100644 --- a/deal.II/doc/development/makefiles.1.html +++ b/deal.II/doc/development/makefiles.1.html @@ -511,7 +511,7 @@
  • Variables in multithreading mode

-    with-multithreading
+    enable-threads

    This symbol is 'no' if the flag was not given to ./configure and 'posix' otherwise diff --git a/deal.II/doc/doxygen/headers/multithreading.h b/deal.II/doc/doxygen/headers/multithreading.h index 5e2644c4e4..5049c14f5b 100644 --- a/deal.II/doc/doxygen/headers/multithreading.h +++ b/deal.II/doc/doxygen/headers/multithreading.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2006 by the deal.II authors +// Copyright (C) 2006, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -12,35 +12,1083 @@ //------------------------------------------------------------------------- /** - * @defgroup threads Multithreading + * @defgroup threads Parallel computing with multiple processors + * + * @brief A module discussing the use of parallelism on shared memory + * machines. See the detailed documentation and + * @ref MTToC "Table of Contents" below the lenghty list of members + * of this module. * * On machines with more than one processor (or multicore processors), * it is often profitable to run several parts of the computations in - * parallel. For example, one could have several threads running in - * parallel, each of which assembles the cell matrices of a subset of + * %parallel. For example, one could have several threads running in + * %parallel, each of which assembles the cell matrices of a subset of * the triangulation and then writes them into the global * matrix. Since assembling matrices is often an expensive operation, * this frequently leads to significant savings in compute time on * multiprocessor machines. * - * In a similar way, it is often also profitable to use multiple - * threads on a single-CPU system if a significant amount of input or - * output tasks has to be performed. In such cases, the program - * usually has to wait for disks or network storages to provide the - * requested data, or to flush buffers. If this is done on a separate - * thread, other threads of the program can continue to do other, more - * interesting things at the same time, using the CPU downtime. - * - * Support for this model of computations, i.e. using multiple threads - * on a shared-memory machine (SMP machine) is provided mainly through - * the Threads namespace that offers functions to create new threads - * as well as synchronisation primitives. The MultithreadInfo class - * allows to query certain properties of the system, such as the - * number of CPUs. The use of these classes is explained in the - * step-9, step-13 and step-14 tutorial programs. + * deal.II supports operations running in %parallel on on shared-memory (SMP) + * machines through the functions and classes in the Threads namespace. The + * MultithreadInfo class allows to query certain properties of the system, + * such as the number of CPUs. These facilities for %parallel computing are + * described in the following. The step-9, step-13 and step-14 tutorial + * programs also show their use in practice. * * On the other hand, programs running on distributed memory machines - * (i.e. clusters) need a different programming model built on top of - * MPI and PETSc that is described in the step-17 and later example - * programs. + * (i.e. clusters) need a different programming model built on top of MPI and + * PETSc or Trilinos. This is described in the step-17, step-18 and step-32 + * example programs. + * + * @anchor MTToC + * + * + *
    %Table of contents
    + *
      + *
    1. @ref MTTasks "Task-based parallelism" + *
    2. @ref MTUsing "Using tasks from within deal.II" + *
    3. @ref MTHow "How scheduling tasks works and when task-based programming is not efficient" + *
    4. @ref MTSimpleLoops "Abstractions for tasks: Simple loops" + *
    5. @ref MTComplexLoops "Abstractions for tasks: More complex loops" + *
    6. @ref MTWorkStream "Abstractions for tasks: Work streams" + *
    7. @ref MTThreads "Thread-based parallelism" + *
    + * + * + * @anchor MTTasks + *

    Task-based parallelism

    + * + * The traditional view of parallelism on shared memory machines has been to + * decompose a program into threads, i.e. running different parts of + * the program in %parallel at the same time (if there are more threads + * than processor cores on your machine, the operating system will run each + * thread round-robin for a brief amount of time before switching execution to + * another thread, thereby simulating that threads run + * concurrently). deal.II's facilities for threads are described below (see + * @ref MTThreads "Thread-based parallelism"), but we would first like to + * discuss an abstraction that is often more suitable than threads: + * tasks. + * + * Tasks are essentially the individual parts of a program. Some of them are + * independent, whereas others depend on previous tasks to be completed + * first. By way of example, consider the typical layout of a part of the + * setup_dofs function that most of the tutorial programs have: + * @code +1 dof_handler.distribute_dofs (fe); +2 DoFTools::make_hanging_node_constraints (dof_handler, hanging_node_constraints); +3 DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); +4 hanging_node_constraints.condense (sparsity_pattern); + * @endcode + * + * Here, each of the operations require a significant amount of + * computations. But note that not all of them depend on each other: clearly + * we can not run statements 2-4 before 1, and 4 needs to wait for the + * completion of statements 2 and 3. But statements 2 and 3 are independent: + * they could be run in any order, or in %parallel. In essence, we have + * identified four tasks, some of which are dependent on each other, + * whereas others are independent. In the current example, tasks are + * identified with individual C++ statements, but oftentimes they more + * generally coincide with entire code blocks. + * + * The point here is this: To exploit the independence of tasks 2 and 3, we + * could start two threads and run each task on its own thread; we would then + * wait for the two threads to finish (an operation called "joining a thread") + * and go on with statement 4. As discussed in more detail below, code to + * achieve this would look like this: + * @code + dof_handler.distribute_dofs (fe); + + Threads::Thread + thread_1 = Threads::new_thread (&DoFTools::make_hanging_node_constraints, + dof_handler, hanging_node_constraints); + Threads::Thread + thread_2 = Threads::new_thread (&DoFTools::make_sparsity_pattern, + dof_handler, sparsity_pattern); + thread_1.join(); + thread_2.join(); + hanging_node_constraints.condense (sparsity_pattern); + * @endcode + * + * But what if + * your computer has only one processor core, or if we have two but there is + * already a different part of the program running in %parallel to the code + * above? In that case, we could of course still start new threads, but the + * program is not going to run faster since no additional compute resources + * are available; rather, the program will run slower since threads have to be + * created and destroyed, and the operating system has to schedule threads to + * oversubscribed compute resources. + * + * A better scheme would identify independent tasks and then hand them off to + * a scheduler that maps tasks to available compute resources. This way, the + * program could, for example, start one thread per processor core and then + * let threads work on tasks. 
Tasks would run to completion, rather than + * concurrently, avoiding the overhead of interrupting threads to run a + * different thread. In this model, if two processor cores would be available, + * tasks 2 and 3 above would run in %parallel; if only one is available, the + * scheduler would first completely execute task 2 before doing task 3, or the + * other way around. This model is able to execute much more efficiently in + * particular if a large number of tasks is available for execution, see for + * example the discussion below in section + * @ref MTWorkStream "Abstractions for tasks: Work streams". In + * essence, tasks are a high-level description of what needs to be done, + * whereas threads are a low-level way of implementing how these tasks can be + * completed. As in many other instances, being able to use a high-level + * description allows to find efficient low-level implementations; in this + * vein, it often pays off to use tasks, rather than threads, in a program. + * + * deal.II does not implement scheduling tasks to threads itself. For this, we + * use the
    Threading Building + * Blocks (TBB) library for which we provide simple wrappers. TBB + * abstracts the details of how to start or stop threads, start tasks on + * individual threads, etc, and provides interfaces that are portable across + * many different systems. + * + * + * + * @anchor MTUsing + *
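As a purely illustrative sketch (this is not deal.II's own interface, the two functions assemble_part_1() and assemble_part_2() are hypothetical, and the tbb::task_group interface shown here may not be available in every TBB version), the kind of TBB primitive such wrappers build on looks like this:
@code
#include <tbb/task_group.h>

// two hypothetical, mutually independent jobs
void assemble_part_1 () { /* ... */ }
void assemble_part_2 () { /* ... */ }

void run_both_as_tasks ()
{
  tbb::task_group group;
  group.run (&assemble_part_1);   // hand both jobs to the TBB scheduler...
  group.run (&assemble_part_2);
  group.wait ();                  // ...and block until both have completed
}
@endcode
The wrappers described in the next section hide these details behind a single call.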

    Using tasks from within deal.II

    + * + * Ideally, the syntax to start tasks (and similarly for threads, for that + * matter), would be something like this for the example above: + * @code + Threads::Task + thread + = new_task DoFTools::make_hanging_node_constraints (dof_handler, + hanging_node_constraints); + * @endcode + * In other words, we would like to indicate the fact that the function call + * should be run on a separate task by simply prefixing the call with a + * keyword (such as new_task here, with a similar keyword + * new_thread for threads). Prefixing a call would return a + * handle for the task that we can use to wait for the tasks's completion and + * that we may use to query the return value of the function called (unless it + * is void, as it is here). + * + * Since C++ does not support the creation of new keywords, we have to be a + * bit more creative. The way chosen is to introduce a function + * new_task that takes as arguments the function to call as well + * as the arguments to the call. The new_task function is + * overloaded to accomodate starting tasks with functions that take no, one, + * two, and up to 9 arguments. In deal.II, these functions live in the Threads + * namespace. Consequently, the actual code for what we try to do above looks + * like this: + * @code + Threads::Task + thread + = Threads::new_task (&DoFTools::make_hanging_node_constraints, + dof_handler, + hanging_node_constraints); + * @endcode + * Note that DoFTools::make_hanging_node_constraints is a static member + * function and so does not need an object of type DoFTools to work on. + * (In fact, DoFTools has only static member functions and could as well be + * a namespace instead of class; that it is a class at the time of writing + * this is mostly a historic relic.) + * + * Similarly, if we want to call a member function on a different task, we can + * do so by specifying the object on which to call the function as first + * argument after the function pointer: + * @code + class C { + public: + double f(int); + }; + + int main () { + C c; + + // call f(13) as usual, i.e. using the current processor: + c.f(13); + + // call f(42) as a separate task, to be scheduled + // whenever processor resources become available: + Threads::Task + task = Threads::new_task (&C::f, c, 42); + + // do something else in between: + ...; + + // having finished our other business, wait until the task + // above has terminated and get the value returns by c.f(42): + double result = task.return_value(); + * @endcode + * Here, note first how we pass the object c (i.e. the + * this pointer the function C::f will see) as if it + * was the first argument to the function. Secondly, note how we can acquire + * the value returned by the function on the separate task by calling + * Threads::Task::return_value(). This function implies waiting for the + * completion of the task, i.e. 
the last line is completely equivalent to + * @code + task.join (); + double result = task.return_value(); + * @endcode + * + * Note also that it is entirely valid if C::f wants to start + * tasks of its own: + * @code + class C { + public: + double f(int); + private: + double f1(int); + double f2(int); + }; + + double C::f (int i) { + Threads::Task t1 = Threads::new_task (&C::f1, *this, i); + Threads::Task t2 = Threads::new_task (&C::f2, *this, i); + return t1.return_value() + t2.return_value(); + } + + int main () { + C c; + + Threads::Task + task = Threads::new_task (&C::f, c, 42); + + // do something else in between: + ...; + + double result = task.return_value(); + * @endcode + * Here, we let C::f compute its return value as + * c.f1(i)+c.f2(i). If sufficient CPU resources are available, + * then the two parts of the addition as well as the other things in + * main() will all run in %parallel. If not, then we will + * eventually block at one of the places where the return value is needed, + * thereby freeing up the CPU resources necessary to run all those spawned + * tasks to completion. + * + * + * In many cases, such as the introductory example of the + * setup_dofs function outlined above, one can identify several + * independent jobs that can be run as tasks, but will have to wait for all of + * them to finish at one point. One can do so by storing the returned object + * from all the Threads::new_task() calls, and calling Threads::Task::join() + * on each one of them. A simpler way to do this is to put all of these task + * objects into a Threads::TaskGroup object and waiting for all of them at + * once. The code would then look like this: + * @code + dof_handler.distribute_dofs (fe); + + Threads::TaskGroup task_group; + task_group += Threads::new_task (&DoFTools::make_hanging_node_constraints, + dof_handler, hanging_node_constraints); + task_group += Threads::new_task (&DoFTools::make_sparsity_pattern, + dof_handler, sparsity_pattern); + task_group.join_all (); + hanging_node_constraints.condense (sparsity_pattern); + * @endcode + * + * + * @anchor MTHow + *
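As one more minimal sketch (assuming a hypothetical free function compute_mean_value that takes a Vector<double> and returns a double), ordinary non-member functions can be started as tasks in exactly the same way, with the result retrieved later:
@code
// hypothetical function whose result we want to compute on a separate task
double compute_mean_value (const Vector<double> &v);

void f (const Vector<double> &solution)
{
  Threads::Task<double> task = Threads::new_task (&compute_mean_value,
                                                  solution);

  // ...do other work here while the task runs...

  const double mean = task.return_value();  // waits for the task if necessary
}
@endcode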

    How scheduling tasks works and when task-based programming is not efficient

    + * + * The exact details of how tasks are scheduled to run are %internal to the + * Threading Building Blocks (TBB) library that deal.II uses for tasks. The + * documentation of TBB gives a detailed description of how tasks are + * scheduled to threads but is rather quiet on how many threads are actually + * used. However, a reasonable guess is probably to assume that TBB creates as + * many threads as there are processor cores on your system. This way, it is + * able to fully utilize the entire system, without having too many threads + * that the operating system will then have to interrupt regularly so that + * other threads can run on the available processor cores. + * + * The point then is that the TBB scheduler takes tasks and lets threads + * execute them. %Threads execute tasks completely, i.e. the TBB scheduler does + * not interrupt a task half way through to make some halfway progress with + * another task. This makes sure that caches are always hot, for example, and + * avoids the overhead of preemptive interrupts. + * + * The downside is that the CPU cores are only fully utilized if the threads + * are actually doing something, and that means that (i) there must be enough + * tasks available, and (ii) these tasks are actually doing something. Note + * that both conditions must be met; in particular, this means that CPU cores + * are underutilized if we have identified a sufficient number of tasks but if + * some of them twiddle thumbs, for example because a task is writing data to + * disk (a process where the CPU frequently has to wait for the disk to + * complete a transaction) or is waiting for input. Other cases are where + * tasks block on other external events, for example synchronising with other + * tasks or threads through a mutex. In such cases, the scheduler would let a + * task run on a thread, but doesn't notice that that thread doesn't fully + * utilize the CPU core. + * + * In cases like these, it does make sense to create a new thread (see + * @ref MTThreads "Thread-based parallelism" below) that the operating system + * can put on hold while they are waiting for something external, and let a + * different thread (for example one running a task scheduled by TBB) use the + * CPU at the same time. + * + * + * @anchor MTSimpleLoops + *
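For example, the following sketch (assuming a hypothetical write_results_to_disk() function that spends most of its time waiting for I/O) puts such blocking work on a newly created thread rather than a task, so that the task scheduler can keep the CPU cores busy in the meantime:
@code
// hypothetical function that spends most of its time waiting for the disk
void write_results_to_disk () { /* ... */ }

void output_while_computing ()
{
  // I/O-bound work goes onto its own thread: the operating system can put it
  // to sleep while it waits, and the cores stay available for compute tasks
  Threads::Thread<void> output_thread
    = Threads::new_thread (&write_results_to_disk);

  // ...create and run compute tasks here as usual...

  output_thread.join ();          // make sure the output has been written
}
@endcode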

    Abstractions for tasks: Simple loops

    + * + * Some loops execute bodies on data that is completely independent + * and that can therefore be executed in %parallel. Rather than a + * priori split the loop into a fixed number of chunks and executing + * them on tasks or threads, the TBB library uses the following + * concept: the range over which the loop iterates is split into a + * certain number of sub-ranges (for example two or three times as + * many as there are CPU cores) and are equally distributed among + * threads; threads then execute sub-ranges and, if they are done with + * their work, steal entire or parts of sub-ranges from other threads + * to keep busy. This way, work is load-balanced even if not every + * loop iteration takes equally much work, or if some of the CPUs fall + * behind because the operating system interrupted it for some other + * work. + * + * The TBB library primitives for this are a bit clumsy so deal.II has + * wrapper routines for the most frequently use operations. The + * simplest one is akin to the what the std::transform does: it takes + * one or more ranges of input operators, one output iterator, and a + * function object. A typical implementation of std::transform would + * look like this: + * @code + template + void transform (const InputIterator1 &begin_in_1, + const InputIterator1 &end_in_1, + const InputIterator2 &begin_in_2, + const OutputIterator &begin_out, + FunctionObject &function) + { + InputIterator1 in_1 = begin_in_1; + InputIterator2 in_2 = begin_in_2; + OutputIterator out = begin_out; + + for (; in_1 != end_in_1; ++in_1, ++in_2, ++out) + *out = function(*in_1, *in_2); + } + * @endcode + * + * In many cases, function has no state, and so we can + * split this loop into several sub-ranges as explained + * above. Consequently, deal.II has a set of functions + * parallel::transform that look like the one above but that do their + * work in %parallel (there are several versions with one, two, and + * more input iterators for function objects that take one, two, or + * more arguments). The only difference in calling these functions is + * that they take an additional last argument that denotes the minimum + * size of sub-ranges of [begin_in_1,end_in_1); it should + * be big enough so that we don't spend more time on scheduling + * sub-ranges to processors but small enough that processors can be + * efficiently load balanced. A rule of thumb appears to be that a + * sub-range is too small if it takes less than 2000 instructions to + * execute it. + * + * An example of how to use these functions are vector operations like + * the addition in $z = x+y$ where all three objects are of type Vector: + * @code + parallel::transform (x.begin(), x.end(), + y.begin(), + z.begin(), + (boost::lambda::_1 + boost::lambda::_2), + 1000); + * @endcode + * + * In this example, we used the Boost + * Lambda library to construct, on the fly, a function object that + * takes two arguments and returns the sum of the two. This is exactly + * what we needed when we want to add the individual elements of + * vectors $x$ and $y$ and write the sum of the two into the elements + * of $z$. 
Because of the way Boost Lambda is written, the function + * object that we get here is completely known to the compiler and + * when it expands the loop that results from parallel::transform will + * be as if we had written the loop in its obvious form: + * @code + InputIterator1 in_1 = x.begin(); + InputIterator2 in_2 = y.begin(); + OutputIterator out = z.begin(); + + for (; in_1 != x.end(); ++in_1, ++in_2, ++out) + *out = *in_1 + *in_2; + * @endcode + * The next C++ standard will contain a more elegant way to achieve the + * same effect shown above using the Boost Lambda library, through a + * mechanism known as lambda expressions and closures. + * + * Note also that we have made sure that no CPU ever gets a chunk of + * the whole loop that is smaller than 1000 iterations (unless the + * whole range is smaller). + * + * + * @anchor MTComplexLoops + *
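The variants with a single input range work the same way. As a small sketch (assuming x and z are Vector<double> objects of equal size, and using the single-input-iterator overload mentioned above), scaling a vector elementwise could be written as:
@code
// compute z = 2*x elementwise; sub-ranges of fewer than 1000 elements are
// not worth shipping to a separate processor core
parallel::transform (x.begin(), x.end(),
                     z.begin(),
                     (2. * boost::lambda::_1),
                     1000);
@endcode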

    Abstractions for tasks: More complex loops

    + * + * The scheme shown in the previous section is effective if the + * operation done in each iteration is such that it does not require + * significant setup costs and can be inlined by the compiler. Lambda + * expressions are exactly of this kind because the compiler knows + * everything about the lambda expression and can inline it, thereby + * eliminating the overhead of calling an external function. However, + * there are cases where it is inefficient to call some object or + * function within each iteration. + * + * An example for this case is sparse matrix-vector multiplication. If you + * know how data is stored in compressed row format like in the SparseMatrix + * class, then a matrix-vector product function looks like this: + * @code + void SparseMatrix::vmult (const Vector &src, + Vector &dst) const + { + const double *val_ptr = &values[0]; + const unsigned int *colnum_ptr = &colnums[0]; + Vector::iterator dst_ptr = dst.begin(); + + for (unsigned int row=0; rowsrc and write it into + * the corresponding element of the dst vector. The code is made + * more efficient by utilizing that the elements of the next row follow + * the ones of the current row immediately, i.e. at the beginning of + * the loop body we do not have to re-set the pointers that point to the + * values and column %numbers of each row. + * + * Using the parallel::transform function above, we could in principle write + * this code as follows: + * @code + void SparseMatrix::vmult (const Vector &src, + Vector &dst, + Vector::iterator &dst_row) const + { + const unsigned int row = (dst_row - dst.begin()); + + const double *val_ptr = &values[rowstart[row]]; + const unsigned int *colnum_ptr = &colnums[rowstart[row]]; + + double s = 0.; + const double *const val_end_of_row = &values[rowstart[row+1]]; + while (val_ptr != val_end_of_row) + s += *val_ptr++ * src(*colnum_ptr++); + *dst_row = s; + } + + void SparseMatrix::vmult (const Vector &src, + Vector &dst) const + { + parallel::transform (dst.begin(), dst.end(), + boost::bind (&SparseMatrix::vmult_one_row, + this, + boost::cref(src), + boost::ref(dst), + _1), + 200); + } + * @endcode + * Note how we use boost::bind + * to bind certain arguments to the vmult_one_row + * function, leaving one argument open and thus allowing the + * parallel::transform function to consider the passed function argument as + * unary. Also note that we need to make the source and destination vectors as + * (const) references to prevent boost::bind from passing them by value + * (implying a copy for src and writing the result into a + * temporary copy of dst, neither of which is what we desired). + * Finally, notice the grainsize of a minimum of 200 rows of a matrix that + * should be processed by an individual CPU core. + * + * The point is that while this is correct, it is not efficient: we have to + * set up the row, val_ptr, colnum_ptr variables in each + * iteration of the loop. Furthermore, since now the function object to be + * called on each row is not a simple Boost Lambda expression any more, there + * is an implied function call including argument passing in each iteration of + * the loop. + * + * A more efficient way is to let TBB split the original range into + * sub-ranges, and then call a target function not on each individual element + * of the loop, but on the entire range. 
This is facilitated by the + * parallel::apply_to_subranges function: + * @code + void + SparseMatrix::vmult_on_subrange (const unsigned int begin_row, + const unsigned int end_row, + const Vector &src, + Vector &dst) + { + const double *val_ptr = &values[rowstart[begin_row]]; + const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]]; + Vector::iterator dst_ptr = dst.begin() + begin_row; + + for (unsigned int row=begin_row; rowvmult_on_subrange function on sub-ranges + * of at least 200 elements each, so that the initial setup cost can amortize. + * + * A related operation is when the loops over elements each produce a + * result that must then be accumulated (other reduction operations + * than addition of numbers would work as well). An example is to form + * the matrix norm $x^T M x$ (it really is only a norm if $M$ is + * positive definite, but let's assume for a moment that it is). A + * sequential implementation would look like this for sparse matrices: + * @code + double SparseMatrix::mat_norm (const Vector &x) const + { + const double *val_ptr = &values[0]; + const unsigned int *colnum_ptr = &colnums[0]; + + double norm_sqr = 0; + + for (unsigned int row=0; rowAbstractions for tasks: Work streams + * + * In the examples shown in the introduction we had identified a + * number of functions that can be run as independent tasks. Ideally, + * this number of tasks is larger than the number of CPU cores (to + * keep them busy) but is also not exceedingly huge (so as not to + * inundate the scheduler with millions of tasks that will then have + * to be distributed to 2 or 4 cores, for example). There are, + * however, cases where we have many thousands or even millions of + * relatively independent jobs: for example, assembling local + * contributions to the global linear system on each cell of a mesh; + * evaluating an error estimator on each cell; or postprocessing on + * each cell computed data for output fall into this class. + * + * Code like this could then be written like this: + * @code + template + void MyClass::assemble_on_one_cell (const typename DoFHandler::active_cell_iterator &cell) + { ... } + + template + void MyClass::assemble_system () + { + Threads::TaskGroup task_group; + for (typename DoFHandler::active_cell_iterator + cell = dof_handler.begin_active(); + cell != dof_handler.end(); ++cell) + task_group += Threads::new_task (&MyClass::assemble_on_one_cell, + *this, + cell); + task_group.join_all (); + } + * @endcode + * On a big mesh, with maybe a million cells, this would create a massive + * number of tasks; while it would keep all CPU cores busy for a while, the + * overhead of first creating so many tasks, scheduling them, and then waiting + * for them would probably not lead to efficient code. A better strategy would + * be if the scheduler could somehow indicate that it has available resources, + * at which point we would feed it another newly created task, and we would do + * so until we run out of tasks and the ones that were created have been + * worked on. + * + * This is essentially what the WorkStream class does: You give it an iterator + * range from which it can draw objects to work on (in the above case it is + * the interval given by dof_handler.begin_active() to + * dof_handler.end()), and a function that would do the work on + * each item (the function MyClass::assemble_on_one_cell) + * together with an object if it is a member function. 
+ * + * Essentially, the way the MyClass::assemble_system + * function could be written then is like this (note that this is not quite + * the correct syntax, as will be described below): + * @code + template + void MyClass::assemble_on_one_cell (const typename DoFHandler::active_cell_iterator &cell) + { ... } + + template + void MyClass::assemble_system () + { + WorkStream work_stream; + work_stream.run (dof_handler.begin_active(), + dof_handler.end(), + *this, + &MyClass::assemble_on_one_cell); + } + * @endcode + * + * There are at least three problems with this, however: + *
      + *
    • First, let us take a look at how the MyClass::assemble_on_one_cell + * function likely looks: + * @code + template + void MyClass::assemble_on_one_cell (const typename DoFHandler::active_cell_iterator &cell) + { + FEValues fe_values (...); + FullMatrix cell_matrix (...); + Vector cell_rhs (...); + + // assemble local contributions + fe_values.reinit (cell); + for (unsigned int i=0; i dof_indices (...); + cell->get_dof_indices (dof_indices); + for (unsigned int i=0; iMyClass::assemble_on_one_cell, could potentially try + * to write into the object MyClass::system_matrix at + * the same time. This could be avoided by explicit synchronisation + * using a Threads::Mutex, for example, and would look like this: + * @code + // now copy results into global system + std::vector dof_indices (...); + cell->get_dof_indices (dof_indices); + + static Threads::Mutex mutex; + mutex.acquire (); + for (unsigned int i=0; iA second correctness problem is that even if we do lock the global matrix + * and right hand side objects using a mutex, we do so in a more or less + * random order: while tasks are created in the order in which we traverse + * cells normally, there is no guarantee that by the time we get to the + * point where we want to copy the local into the global contributions the + * order is still as if we computed things sequentially. In other words, it + * may happen that we add the contributions of cell 1 before those of cell + * 0. That may seem harmless because addition is commutative, but in fact it + * is not if done in floating point arithmetic: $a+b+c \neq a+c+b$ -- take + * for example $a=1, b=-1, c=10^{-20}$ (because $1+10^{-20}=1$ in floating + * point arithmetic, using double precision). + * + * As a consequence, the exact values that end up in the global matrix and + * right hand side will be close but may differ by amounts close to + * round-off depending on the order in which tasks happened to finish their + * job. That's not a desirable outcome, since results will not be + * reproducible this way. + * + * As a consequence, the way the WorkStream class is designed is to use two + * functions: the MyClass::assemble_on_one_cell computes the + * local contributions and stores it somewhere (we'll get to that next), and + * a second function, say MyClass::copy_local_to_global, that + * copies the results computed on each cell into the global objects. The + * trick implemented in the WorkStream class is that (i) the + * MyClass::copy_local_to_global never runs more than once in + * %parallel, so we do not need to synchronise execution through a mutex, and + * (ii) it runs in exactly the same order on cells as they appear in the + * iterator range, i.e. we add elements into the global matrix the same way + * every time, independently of when the computation of these element + * finishes. + * + * We now only have to discuss how the + * MyClass::assemble_on_one_cell communicates to + * MyClass::copy_local_to_global what it has computed. 
The way + * this is done is to use an object that holds all temporary data: + * @code + struct PerTaskData { + FullMatrix cell_matrix; + Vector cell_rhs; + std::vector dof_indices; + } + + template + void MyClass::assemble_on_one_cell (const typename DoFHandler::active_cell_iterator &cell, + PerTaskData &data) + { + FEValues fe_values (...); + + data.cell_matrix = 0; + data.cell_rhs = 0; + + // assemble local contributions + fe_values.reinit (cell); + for (unsigned int i=0; iget_dof_indices (data.dof_indices); + } + + template + void MyClass::copy_local_to_global (const PerTaskData &data) + { + for (unsigned int i=0; i + void MyClass::assemble_system () + { + PerTaskData per_task_data; + ...initialize members of per_task_data to the correct sizes... + + WorkStream work_stream; + work_stream.run (dof_handler.begin_active(), + dof_handler.end(), + *this, + &MyClass::assemble_on_one_cell, + &MyClass::copy_local_to_global, + per_task_data); + } + * @endcode + * + * The way this works is that we create a sample per_task_data + * object that the work stream object will replicate once per task that runs + * in %parallel. For each task, this object will be passed first to one of + * possibly several instances of MyClass::assemble_on_one_cell + * running in %parallel which fills it with the data obtained on a single + * cell, and then to a sequentially running + * MyClass::copy_local_to_global that copies data into the + * global object. In practice, of course, we will not generate millions of + * per_task_data objects if we have millions of cells; rather, + * we recycle these objects after they have been used by + * MyClass::copy_local_to_global and feed them back into + * another instance of MyClass::assemble_on_one_cell; this + * means that the number of such objects we actually do create is a small + * multiple of the number of threads the scheduler uses, which is typically + * about as many as there are CPU cores on a system. + * + *
    • The last issue that is worth addressing is that the way we wrote the + * MyClass::assemble_on_one_cell function above, we create and + * destroy an FEValues object every time the function is called, i.e. once + * for each cell in the triangulation. That's an immensely expensive + * operation because the FEValues class tries to do a lot of work in its + * constructor in an attempt to reduce the number of operations we have to + * do on each cell (i.e. it increases the constant in the ${\cal O}(1)$ + * effort to initialize such an object in order to reduce the constant in + * the ${\cal O}(N)$ operations to call FEValues::reinit on the $N$ cells of + * a triangulation). Creating and destroying an FEValues object on each cell + * invalidates this effort. + * + * The way to avoid this is to put the FEValues object into a second + * structure that will hold scratch data, and initialize it in the + * constructor: + * @code + struct PerTaskData { + FullMatrix cell_matrix; + Vector cell_rhs; + std::vector dof_indices; + + PerTaskData (const FiniteElement &fe) + : + cell_matrix (fe.dofs_per_cell, fe.dofs_per_cell), + cell_rhs (fe.dofs_per_cell), + dof_indices (fe.dofs_per_cell) + {} + } + + struct ScratchData { + FEValues fe_values; + + ScratchData (const FiniteElement &fe, + const Quadrature &quadrature, + const UpdateFlags update_flags) + : + fe_values (fe, quadrature, update_flags) + {} + } + * @endcode + * and then use this FEValues object in the assemble function: + * @code + template + void MyClass::assemble_on_one_cell (const typename DoFHandler::active_cell_iterator &cell, + ScratchData &scratch, + PerTaskData &data) + { + scratch.fe_values.reinit (cell); + ... + } + * @endcode + * The same approach, putting things into the ScratchData + * data structure should be used for everything that is expensive to + * construct. This holds, in particular, for everything that needs to + * allocate memory upon construction; for example, if the values of a + * function need to be evaluated at quadrature points, then this is + * expensive: + * @code + template + void MyClass::assemble_on_one_cell (const typename DoFHandler::active_cell_iterator &cell, + ScratchData &scratch, + PerTaskData &data) + { + std::vector rhs_values (fe_values.n_quadrature_points); + rhs_function.value_list (data.fe_values.get_quadrature_points, + rhs_values) + ... + } + * @endcode + * whereas this is a much cheaper way: + * @code + struct ScratchData { + std::vector rhs_values; + FEValues fe_values; + + ScratchData (const FiniteElement &fe, + const Quadrature &quadrature, + const UpdateFlags update_flags) + : + rhs_values (quadrature.n_quadrature_points), + fe_values (fe, quadrature, update_flags) + {} + } + + template + void MyClass::assemble_on_one_cell (const typename DoFHandler::active_cell_iterator &cell, + ScratchData &scratch, + PerTaskData &data) + { + rhs_function.value_list (scratch.fe_values.get_quadrature_points, + scratch.rhs_values) + ... + } + * @endcode + * + *
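+ * A related remark, judging from how the step-31 and step-32 example
+ * programs below set up their scratch data structures: the WorkStream
+ * machinery needs to be able to copy such scratch objects so that each
+ * concurrently running worker can get its own, and FEValues objects
+ * cannot be copied directly. Scratch classes like the one above
+ * therefore typically also declare a copy constructor that builds a new
+ * FEValues object from the original's finite element, quadrature
+ * formula, and update flags. A sketch, using the member names of the
+ * ScratchData structure above:
+ * @code
+   struct ScratchData {
+     std::vector<double> rhs_values;
+     FEValues<dim>       fe_values;
+
+     // ...constructor taking fe, quadrature and update flags as before...
+
+     // copy constructor: FEValues cannot be copied, so create a new one
+     // that looks at the same finite element, quadrature and flags
+     ScratchData (const ScratchData &scratch)
+                  :
+                  rhs_values (scratch.rhs_values),
+                  fe_values (scratch.fe_values.get_fe(),
+                             scratch.fe_values.get_quadrature(),
+                             scratch.fe_values.get_update_flags())
+       {}
+   };
+ * @endcode
+ *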
    + * + * As a final point: What if, for some reason, my assembler and copier + * function do not match the above signature with three and one argument, + * respectively? That's not a problem either. The WorkStream class offers two + * versions of the WorkStream::run() function: one that takes an object and + * the addresses of two member functions, and one that simply takes two + * function objects that can be called with three and one argument, + * respectively. So, in other words, the following two calls are exactly + * identical: + * @code + work_stream.run (dof_handler.begin_active(), + dof_handler.end(), + *this, + &MyClass::assemble_on_one_cell, + &MyClass::copy_local_to_global, + per_task_data); + // ...is the same as: + work_stream.run (dof_handler.begin_active(), + dof_handler.end(), + boost::bind(&MyClass::assemble_on_one_cell, *this, _1, _2, _3), + boost::bind(&MyClass::copy_local_to_global, *this, _1), + per_task_data); + * @endcode + * Note how boost::bind produces a function object that takes three + * arguments by binding the member function to the *this + * object. _1, _2 and _3 are placeholders for the first, + * second and third argument that can be specified later on. In other words, for + * example if p is the result of the first call to + * boost::bind, then the call p(cell, scratch_data, + * per_task_data) will result in executing + * this-@>assemble_on_one_cell (cell, scratch_data, per_task_data), + * i.e. boost::bind has bound the object to the function pointer + * but left the three arguments open for later. + * + * Similarly, let us assume that MyClass::assemble_on_one_cell + * has the following signature in a solver of nonlinear, time-dependent problem: + * @code + template + void + MyClass::assemble_on_one_cell (const Vector &linearization_point, + const typename DoFHandler::active_cell_iterator &cell, + ScratchData &scratch, + PerTaskData &data, + const double current_time) + { ... } + * @endcode + * Because WorkStream expects to be able to call the worker function with + * just three arguments, the first of which is the iterator and the second + * and third the ScratchData and PerTaskData objects, we need to pass the following + * to it: + * @code + work_stream.run (dof_handler.begin_active(), + dof_handler.end(), + boost::bind(&MyClass::assemble_on_one_cell, + *this, + current_solution, + _1, + _2, + _3, + previous_time+time_step), + boost::bind(&MyClass::copy_local_to_global, + *this, _1), + per_task_data); + * @endcode + * Here, we bind the object, the linearization point argument, and the + * current time argument to the function before we have it off to + * WorkStream::run(). WorkStream::run() will then simply call the + * function with the cell and scratch and per task objects which will be filled + * in at the positions indicated by _1, _2 and _3. + * + * + * @anchor MTThreads + *
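+ * As a side note, the same calls can also be written without using
+ * Boost directly: the example programs converted in this patch (see the
+ * step-31 program below) use the compatibility wrapper
+ * std_cxx0x::bind together with the usual _1, _2, _3
+ * placeholders. A sketch of the equivalent call:
+ * @code
+   work_stream.run (dof_handler.begin_active(),
+                    dof_handler.end(),
+                    std_cxx0x::bind (&MyClass<dim>::assemble_on_one_cell,
+                                     this, _1, _2, _3),
+                    std_cxx0x::bind (&MyClass<dim>::copy_local_to_global,
+                                     this, _1),
+                    per_task_data);
+ * @endcode
+ *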

    Thread-based parallelism

+ *
+ * Even though tasks are a higher-level way to describe things, there are
+ * cases where they are poorly suited to the job at hand. The main reasons for
+ * not using tasks, even for computations that are independent, are listed in
+ * the section on
+ * @ref MTHow "How scheduling tasks works and when task-based programming is not efficient"
+ * above. Primarily, jobs that are not able to fully utilize the CPU are bad
+ * fits for tasks.
+ *
+ * In cases like these, you can resort to explicitly starting threads, rather
+ * than tasks, using pretty much the same syntax as above. For example, if you
+ * had a function in your application that generates graphical output and then
+ * estimates the error to refine the mesh for the next iteration of an
+ * adaptive mesh scheme, it could look like this:
+ * @code
+   template <int dim>
+   void MyClass<dim>::output_and_estimate_error () const
+   {
+     DataOut<dim> data_out;
+     data_out.attach_dof_handler (dof_handler);
+     data_out.add_data_vector (solution, "solution");
+     data_out.build_patches ();
+
+     std::ofstream output ("solution.vtk");
+
+     Threads::Thread<void>
+       thread = Threads::new_thread (&DataOut<dim>::write_vtk, data_out, output);
+
+     Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+     KellyErrorEstimator<dim>::estimate (dof_handler,
+                                         QGauss<dim-1>(3),
+                                         typename FunctionMap<dim>::type(),
+                                         solution,
+                                         estimated_error_per_cell);
+     thread.join ();
+   }
+ * @endcode
+ *
+ * Here, Threads::new_thread starts the given function that writes to the
+ * output file on a new thread that can run in %parallel to everything
+ * else. Note that this function is actually pretty well parallelized: both
+ * DataOut::build_patches() and KellyErrorEstimator::estimate() already use
+ * WorkStream and will therefore utilize pretty much all available compute
+ * resources. In %parallel to the KellyErrorEstimator::estimate() function, the
+ * DataOut::write_vtk() function will run on a %parallel thread, independent of
+ * the scheduler that takes care of the tasks, but that is not a problem
+ * because writing lots of data to a file is not something that will keep a
+ * CPU very busy.
+ *
+ * Creating threads works pretty much the same way as creating tasks, i.e. you
+ * can wait for the termination of a thread using Threads::Thread::join(),
+ * query the return value of a finished thread using
+ * Threads::Thread::return_value(), and you can group threads into a
+ * Threads::ThreadGroup object and wait for all of them to finish. */
diff --git a/deal.II/doc/news/changes-tbb-branch.h b/deal.II/doc/news/changes-tbb-branch.h
new file mode 100644
index 0000000000..6e9ea86cef
--- /dev/null
+++ b/deal.II/doc/news/changes-tbb-branch.h
@@ -0,0 +1,93 @@
+
  • +

+ Changed: The DataOut::build_patches, DataOutFaces::build_patches, and + DataOutRotation::build_patches functions have lost the argument + that indicated the number of threads with which they should build the + intermediate representation. This is something that now happens + transparently in the background and doesn't need caller input any more.
    + (WB 2008/12/16) +

    + + +
  • +

+ Changed: Previously, one had to give the two flags + --enable-multithreading --with-multithreading to + ./configure to enable thread usage throughout the library. + This has now been simplified: only the flag --enable-threads + is necessary. Furthermore, since most machines have multiple + cores these days, the default is now to use threads. This can be switched + off using --disable-threads, however.
    + (WB 2008/09/29) +

    + +
  • +

+ New: As a primary means of parallelizing programs, deal.II now uses + a task-based, rather than thread-based, approach, in which one + uses a high-level description of what needs to be done, + rather than how these jobs have to be mapped onto threads. We then + use the Threading + Building Blocks (TBB) library to schedule tasks onto available + hardware resources. This new scheme of describing parallelism and + various abstractions to make programming in this framework easier + are described in great detail in the + @ref threads "Parallel computing with multiple processors" module. + In addition, most of the parallelism already used within deal.II + has been converted to use tasks, rather than threads, and so have + some of the tutorial programs.
    + (WB 2009/01/09) +

    +
  • + +
  • +

+ Changed: The support for threading has been completely re-written. In + particular, the Threads::spawn functions have been deprecated, and + new functions Threads::new_thread have been introduced. + Threading is now discussed in a lot of detail in the + @ref threads "Parallel computing with multiple processors" module.
    + (WB 2009/01/09) +

    +
  • + +
  • +

    + Fixed: The DoFRenumbering::component_wise function for MGDoFHandler objects + did a few things in parallel that weren't thread-safe. This is now fixed. +
    + (WB, 2009/01/20) +

    + +
  • +

+ Changed: The KellyErrorEstimator::estimate functions had a parameter + that indicated the number of threads to be used in the computation. + This parameter continues to exist for compatibility, but is now ignored. + Rather, the number of threads is determined automatically by scheduling + the requested computations on available compute resources.
    + (WB, 2008/12/29) +

    + +
  • +

+ New: The new function internal::hp::FEValuesBase::get_fe_collection + allows querying the finite element collection currently in use in an hp::FEValues, + hp::FEFaceValues, or hp::FESubfaceValues object.
    + (WB 2008/09/30) +

    + +
  • +

+ New: The new function FEValuesBase::get_update_flags allows querying + the update flags that are currently set on an FEValues, FEFaceValues, or + FESubfaceValues object.
    + (WB 2008/09/29) +

    diff --git a/deal.II/doc/readme.html b/deal.II/doc/readme.html index 371601f5ee..38db95c41b 100644 --- a/deal.II/doc/readme.html +++ b/deal.II/doc/readme.html @@ -454,12 +454,13 @@
• - --enable-multithreading --with-multithreading: The - first of these flags instructs the compiler to generate code - that can be run from multiple threads. The second switches on - code in the library that actually creates multiple threads for - some tasks, making programs significantly faster on machines - with multiple cores or processors. + --enable-threads: This flag indicates that those + parts of the library that support it will compute in parallel + using multiple threads, making programs significantly faster on + machines with multiple cores or processors. The default is to + use multiple threads since most machines today have several + processor cores. If this is not desired, + use --disable-threads.

  • diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc index 4b9d28f851..5387fa75b2 100644 --- a/deal.II/examples/step-13/step-13.cc +++ b/deal.II/examples/step-13/step-13.cc @@ -1134,13 +1134,15 @@ namespace LaplaceSolver } - // The following function assembles - // matrix and right hand side of - // the linear system to be solved - // in each step. It goes along the - // same lines as used in previous + // The following function assembles matrix + // and right hand side of the linear system + // to be solved in each step. It goes along + // the same lines as used in previous // examples, so we explain it only - // briefly: + // briefly. Note that we do a number of + // things in parallel, a process described + // in more detail in the @ref threads + // module. template void Solver::assemble_linear_system (LinearSystem &linear_system) @@ -1183,13 +1185,14 @@ namespace LaplaceSolver Threads::ThreadMutex mutex; Threads::ThreadGroup<> threads; for (unsigned int thread=0; thread::assemble_matrix) - (linear_system, - thread_ranges[thread].first, - thread_ranges[thread].second, - mutex); - - // While the spawned threads + threads += Threads::new_thread (&Solver::assemble_matrix, + *this, + linear_system, + thread_ranges[thread].first, + thread_ranges[thread].second, + mutex); + + // While the new threads // assemble the system matrix, we // can already compute the right // hand side vector in the main @@ -1199,8 +1202,8 @@ namespace LaplaceSolver assemble_rhs (linear_system.rhs); linear_system.hanging_node_constraints.condense (linear_system.rhs); - // And while we're already at it - // to compute things in parallel, + // And while we're already + // computing things in parallel, // interpolating boundary values // is one more thing that can be // done independently, so we do @@ -1389,7 +1392,7 @@ namespace LaplaceSolver // concurrency, at least; // otherwise, the actions are // performed sequentially). Note - // that we spawn only one thread, + // that we start only one thread, // and do the second action in the // main thread. 
Since only one // thread is generated, we don't @@ -1446,8 +1449,9 @@ namespace LaplaceSolver = &DoFTools::make_hanging_node_constraints; Threads::Thread<> - mhnc_thread = Threads::spawn (mhnc_p)(dof_handler, - hanging_node_constraints); + mhnc_thread = Threads::new_thread (mhnc_p, + dof_handler, + hanging_node_constraints); sparsity_pattern.reinit (dof_handler.n_dofs(), dof_handler.n_dofs(), diff --git a/deal.II/examples/step-14/step-14.cc b/deal.II/examples/step-14/step-14.cc index 739242b13e..54e260fb09 100644 --- a/deal.II/examples/step-14/step-14.cc +++ b/deal.II/examples/step-14/step-14.cc @@ -647,11 +647,12 @@ namespace LaplaceSolver Threads::ThreadMutex mutex; Threads::ThreadGroup<> threads; for (unsigned int thread=0; thread::assemble_matrix) - (linear_system, - thread_ranges[thread].first, - thread_ranges[thread].second, - mutex); + threads += Threads::new_thread (&Solver::assemble_matrix, + *this, + linear_system, + thread_ranges[thread].first, + thread_ranges[thread].second, + mutex); assemble_rhs (linear_system.rhs); linear_system.hanging_node_constraints.condense (linear_system.rhs); @@ -726,7 +727,9 @@ namespace LaplaceSolver = &DoFTools::make_hanging_node_constraints; Threads::Thread<> - mhnc_thread = Threads::spawn (mhnc_p)(dof_handler, hanging_node_constraints); + mhnc_thread = Threads::new_thread (mhnc_p, + dof_handler, + hanging_node_constraints); sparsity_pattern.reinit (dof_handler.n_dofs(), dof_handler.n_dofs(), @@ -2397,7 +2400,7 @@ namespace LaplaceSolver // has a subtle but important // drawback: we will call these // functions over and over - // again, many thousand times + // again, many thousands of times // maybe; it has now turned out // that allocating vectors and // other objects that need @@ -2619,8 +2622,10 @@ namespace LaplaceSolver WeightedResidual::solve_problem () { Threads::ThreadGroup<> threads; - threads += Threads::spawn (*this, &WeightedResidual::solve_primal_problem)(); - threads += Threads::spawn (*this, &WeightedResidual::solve_dual_problem)(); + threads += Threads::new_thread (&WeightedResidual::solve_primal_problem, + *this); + threads += Threads::new_thread (&WeightedResidual::solve_dual_problem, + *this); threads.join_all (); } @@ -2989,12 +2994,13 @@ namespace LaplaceSolver const unsigned int n_threads = multithread_info.n_default_threads; Threads::ThreadGroup<> threads; for (unsigned int i=0; i::estimate_some) - (primal_solution, - dual_weights, - n_threads, i, - error_indicators, - face_integrals); + threads += Threads::new_thread (&WeightedResidual::estimate_some, + *this, + primal_solution, + dual_weights, + n_threads, i, + error_indicators, + face_integrals); threads.join_all(); // Once the error contributions diff --git a/deal.II/examples/step-28/step-28.cc b/deal.II/examples/step-28/step-28.cc index d3fd019d83..123a20dc33 100644 --- a/deal.II/examples/step-28/step-28.cc +++ b/deal.II/examples/step-28/step-28.cc @@ -1551,10 +1551,12 @@ EnergyGroup::output_results (const unsigned int cycle) const // doing all the rest. In several // places, we have to do something // for all energy groups, in which - // case we will spawn threads for + // case we will start threads for // each group to let these things run // in parallel if deal.II was // configured for multithreading. + // For strategies of parallelization, + // take a look at the @ref threads module. // // The biggest difference to previous // example programs is that we also @@ -2025,30 +2027,27 @@ void NeutronDiffusionProblem::initialize_problem() // iteration. 
The total power then is // used to renew k-effective. // - // Since the total fission source is - // a sum over all the energy groups, - // and since each of these sums can - // be computed independently, we - // actually do this in parallel. One - // of the problems is that the - // function in the - // EnergyGroup class - // that computes the fission source - // returns a value. If we now simply - // spin off a new thread, we have to - // later capture the return value of - // the function run on that - // thread. The way this can be done - // is to use the return value of the - // Threads::spawn function, which is - // of type Threads::Thread@ - // if the function spawned returns a - // double. We can the later ask this - // object for the returned value + // Since the total fission source is a sum + // over all the energy groups, and since each + // of these sums can be computed + // independently, we actually do this in + // parallel. One of the problems is that the + // function in the EnergyGroup + // class that computes the fission source + // returns a value. If we now simply spin off + // a new thread, we have to later capture the + // return value of the function run on that + // thread. The way this can be done is to use + // the return value of the + // Threads::new_thread function, which + // returns an object of type + // Threads::Thread@ if the function + // spawned returns a double. We can then later + // ask this object for the returned value // (when doing so, the // Threads::Thread@::return_value - // function first waits for the - // thread to finish). + // function first waits for the thread to + // finish if it hasn't done so already). // // The way this function then works // is to first spawn one thread for @@ -2061,8 +2060,8 @@ double NeutronDiffusionProblem::get_total_fission_source () const { std::vector > threads; for (unsigned int group=0; group::get_fission_source) ()); + threads.push_back (Threads::new_thread (&EnergyGroup::get_fission_source, + *energy_groups[group])); double fission_source = 0; for (unsigned int group=0; group::refine_grid () { Threads::ThreadGroup<> threads; for (unsigned int group=0; group::estimate_errors) - (group_error_indicators.block(group)); + threads += Threads::new_thread (&EnergyGroup::estimate_errors, + *energy_groups[group], + group_error_indicators.block(group)); threads.join_all (); } @@ -2115,10 +2115,11 @@ void NeutronDiffusionProblem::refine_grid () { Threads::ThreadGroup<> threads; for (unsigned int group=0; group::refine_grid) - (group_error_indicators.block(group), - refine_threshold, - coarsen_threshold); + threads += Threads::new_thread (&EnergyGroup::refine_grid, + *energy_groups[group], + group_error_indicators.block(group), + refine_threshold, + coarsen_threshold); threads.join_all (); } } @@ -2176,9 +2177,9 @@ void NeutronDiffusionProblem::run () Threads::ThreadGroup<> threads; for (unsigned int group=0; group::assemble_system_matrix) - (); + threads += Threads::new_thread + (&EnergyGroup::assemble_system_matrix, + *energy_groups[group]); threads.join_all (); double error; diff --git a/deal.II/examples/step-31/step-31.cc b/deal.II/examples/step-31/step-31.cc index 593bf5953e..495b6ad68c 100644 --- a/deal.II/examples/step-31/step-31.cc +++ b/deal.II/examples/step-31/step-31.cc @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -534,6 +535,116 @@ namespace LinearSolvers +namespace AssemblerData +{ + template + struct StokesPreconditioner + { + StokesPreconditioner (const FiniteElement 
&stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags update_flags); + StokesPreconditioner (const StokesPreconditioner &data); + + FEValues stokes_fe_values; + + FullMatrix local_matrix; + std::vector local_dof_indices; + + std::vector > grad_phi_u; + std::vector phi_p; + }; + + template + StokesPreconditioner:: + StokesPreconditioner (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags update_flags) + : + stokes_fe_values (stokes_fe, stokes_quadrature, update_flags), + local_matrix (stokes_fe.dofs_per_cell, stokes_fe.dofs_per_cell), + local_dof_indices (stokes_fe.dofs_per_cell), + grad_phi_u (stokes_fe.dofs_per_cell), + phi_p (stokes_fe.dofs_per_cell) + {} + + + + template + StokesPreconditioner:: + StokesPreconditioner (const StokesPreconditioner &data) + : + stokes_fe_values (data.stokes_fe_values.get_fe(), + data.stokes_fe_values.get_quadrature(), + data.stokes_fe_values.get_update_flags()), + local_matrix (data.local_matrix), + local_dof_indices (data.local_dof_indices), + grad_phi_u (data.grad_phi_u), + phi_p (data.phi_p) + {} + + + + template + struct StokesSystem : public StokesPreconditioner + { + StokesSystem (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags stokes_update_flags, + const FiniteElement &temperature_fe, + const UpdateFlags temperature_update_flags); + + StokesSystem (const StokesSystem &data); + + FEValues temperature_fe_values; + Vector local_rhs; + + std::vector > phi_u; + std::vector > grads_phi_u; + std::vector div_phi_u; + + std::vector old_temperature_values; + }; + + + template + StokesSystem:: + StokesSystem (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags stokes_update_flags, + const FiniteElement &temperature_fe, + const UpdateFlags temperature_update_flags) + : + StokesPreconditioner (stokes_fe, stokes_quadrature, + stokes_update_flags), + temperature_fe_values (temperature_fe, stokes_quadrature, + temperature_update_flags), + local_rhs (stokes_fe.dofs_per_cell), + phi_u (stokes_fe.dofs_per_cell), + grads_phi_u (stokes_fe.dofs_per_cell), + div_phi_u (stokes_fe.dofs_per_cell), + old_temperature_values (stokes_quadrature.n_quadrature_points) + {} + + + template + StokesSystem:: + StokesSystem (const StokesSystem &data) + : + StokesPreconditioner (data), + temperature_fe_values (data.temperature_fe_values.get_fe(), + data.temperature_fe_values.get_quadrature(), + data.temperature_fe_values.get_update_flags()), + local_rhs (data.local_rhs), + phi_u (data.phi_u), + grads_phi_u (data.grads_phi_u), + div_phi_u (data.div_phi_u), + old_temperature_values (data.old_temperature_values) + {} +} + + + + // @sect3{The BoussinesqFlowProblem class template} // The definition of the class that defines @@ -607,7 +718,6 @@ class BoussinesqFlowProblem const double global_T_variation, const double cell_diameter) const; - Triangulation triangulation; double global_Omega_diameter; @@ -650,6 +760,19 @@ class BoussinesqFlowProblem bool rebuild_stokes_matrix; bool rebuild_temperature_matrices; bool rebuild_stokes_preconditioner; + + void + local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, + AssemblerData::StokesPreconditioner &data); + + void copy_local_to_global_stokes_preconditioner (const AssemblerData::StokesPreconditioner &data); + + + void + local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, + AssemblerData::StokesSystem &data); + + void 
copy_local_to_global_stokes_system (const AssemblerData::StokesSystem &data); }; @@ -1321,6 +1444,79 @@ void BoussinesqFlowProblem::setup_dofs () + + + +template +void +BoussinesqFlowProblem:: +local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, + AssemblerData::StokesPreconditioner &data) +{ + const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; + const unsigned int n_q_points = data.stokes_fe_values.n_quadrature_points; + + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + + data.stokes_fe_values.reinit (cell); + data.local_matrix = 0; + + // The creation of the local matrix is + // rather simple. There are only a + // Laplace term (on the velocity) and a + // mass matrix weighted by $\eta^{-1}$ + // to be generated, so the creation of + // the local matrix is done in two + // lines. Once the local matrix is + // ready (loop over rows and columns in + // the local matrix on each quadrature + // point), we get the local DoF indices + // and write the local information into + // the global matrix. We do this as in + // step-27, i.e. we directly apply the + // constraints from hanging nodes + // locally. By doing so, we don't have + // to do that afterwards, and we don't + // also write into entries of the + // matrix that will actually be set to + // zero again later when eliminating + // constraints. + for (unsigned int q=0; qget_dof_indices (data.local_dof_indices); +} + + +template +void +BoussinesqFlowProblem:: +copy_local_to_global_stokes_preconditioner (const AssemblerData::StokesPreconditioner &data) +{ + stokes_constraints.distribute_local_to_global (data.local_matrix, + data.local_dof_indices, + stokes_preconditioner_matrix); +} + + + + // @sect4{BoussinesqFlowProblem::assemble_stokes_preconditioner} // // This function assembles the matrix we use @@ -1336,7 +1532,7 @@ void BoussinesqFlowProblem::setup_dofs () // we create data structures for the cell // matrix and the relation between local and // global DoFs. The vectors - // phi_grad_u and + // grad_phi_u and // phi_p are going to hold the // values of the basis functions in order to // faster build up the local matrices, as was @@ -1351,74 +1547,25 @@ BoussinesqFlowProblem::assemble_stokes_preconditioner () stokes_preconditioner_matrix = 0; const QGauss quadrature_formula(stokes_degree+2); - FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_JxW_values | - update_values | - update_gradients); - - const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); - - std::vector > phi_grad_u (dofs_per_cell); - std::vector phi_p (dofs_per_cell); - - const FEValuesExtractors::Vector velocities (0); - const FEValuesExtractors::Scalar pressure (dim); - typename DoFHandler::active_cell_iterator - cell = stokes_dof_handler.begin_active(), - endc = stokes_dof_handler.end(); - for (; cell!=endc; ++cell) - { - stokes_fe_values.reinit (cell); - local_matrix = 0; - - // The creation of the local matrix is - // rather simple. There are only a - // Laplace term (on the velocity) and a - // mass matrix weighted by $\eta^{-1}$ - // to be generated, so the creation of - // the local matrix is done in two - // lines. 
Once the local matrix is - // ready (loop over rows and columns in - // the local matrix on each quadrature - // point), we get the local DoF indices - // and write the local information into - // the global matrix. We do this as in - // step-27, i.e. we directly apply the - // constraints from hanging nodes - // locally. By doing so, we don't have - // to do that afterwards, and we don't - // also write into entries of the - // matrix that will actually be set to - // zero again later when eliminating - // constraints. - for (unsigned int q=0; qget_dof_indices (local_dof_indices); - stokes_constraints.distribute_local_to_global (local_matrix, - local_dof_indices, - stokes_preconditioner_matrix); - } + AssemblerData::StokesPreconditioner + data_template (stokes_fe, quadrature_formula, + update_JxW_values | + update_values | + update_gradients); + + WorkStream().run (stokes_dof_handler.begin_active(), + stokes_dof_handler.end(), + std_cxx0x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_preconditioner, + this, + _1, + _2), + std_cxx0x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_preconditioner, + this, + _1), + data_template); } @@ -1623,37 +1770,14 @@ BoussinesqFlowProblem::build_stokes_preconditioner () // the local dofs compared to the global // system. template -void BoussinesqFlowProblem::assemble_stokes_system () +void +BoussinesqFlowProblem:: +local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, + AssemblerData::StokesSystem &data) { - std::cout << " Assembling..." << std::flush; - - if (rebuild_stokes_matrix == true) - stokes_matrix=0; - - stokes_rhs=0; - - const QGauss quadrature_formula (stokes_degree+2); - FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_values | - update_quadrature_points | - update_JxW_values | - (rebuild_stokes_matrix == true - ? - update_gradients - : - UpdateFlags(0))); + const unsigned int dofs_per_cell = data.stokes_fe_values.get_fe().dofs_per_cell; + const unsigned int n_q_points = data.stokes_fe_values.n_quadrature_points; - FEValues temperature_fe_values (temperature_fe, quadrature_formula, - update_values); - - const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - Vector local_rhs (dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - // Next we need a vector that will contain // the values of the temperature solution // at the previous time level at the @@ -1677,12 +1801,6 @@ void BoussinesqFlowProblem::assemble_stokes_system () // extract the individual blocks // (velocity, pressure, temperature) from // the total FE system. - std::vector old_temperature_values(n_q_points); - - std::vector > phi_u (dofs_per_cell); - std::vector > grads_phi_u (dofs_per_cell); - std::vector div_phi_u (dofs_per_cell); - std::vector phi_p (dofs_per_cell); const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); @@ -1706,103 +1824,147 @@ void BoussinesqFlowProblem::assemble_stokes_system () // quadrature points. Then we are ready to // loop over the quadrature points on the // cell. 
+ data.stokes_fe_values.reinit (cell); + typename DoFHandler::active_cell_iterator - cell = stokes_dof_handler.begin_active(), - endc = stokes_dof_handler.end(); - typename DoFHandler::active_cell_iterator - temperature_cell = temperature_dof_handler.begin_active(); - - for (; cell!=endc; ++cell, ++temperature_cell) - { - stokes_fe_values.reinit (cell); - temperature_fe_values.reinit (temperature_cell); + temperature_cell (&triangulation, + cell->level(), + cell->index(), + &temperature_dof_handler); + data.temperature_fe_values.reinit (temperature_cell); - local_matrix = 0; - local_rhs = 0; + data.local_matrix = 0; + data.local_rhs = 0; - temperature_fe_values.get_function_values (old_temperature_solution, - old_temperature_values); + data.temperature_fe_values.get_function_values (old_temperature_solution, + data.old_temperature_values); - for (unsigned int q=0; qrebuild_matrices + // flag. + for (unsigned int k=0; krebuild_matrices - // flag. - for (unsigned int k=0; k gravity = ( (dim == 2) ? (Point (0,1)) : - (Point (0,0,1)) ); - for (unsigned int i=0; ilocal_dof_indices. - // Again, we let the ConstraintMatrix - // class do the insertion of the cell - // matrix elements to the global - // matrix, which already condenses the - // hanging node constraints. - cell->get_dof_indices (local_dof_indices); - - if (rebuild_stokes_matrix == true) - stokes_constraints.distribute_local_to_global (local_matrix, - local_rhs, - local_dof_indices, - stokes_matrix, - stokes_rhs); - else - stokes_constraints.distribute_local_to_global (local_rhs, - local_dof_indices, - stokes_rhs); + if (rebuild_stokes_matrix) + for (unsigned int i=0; i gravity = ( (dim == 2) ? (Point (0,1)) : + (Point (0,0,1)) ); + for (unsigned int i=0; ilocal_dof_indices. + // Again, we let the ConstraintMatrix + // class do the insertion of the cell + // matrix elements to the global + // matrix, which already condenses the + // hanging node constraints. + cell->get_dof_indices (data.local_dof_indices); +} + +template +void +BoussinesqFlowProblem:: +copy_local_to_global_stokes_system (const AssemblerData::StokesSystem &data) +{ + if (rebuild_stokes_matrix == true) + stokes_constraints.distribute_local_to_global (local_matrix, + local_rhs, + local_dof_indices, + stokes_matrix, + stokes_rhs); + else + stokes_constraints.distribute_local_to_global (local_rhs, + local_dof_indices, + stokes_rhs); +} + + + +template +void BoussinesqFlowProblem::assemble_stokes_system () +{ + std::cout << " Assembling..." << std::flush; + + if (rebuild_stokes_matrix == true) + stokes_matrix=0; + + stokes_rhs=0; + + const QGauss quadrature_formula (stokes_degree+2); + + AssemblerData::StokesSystem + data_template (stokes_fe, quadrature_formula, + (update_values | + update_quadrature_points | + update_JxW_values | + (rebuild_stokes_matrix == true + ? 
+ update_gradients + : + UpdateFlags(0))), + temperature_fe, + update_values); + + WorkStream().run (stokes_dof_handler.begin_active(), + stokes_dof_handler.end(), + std_cxx0x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_system, + this, + _1, + _2), + std_cxx0x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_system, + this, + _1), + data_template); + rebuild_stokes_matrix = false; std::cout << std::endl; @@ -1994,7 +2156,6 @@ void BoussinesqFlowProblem:: const unsigned int n_q_points = quadrature_formula.size(); Vector local_rhs (dofs_per_cell); - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); std::vector local_dof_indices (dofs_per_cell); diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc index 4b0b291cfa..c92672ba87 100644 --- a/deal.II/examples/step-32/step-32.cc +++ b/deal.II/examples/step-32/step-32.cc @@ -20,9 +20,10 @@ #include #include #include +#include #include -#include +#include #include #include #include @@ -35,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -57,7 +59,7 @@ #include - // Time measurements. + // Time measurements. #include #include @@ -196,7 +198,7 @@ namespace LinearSolvers const PreconditionerMp &mp_preconditioner; const PreconditionerA &a_preconditioner; mutable TrilinosWrappers::MPI::Vector tmp; -}; + }; @@ -206,10 +208,10 @@ namespace LinearSolvers const PreconditionerMp &Mppreconditioner, const PreconditionerA &Apreconditioner) : - stokes_matrix (&S), - mp_preconditioner (Mppreconditioner), - a_preconditioner (Apreconditioner), - tmp (stokes_matrix->block(1,1).row_partitioner()) + stokes_matrix (&S), + mp_preconditioner (Mppreconditioner), + a_preconditioner (Apreconditioner), + tmp (stokes_matrix->block(1,1).row_partitioner()) {} @@ -228,6 +230,356 @@ namespace LinearSolvers +namespace Assembly +{ + namespace Scratch + { + template + struct StokesPreconditioner + { + StokesPreconditioner (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags update_flags); + StokesPreconditioner (const StokesPreconditioner &data); + + FEValues stokes_fe_values; + + std::vector > grad_phi_u; + std::vector phi_p; + }; + + template + StokesPreconditioner:: + StokesPreconditioner (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags update_flags) + : + stokes_fe_values (stokes_fe, stokes_quadrature, + update_flags), + grad_phi_u (stokes_fe.dofs_per_cell), + phi_p (stokes_fe.dofs_per_cell) + {} + + + + template + StokesPreconditioner:: + StokesPreconditioner (const StokesPreconditioner &scratch) + : + stokes_fe_values (scratch.stokes_fe_values.get_fe(), + scratch.stokes_fe_values.get_quadrature(), + scratch.stokes_fe_values.get_update_flags()), + grad_phi_u (scratch.grad_phi_u), + phi_p (scratch.phi_p) + {} + + + + template + struct StokesSystem : public StokesPreconditioner + { + StokesSystem (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags stokes_update_flags, + const FiniteElement &temperature_fe, + const UpdateFlags temperature_update_flags); + + StokesSystem (const StokesSystem &data); + + FEValues temperature_fe_values; + + std::vector > phi_u; + std::vector > grads_phi_u; + std::vector div_phi_u; + + std::vector old_temperature_values; + }; + + + template + StokesSystem:: + StokesSystem (const FiniteElement &stokes_fe, + const Quadrature &stokes_quadrature, + const UpdateFlags stokes_update_flags, + const FiniteElement &temperature_fe, + const UpdateFlags 
temperature_update_flags) + : + StokesPreconditioner (stokes_fe, stokes_quadrature, + stokes_update_flags), + temperature_fe_values (temperature_fe, stokes_quadrature, + temperature_update_flags), + phi_u (stokes_fe.dofs_per_cell), + grads_phi_u (stokes_fe.dofs_per_cell), + div_phi_u (stokes_fe.dofs_per_cell), + old_temperature_values (stokes_quadrature.n_quadrature_points) + {} + + + template + StokesSystem:: + StokesSystem (const StokesSystem &scratch) + : + StokesPreconditioner (scratch), + temperature_fe_values (scratch.temperature_fe_values.get_fe(), + scratch.temperature_fe_values.get_quadrature(), + scratch.temperature_fe_values.get_update_flags()), + phi_u (scratch.phi_u), + grads_phi_u (scratch.grads_phi_u), + div_phi_u (scratch.div_phi_u), + old_temperature_values (scratch.old_temperature_values) + {} + + + + template + struct TemperatureMatrix + { + TemperatureMatrix (const FiniteElement &temperature_fe, + const Quadrature &temperature_quadrature); + TemperatureMatrix (const TemperatureMatrix &data); + + FEValues temperature_fe_values; + + std::vector phi_T; + std::vector > grad_phi_T; + }; + + template + TemperatureMatrix:: + TemperatureMatrix (const FiniteElement &temperature_fe, + const Quadrature &temperature_quadrature) + : + temperature_fe_values (temperature_fe, temperature_quadrature, + update_values | update_gradients | + update_JxW_values), + phi_T (temperature_fe.dofs_per_cell), + grad_phi_T (temperature_fe.dofs_per_cell) + {} + + + template + TemperatureMatrix:: + TemperatureMatrix (const TemperatureMatrix &scratch) + : + temperature_fe_values (scratch.temperature_fe_values.get_fe(), + scratch.temperature_fe_values.get_quadrature(), + scratch.temperature_fe_values.get_update_flags()), + phi_T (scratch.phi_T), + grad_phi_T (scratch.grad_phi_T) + {} + + + template + struct TemperatureRHS + { + TemperatureRHS (const FiniteElement &temperature_fe, + const FiniteElement &stokes_fe, + const Quadrature &quadrature); + TemperatureRHS (const TemperatureRHS &data); + + FEValues temperature_fe_values; + FEValues stokes_fe_values; + + std::vector phi_T; + std::vector > grad_phi_T; + + std::vector > old_velocity_values; + std::vector > old_old_velocity_values; + + std::vector old_temperature_values; + std::vector old_old_temperature_values; + std::vector > old_temperature_grads; + std::vector > old_old_temperature_grads; + std::vector old_temperature_laplacians; + std::vector old_old_temperature_laplacians; + + std::vector gamma_values; + }; + + template + TemperatureRHS:: + TemperatureRHS (const FiniteElement &temperature_fe, + const FiniteElement &stokes_fe, + const Quadrature &quadrature) + : + temperature_fe_values (temperature_fe, quadrature, + update_values | + update_gradients | + update_hessians | + update_quadrature_points | + update_JxW_values), + stokes_fe_values (stokes_fe, quadrature, + update_values), + phi_T (temperature_fe.dofs_per_cell), + grad_phi_T (temperature_fe.dofs_per_cell), + + old_velocity_values (quadrature.n_quadrature_points), + old_old_velocity_values (quadrature.n_quadrature_points), + + old_temperature_values (quadrature.n_quadrature_points), + old_old_temperature_values(quadrature.n_quadrature_points), + old_temperature_grads(quadrature.n_quadrature_points), + old_old_temperature_grads(quadrature.n_quadrature_points), + old_temperature_laplacians(quadrature.n_quadrature_points), + old_old_temperature_laplacians(quadrature.n_quadrature_points), + + gamma_values (quadrature.n_quadrature_points) + {} + + + template + TemperatureRHS:: + TemperatureRHS 
(const TemperatureRHS &scratch) + : + temperature_fe_values (scratch.temperature_fe_values.get_fe(), + scratch.temperature_fe_values.get_quadrature(), + scratch.temperature_fe_values.get_update_flags()), + stokes_fe_values (scratch.stokes_fe_values.get_fe(), + scratch.stokes_fe_values.get_quadrature(), + scratch.stokes_fe_values.get_update_flags()), + phi_T (scratch.phi_T), + grad_phi_T (scratch.grad_phi_T), + + old_velocity_values (scratch.old_velocity_values), + old_old_velocity_values (scratch.old_old_velocity_values), + + old_temperature_values (scratch.old_temperature_values), + old_old_temperature_values (scratch.old_old_temperature_values), + old_temperature_grads (scratch.old_temperature_grads), + old_old_temperature_grads (scratch.old_old_temperature_grads), + old_temperature_laplacians (scratch.old_temperature_laplacians), + old_old_temperature_laplacians (scratch.old_old_temperature_laplacians), + + gamma_values (scratch.gamma_values) + {} + } + + namespace CopyData + { + template + struct StokesPreconditioner + { + StokesPreconditioner (const FiniteElement &stokes_fe); + StokesPreconditioner (const StokesPreconditioner &data); + + FullMatrix local_matrix; + std::vector local_dof_indices; + }; + + template + StokesPreconditioner:: + StokesPreconditioner (const FiniteElement &stokes_fe) + : + local_matrix (stokes_fe.dofs_per_cell, + stokes_fe.dofs_per_cell), + local_dof_indices (stokes_fe.dofs_per_cell) + {} + + + + template + StokesPreconditioner:: + StokesPreconditioner (const StokesPreconditioner &data) + : + local_matrix (data.local_matrix), + local_dof_indices (data.local_dof_indices) + {} + + + + template + struct StokesSystem : public StokesPreconditioner + { + StokesSystem (const FiniteElement &stokes_fe); + StokesSystem (const StokesSystem &data); + + Vector local_rhs; + }; + + + template + StokesSystem:: + StokesSystem (const FiniteElement &stokes_fe) + : + StokesPreconditioner (stokes_fe), + local_rhs (stokes_fe.dofs_per_cell) + {} + + + template + StokesSystem:: + StokesSystem (const StokesSystem &data) + : + StokesPreconditioner (data), + local_rhs (data.local_rhs) + {} + + + + template + struct TemperatureMatrix + { + TemperatureMatrix (const FiniteElement &temperature_fe); + TemperatureMatrix (const TemperatureMatrix &data); + + FullMatrix local_mass_matrix; + FullMatrix local_stiffness_matrix; + std::vector local_dof_indices; + }; + + template + TemperatureMatrix:: + TemperatureMatrix (const FiniteElement &temperature_fe) + : + local_mass_matrix (temperature_fe.dofs_per_cell, + temperature_fe.dofs_per_cell), + local_stiffness_matrix (temperature_fe.dofs_per_cell, + temperature_fe.dofs_per_cell), + local_dof_indices (temperature_fe.dofs_per_cell) + {} + + + template + TemperatureMatrix:: + TemperatureMatrix (const TemperatureMatrix &data) + : + local_mass_matrix (data.local_mass_matrix), + local_stiffness_matrix (data.local_stiffness_matrix), + local_dof_indices (data.local_dof_indices) + {} + + + template + struct TemperatureRHS + { + TemperatureRHS (const FiniteElement &temperature_fe); + TemperatureRHS (const TemperatureRHS &data); + + Vector local_rhs; + std::vector local_dof_indices; + }; + + template + TemperatureRHS:: + TemperatureRHS (const FiniteElement &temperature_fe) + : + local_rhs (temperature_fe.dofs_per_cell), + local_dof_indices (temperature_fe.dofs_per_cell) + {} + + + template + TemperatureRHS:: + TemperatureRHS (const TemperatureRHS &data) + : + local_rhs (data.local_rhs), + local_dof_indices (data.local_dof_indices) + {} + } +} + + + // 
@sect3{The BoussinesqFlowProblem class template} template class BoussinesqFlowProblem @@ -281,7 +633,7 @@ class BoussinesqFlowProblem TrilinosWrappers::BlockSparseMatrix stokes_matrix; TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix; - TrilinosWrappers::MPI::BlockVector stokes_solution; + TrilinosWrappers::BlockVector stokes_solution; TrilinosWrappers::BlockVector old_stokes_solution; TrilinosWrappers::MPI::BlockVector stokes_rhs; @@ -296,7 +648,7 @@ class BoussinesqFlowProblem TrilinosWrappers::SparseMatrix temperature_stiffness_matrix; TrilinosWrappers::SparseMatrix temperature_matrix; - TrilinosWrappers::MPI::Vector temperature_solution; + TrilinosWrappers::Vector temperature_solution; TrilinosWrappers::Vector old_temperature_solution; TrilinosWrappers::Vector old_old_temperature_solution; TrilinosWrappers::MPI::Vector temperature_rhs; @@ -307,8 +659,8 @@ class BoussinesqFlowProblem unsigned int timestep_number; std_cxx1x::shared_ptr Amg_preconditioner; - std_cxx1x::shared_ptr Mp_preconditioner; - std_cxx1x::shared_ptr T_preconditioner; + std_cxx1x::shared_ptr Mp_preconditioner; + std_cxx1x::shared_ptr T_preconditioner; bool rebuild_stokes_matrix; bool rebuild_stokes_preconditioner; @@ -316,6 +668,44 @@ class BoussinesqFlowProblem bool rebuild_temperature_preconditioner; TimerOutput computing_timer; + + void setup_stokes_matrix (); + void setup_stokes_preconditioner (); + void setup_temperature_matrices (); + + void + local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::StokesPreconditioner &scratch, + Assembly::CopyData::StokesPreconditioner &data); + + void copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner &data); + + + void + local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::StokesSystem &scratch, + Assembly::CopyData::StokesSystem &data); + + void copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem &data); + + + void + local_assemble_temperature_matrix (const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureMatrix &scratch, + Assembly::CopyData::TemperatureMatrix &data); + + void copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix &data); + + + + void + local_assemble_temperature_rhs (const std::pair global_T_range, + const double global_max_velocity, + const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureRHS &scratch, + Assembly::CopyData::TemperatureRHS &data); + + void copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS &data); }; @@ -326,8 +716,7 @@ template BoussinesqFlowProblem::BoussinesqFlowProblem () : trilinos_communicator (Utilities::Trilinos::comm_world()), - pcout (std::cout, - Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)==0), + pcout (std::cout, Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)==0), triangulation (Triangulation::maximum_smoothing), @@ -363,8 +752,6 @@ double BoussinesqFlowProblem::get_maximal_velocity () const stokes_degree+1); const unsigned int n_q_points = quadrature_formula.size(); - BlockVector localized_stokes_solution (stokes_solution); - FEValues fe_values (stokes_fe, quadrature_formula, update_values); std::vector > velocity_values(n_q_points); @@ -380,7 +767,7 @@ double BoussinesqFlowProblem::get_maximal_velocity () const Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)) { 
fe_values.reinit (cell); - fe_values[velocities].get_function_values (localized_stokes_solution, + fe_values[velocities].get_function_values (stokes_solution, velocity_values); for (unsigned int q=0; q &old_temperature, } +template +void BoussinesqFlowProblem::setup_stokes_matrix () +{ + stokes_matrix.clear (); + + TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner); + + Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); + + for (unsigned int c=0; cNumMyRows() + << ", nnz: " + << stokes_matrix.block(0,0).matrix->NumMyNonzeros() + << std::endl;*/ + +} + + + +template +void BoussinesqFlowProblem::setup_stokes_preconditioner () +{ + Amg_preconditioner.reset (); + Mp_preconditioner.reset (); + + stokes_preconditioner_matrix.clear (); + + TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner); + + Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); + for (unsigned int c=0; c +void BoussinesqFlowProblem::setup_temperature_matrices () +{ + T_preconditioner.reset (); + temperature_mass_matrix.clear (); + temperature_stiffness_matrix.clear (); + temperature_matrix.clear (); + + TrilinosWrappers::SparsityPattern sp (temperature_partitioner); + DoFTools::make_sparsity_pattern (temperature_dof_handler, sp, + temperature_constraints, false, + Utilities::Trilinos:: + get_this_mpi_process(trilinos_communicator)); + sp.compress(); + + temperature_matrix.reinit (sp); + temperature_mass_matrix.reinit (sp); + temperature_stiffness_matrix.reinit (sp); +} + + // @sect4{BoussinesqFlowProblem::setup_dofs} @@ -561,7 +1031,8 @@ void BoussinesqFlowProblem::setup_dofs () std::vector stokes_sub_blocks (dim+1,0); stokes_sub_blocks[dim] = 1; - GridTools::partition_triangulation (Utilities::Trilinos::get_n_mpi_processes(trilinos_communicator), + GridTools::partition_triangulation (Utilities::Trilinos:: + get_n_mpi_processes(trilinos_communicator), triangulation); { @@ -588,7 +1059,7 @@ void BoussinesqFlowProblem::setup_dofs () temperature_constraints); temperature_constraints.close (); } - + std::vector stokes_dofs_per_block (2); DoFTools::count_dofs_per_block (stokes_dof_handler, stokes_dofs_per_block, stokes_sub_blocks); @@ -628,53 +1099,7 @@ void BoussinesqFlowProblem::setup_dofs () Epetra_Map map_p(n_p, n_local_pressures, 0, trilinos_communicator); stokes_partitioner.push_back (map_p); } - { - stokes_matrix.clear (); - - TrilinosWrappers::BlockSparsityPattern sp (stokes_partitioner); - - Table<2,DoFTools::Coupling> coupling (dim+1, dim+1); - - for (unsigned int c=0; c coupling (dim+1, dim+1); - for (unsigned int c=0; c::setup_dofs () Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), 0, trilinos_communicator); - { - T_preconditioner.reset (); - temperature_mass_matrix.clear (); - temperature_stiffness_matrix.clear (); - temperature_matrix.clear (); - - TrilinosWrappers::SparsityPattern sp (temperature_partitioner); - DoFTools::make_sparsity_pattern (temperature_dof_handler, sp, - temperature_constraints, false, - Utilities::Trilinos:: - get_this_mpi_process(trilinos_communicator)); - sp.compress(); - - temperature_matrix.reinit (sp); - temperature_mass_matrix.reinit (sp); - temperature_stiffness_matrix.reinit (sp); - } + + if (Utilities::Trilinos::get_n_mpi_processes(trilinos_communicator) == 1) + { + Threads::TaskGroup<> tasks; + tasks += Threads::new_task (&BoussinesqFlowProblem::setup_stokes_matrix, + *this); + tasks += Threads::new_task (&BoussinesqFlowProblem::setup_stokes_preconditioner, + *this); + tasks += Threads::new_task 
(&BoussinesqFlowProblem::setup_temperature_matrices, + *this); + tasks.join_all (); + } + else + { + setup_stokes_matrix (); + setup_stokes_preconditioner (); + setup_temperature_matrices (); + } stokes_solution.reinit (stokes_partitioner); old_stokes_solution.reinit (stokes_partitioner); @@ -716,68 +1142,91 @@ void BoussinesqFlowProblem::setup_dofs () template void -BoussinesqFlowProblem::assemble_stokes_preconditioner () +BoussinesqFlowProblem:: +local_assemble_stokes_preconditioner (const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::StokesPreconditioner &scratch, + Assembly::CopyData::StokesPreconditioner &data) { - stokes_preconditioner_matrix = 0; - - const QGauss quadrature_formula (stokes_degree+2); - FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_JxW_values | - update_values | - update_gradients); const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; + const unsigned int n_q_points = scratch.stokes_fe_values.n_quadrature_points; - const unsigned int n_q_points = quadrature_formula.size(); + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); + scratch.stokes_fe_values.reinit (cell); + cell->get_dof_indices (data.local_dof_indices); - std::vector > phi_grad_u (dofs_per_cell); - std::vector phi_p (dofs_per_cell); + data.local_matrix = 0; - const FEValuesExtractors::Vector velocities (0); - const FEValuesExtractors::Scalar pressure (dim); + for (unsigned int q=0; q::active_cell_iterator - cell = stokes_dof_handler.begin_active(), - endc = stokes_dof_handler.end(); - for (; cell!=endc; ++cell) - if (cell->subdomain_id() == - Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)) - { - stokes_fe_values.reinit (cell); - // only need to recalculate local matrix - // if FEValues data has changed. 
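// ------------------------------------------------------------------
// Editorial aside (not part of this patch): the serial cell loop that
// is removed above is replaced below by the WorkStream pattern, in
// which the per-cell "local assembly" runs in parallel on private
// scratch/copy objects while only the "copy local to global" step is
// serialized. The following self-contained sketch illustrates that
// division of labor with plain C++11 threads; every name in it
// (worker, copy_mutex, global_sum, ...) is an invented placeholder,
// and it is not deal.II code.
// ------------------------------------------------------------------
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

int main ()
{
  const std::vector<double> cells (1000, 1.0); // stand-in for the cells of a triangulation
  double     global_sum = 0;                   // stand-in for the global matrix/vector
  std::mutex copy_mutex;                       // the "copier" stage is serialized

  // The "worker": computes local contributions independently and in parallel.
  auto worker = [&](const std::size_t begin, const std::size_t end)
  {
    double local_contribution = 0;             // per-task "copy data"
    for (std::size_t c = begin; c < end; ++c)
      local_contribution += 2.0 * cells[c];    // "local assembly" on each cell

    // The "copier": only the accumulation into the global object is locked.
    std::lock_guard<std::mutex> lock (copy_mutex);
    global_sum += local_contribution;
  };

  std::thread t1 (worker, 0, 500);
  std::thread t2 (worker, 500, 1000);
  t1.join ();
  t2.join ();

  std::cout << global_sum << std::endl;        // prints 2000
}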
- if (stokes_fe_values.get_cell_similarity() != CellSimilarity::translation) - { - local_matrix = 0; - for (unsigned int q=0; q +void +BoussinesqFlowProblem:: +copy_local_to_global_stokes_preconditioner (const Assembly::CopyData::StokesPreconditioner &data) +{ + stokes_constraints.distribute_local_to_global (data.local_matrix, + data.local_dof_indices, + stokes_preconditioner_matrix); +} - cell->get_dof_indices (local_dof_indices); - stokes_constraints.distribute_local_to_global (local_matrix, - local_dof_indices, - stokes_preconditioner_matrix); - } +template +void +BoussinesqFlowProblem::assemble_stokes_preconditioner () +{ + stokes_preconditioner_matrix = 0; + + const QGauss quadrature_formula(stokes_degree+2); + + typedef + FilteredIterator::active_cell_iterator> + SubdomainFilter; + + WorkStream:: + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + stokes_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + stokes_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_preconditioner, + this, + _1, + _2, + _3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_preconditioner, + this, + _1), + Assembly::Scratch:: + StokesPreconditioner (stokes_fe, quadrature_formula, + update_JxW_values | + update_values | + update_gradients), + Assembly::CopyData:: + StokesPreconditioner (stokes_fe)); + stokes_preconditioner_matrix.compress(); } @@ -795,29 +1244,27 @@ BoussinesqFlowProblem::build_stokes_preconditioner () assemble_stokes_preconditioner (); - Amg_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionAMG()); - std::vector > constant_modes; std::vector velocity_components (dim+1,true); velocity_components[dim] = false; DoFTools::extract_constant_modes (stokes_dof_handler, velocity_components, constant_modes); - TrilinosWrappers::PreconditionAMG::AdditionalData amg_data; - amg_data.constant_modes = constant_modes; - amg_data.elliptic = true; - amg_data.higher_order_elements = true; - amg_data.smoother_sweeps = 2; - amg_data.aggregation_threshold = 0.02; - - Amg_preconditioner->initialize(stokes_preconditioner_matrix.block(0,0), - amg_data); + Mp_preconditioner = std_cxx1x::shared_ptr + (new TrilinosWrappers::PreconditionILU()); + Amg_preconditioner = std_cxx1x::shared_ptr + (new TrilinosWrappers::PreconditionAMG()); - Mp_preconditioner = std_cxx1x::shared_ptr - (new TrilinosWrappers::PreconditionILU()); + TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data; + Amg_data.constant_modes = constant_modes; + Amg_data.elliptic = true; + Amg_data.higher_order_elements = true; + Amg_data.smoother_sweeps = 2; + Amg_data.aggregation_threshold = 0.02; Mp_preconditioner->initialize (stokes_preconditioner_matrix.block(1,1)); + Amg_preconditioner->initialize (stokes_preconditioner_matrix.block(0,0), + Amg_data); rebuild_stokes_preconditioner = false; @@ -827,7 +1274,92 @@ BoussinesqFlowProblem::build_stokes_preconditioner () - // @sect4{BoussinesqFlowProblem::assemble_stokes_system} +template +void +BoussinesqFlowProblem:: +local_assemble_stokes_system (const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::StokesSystem &scratch, + Assembly::CopyData::StokesSystem &data) +{ + const unsigned int dofs_per_cell = scratch.stokes_fe_values.get_fe().dofs_per_cell; + const unsigned int n_q_points = 
scratch.stokes_fe_values.n_quadrature_points; + + const FEValuesExtractors::Vector velocities (0); + const FEValuesExtractors::Scalar pressure (dim); + + scratch.stokes_fe_values.reinit (cell); + + typename DoFHandler::active_cell_iterator + temperature_cell (&triangulation, + cell->level(), + cell->index(), + &temperature_dof_handler); + scratch.temperature_fe_values.reinit (temperature_cell); + + if (rebuild_stokes_matrix) + data.local_matrix = 0; + data.local_rhs = 0; + + scratch.temperature_fe_values.get_function_values (old_temperature_solution, + scratch.old_temperature_values); + + for (unsigned int q=0; q gravity = ( (dim == 2) ? (Point (0,1)) : + (Point (0,0,1)) ); + for (unsigned int i=0; iget_dof_indices (data.local_dof_indices); +} + + + +template +void +BoussinesqFlowProblem:: +copy_local_to_global_stokes_system (const Assembly::CopyData::StokesSystem &data) +{ + if (rebuild_stokes_matrix == true) + stokes_constraints.distribute_local_to_global (data.local_matrix, + data.local_rhs, + data.local_dof_indices, + stokes_matrix, + stokes_rhs); + else + stokes_constraints.distribute_local_to_global (data.local_rhs, + data.local_dof_indices, + stokes_rhs); +} + + + +// @sect4{BoussinesqFlowProblem::assemble_stokes_system} template void BoussinesqFlowProblem::assemble_stokes_system () { @@ -841,121 +1373,113 @@ void BoussinesqFlowProblem::assemble_stokes_system () stokes_rhs=0; const QGauss quadrature_formula(stokes_degree+2); - FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_values | - update_quadrature_points | - update_JxW_values | - (rebuild_stokes_matrix == true - ? - update_gradients - : - UpdateFlags(0))); + + typedef + FilteredIterator::active_cell_iterator> + SubdomainFilter; + + WorkStream:: + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + stokes_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + stokes_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_stokes_system, + this, + _1, + _2, + _3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_stokes_system, + this, + _1), + Assembly::Scratch:: + StokesSystem (stokes_fe, quadrature_formula, + (update_values | + update_quadrature_points | + update_JxW_values | + (rebuild_stokes_matrix == true + ? 
+ update_gradients + : + UpdateFlags(0))), + temperature_fe, + update_values), + Assembly::CopyData:: + StokesSystem (stokes_fe)); - FEValues temperature_fe_values (temperature_fe, quadrature_formula, - update_values); + stokes_matrix.compress(); + stokes_rhs.compress(); - const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell; + rebuild_stokes_matrix = false; + + pcout << std::endl; + computing_timer.exit_section(); +} - const unsigned int n_q_points = quadrature_formula.size(); - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - Vector local_rhs (dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); - std::vector old_temperature_values(n_q_points); - std::vector > phi_u (dofs_per_cell); - std::vector > grads_phi_u (dofs_per_cell); - std::vector div_phi_u (dofs_per_cell); - std::vector phi_p (dofs_per_cell); - const FEValuesExtractors::Vector velocities (0); - const FEValuesExtractors::Scalar pressure (dim); + // @sect4{BoussinesqFlowProblem::assemble_temperature_system} +template +void BoussinesqFlowProblem:: +local_assemble_temperature_matrix (const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureMatrix &scratch, + Assembly::CopyData::TemperatureMatrix &data) +{ + const unsigned int dofs_per_cell = scratch.temperature_fe_values.get_fe().dofs_per_cell; + const unsigned int n_q_points = scratch.temperature_fe_values.n_quadrature_points; - typename DoFHandler::active_cell_iterator - cell = stokes_dof_handler.begin_active(), - endc = stokes_dof_handler.end(); - typename DoFHandler::active_cell_iterator - temperature_cell = temperature_dof_handler.begin_active(); + scratch.temperature_fe_values.reinit (cell); + cell->get_dof_indices (data.local_dof_indices); + + data.local_mass_matrix = 0; + data.local_stiffness_matrix = 0; - for (; cell!=endc; ++cell, ++temperature_cell) - if (cell->subdomain_id() == - Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)) - { - stokes_fe_values.reinit (cell); - temperature_fe_values.reinit (temperature_cell); - if (stokes_fe_values.get_cell_similarity() != CellSimilarity::translation) - local_matrix = 0; - local_rhs = 0; - - temperature_fe_values.get_function_values (old_temperature_solution, - old_temperature_values); - - for (unsigned int q=0; q gravity = ( (dim == 2) ? 
(Point (0,1)) : - (Point (0,0,1)) ); - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - - if (rebuild_stokes_matrix == true) - stokes_constraints.distribute_local_to_global (local_matrix, - local_rhs, - local_dof_indices, - stokes_matrix, - stokes_rhs); - else - stokes_constraints.distribute_local_to_global (local_rhs, - local_dof_indices, - stokes_rhs); - } - - stokes_matrix.compress(); - stokes_rhs.compress(); - - rebuild_stokes_matrix = false; - - pcout << std::endl; - computing_timer.exit_section(); + } } +template +void +BoussinesqFlowProblem:: +copy_local_to_global_temperature_matrix (const Assembly::CopyData::TemperatureMatrix &data) +{ + temperature_constraints.distribute_local_to_global (data.local_mass_matrix, + data.local_dof_indices, + temperature_mass_matrix); + temperature_constraints.distribute_local_to_global (data.local_stiffness_matrix, + data.local_dof_indices, + temperature_stiffness_matrix); +} - - // @sect4{BoussinesqFlowProblem::assemble_temperature_system} template void BoussinesqFlowProblem::assemble_temperature_matrix () { @@ -967,80 +1491,177 @@ void BoussinesqFlowProblem::assemble_temperature_matrix () temperature_stiffness_matrix = 0; const QGauss quadrature_formula(temperature_degree+2); - FEValues temperature_fe_values (temperature_fe, quadrature_formula, - update_values | update_gradients | - update_JxW_values); - const unsigned int dofs_per_cell = temperature_fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); + typedef + FilteredIterator::active_cell_iterator> + SubdomainFilter; + + WorkStream:: + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + temperature_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + temperature_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_temperature_matrix, + this, + _1, + _2, + _3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_temperature_matrix, + this, + _1), + Assembly::Scratch:: + TemperatureMatrix (temperature_fe, quadrature_formula), + Assembly::CopyData:: + TemperatureMatrix (temperature_fe)); + + temperature_mass_matrix.compress(); + temperature_stiffness_matrix.compress(); + + rebuild_temperature_matrices = false; + rebuild_temperature_preconditioner = true; + + computing_timer.exit_section(); +} + + - FullMatrix local_mass_matrix (dofs_per_cell, dofs_per_cell); - FullMatrix local_stiffness_matrix (dofs_per_cell, dofs_per_cell); +template +void BoussinesqFlowProblem:: +local_assemble_temperature_rhs (const std::pair global_T_range, + const double global_max_velocity, + const typename DoFHandler::active_cell_iterator &cell, + Assembly::Scratch::TemperatureRHS &scratch, + Assembly::CopyData::TemperatureRHS &data) +{ + const bool use_bdf2_scheme = (timestep_number != 0); - std::vector local_dof_indices (dofs_per_cell); + const unsigned int dofs_per_cell = scratch.temperature_fe_values.get_fe().dofs_per_cell; + const unsigned int n_q_points = scratch.temperature_fe_values.n_quadrature_points; - std::vector phi_T (dofs_per_cell); - std::vector > grad_phi_T (dofs_per_cell); + EquationData::TemperatureRightHandSide temperature_right_hand_side; + + const FEValuesExtractors::Vector velocities (0); + + data.local_rhs = 0; + + scratch.temperature_fe_values.reinit (cell); typename DoFHandler::active_cell_iterator - cell = 
temperature_dof_handler.begin_active(), - endc = temperature_dof_handler.end(); - for (; cell!=endc; ++cell) - if (cell->subdomain_id() == - Utilities::Trilinos::get_this_mpi_process(trilinos_communicator) ) - { - temperature_fe_values.reinit (cell); - - if (temperature_fe_values.get_cell_similarity() != CellSimilarity::translation) - { - local_mass_matrix = 0; - local_stiffness_matrix = 0; + stokes_cell (&triangulation, + cell->level(), + cell->index(), + &stokes_dof_handler); + scratch.stokes_fe_values.reinit (stokes_cell); - for (unsigned int q=0; qget_dof_indices (local_dof_indices); + const double nu + = compute_viscosity (scratch.old_temperature_values, + scratch.old_old_temperature_values, + scratch.old_temperature_grads, + scratch.old_old_temperature_grads, + scratch.old_temperature_laplacians, + scratch.old_old_temperature_laplacians, + scratch.old_velocity_values, + scratch.old_old_velocity_values, + scratch.gamma_values, + global_max_velocity, + global_T_range.second - global_T_range.first, + cell->diameter()); + + for (unsigned int q=0; q ext_grad_T + = (use_bdf2_scheme ? + (scratch.old_temperature_grads[q] * + (1+time_step/old_time_step) + - + scratch.old_old_temperature_grads[q] * + time_step / old_time_step) + : + scratch.old_temperature_grads[q]); + + const Tensor<1,dim> extrapolated_u + = (use_bdf2_scheme ? + (scratch.old_velocity_values[q] * (1+time_step/old_time_step) - + scratch.old_old_velocity_values[q] * time_step/old_time_step) + : + scratch.old_velocity_values[q]); + + for (unsigned int i=0; iget_dof_indices (data.local_dof_indices); +} - rebuild_temperature_matrices = false; - rebuild_temperature_preconditioner = true; - computing_timer.exit_section(); +template +void +BoussinesqFlowProblem:: +copy_local_to_global_temperature_rhs (const Assembly::CopyData::TemperatureRHS &data) +{ + temperature_constraints.distribute_local_to_global (data.local_rhs, + data.local_dof_indices, + temperature_rhs); } - template void BoussinesqFlowProblem::assemble_temperature_system (const double maximal_velocity) { @@ -1072,159 +1693,36 @@ void BoussinesqFlowProblem::assemble_temperature_system (const double maxim temperature_rhs = 0; const QGauss quadrature_formula(temperature_degree+2); - FEValues temperature_fe_values (temperature_fe, quadrature_formula, - update_values | - update_gradients | - update_hessians | - update_quadrature_points | - update_JxW_values); - FEValues stokes_fe_values (stokes_fe, quadrature_formula, - update_values); - - const unsigned int dofs_per_cell = temperature_fe.dofs_per_cell; - const unsigned int n_q_points = quadrature_formula.size(); - - Vector local_rhs (dofs_per_cell); - FullMatrix local_matrix (dofs_per_cell, dofs_per_cell); - - std::vector local_dof_indices (dofs_per_cell); - - std::vector > old_velocity_values (n_q_points); - std::vector > old_old_velocity_values (n_q_points); - - std::vector old_temperature_values (n_q_points); - std::vector old_old_temperature_values(n_q_points); - std::vector > old_temperature_grads(n_q_points); - std::vector > old_old_temperature_grads(n_q_points); - std::vector old_temperature_laplacians(n_q_points); - std::vector old_old_temperature_laplacians(n_q_points); - - - EquationData::TemperatureRightHandSide temperature_right_hand_side; - std::vector gamma_values (n_q_points); - - std::vector phi_T (dofs_per_cell); - std::vector > grad_phi_T (dofs_per_cell); - const std::pair global_T_range = get_extrapolated_temperature_range(); - const TrilinosWrappers::BlockVector - localized_stokes_solution 
(stokes_solution); - const TrilinosWrappers::BlockVector - localized_old_stokes_solution (old_stokes_solution); - - const FEValuesExtractors::Vector velocities (0); - - typename DoFHandler::active_cell_iterator - cell = temperature_dof_handler.begin_active(), - endc = temperature_dof_handler.end(); - typename DoFHandler::active_cell_iterator - stokes_cell = stokes_dof_handler.begin_active(); - - for (; cell!=endc; ++cell, ++stokes_cell) - if (cell->subdomain_id() == - Utilities::Trilinos::get_this_mpi_process(trilinos_communicator) ) - { - local_rhs = 0; - - temperature_fe_values.reinit (cell); - stokes_fe_values.reinit (stokes_cell); - - temperature_fe_values.get_function_values (old_temperature_solution, - old_temperature_values); - temperature_fe_values.get_function_values (old_old_temperature_solution, - old_old_temperature_values); - - temperature_fe_values.get_function_gradients (old_temperature_solution, - old_temperature_grads); - temperature_fe_values.get_function_gradients (old_old_temperature_solution, - old_old_temperature_grads); - - temperature_fe_values.get_function_laplacians (old_temperature_solution, - old_temperature_laplacians); - temperature_fe_values.get_function_laplacians (old_old_temperature_solution, - old_old_temperature_laplacians); - - temperature_right_hand_side.value_list (temperature_fe_values.get_quadrature_points(), - gamma_values); - - stokes_fe_values[velocities].get_function_values (localized_stokes_solution, - old_velocity_values); - stokes_fe_values[velocities].get_function_values (localized_old_stokes_solution, - old_old_velocity_values); - - const double nu - = compute_viscosity (old_temperature_values, - old_old_temperature_values, - old_temperature_grads, - old_old_temperature_grads, - old_temperature_laplacians, - old_old_temperature_laplacians, - old_velocity_values, - old_old_velocity_values, - gamma_values, - maximal_velocity, - global_T_range.second - global_T_range.first, - cell->diameter()); - - for (unsigned int q=0; q ext_grad_T - = (use_bdf2_scheme ? - (old_temperature_grads[q] * - (1+time_step/old_time_step) - - - old_old_temperature_grads[q] * - time_step / old_time_step) - : - old_temperature_grads[q]); - - const Tensor<1,dim> extrapolated_u - = (use_bdf2_scheme ? 
- (old_velocity_values[q] * (1+time_step/old_time_step) - - old_old_velocity_values[q] * time_step/old_time_step) - : - old_velocity_values[q]); - - for (unsigned int i=0; iget_dof_indices (local_dof_indices); - temperature_constraints.distribute_local_to_global (local_rhs, - local_dof_indices, - temperature_rhs); - } + typedef + FilteredIterator::active_cell_iterator> + SubdomainFilter; + + WorkStream:: + run (SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + temperature_dof_handler.begin_active()), + SubdomainFilter (IteratorFilters::SubdomainEqualTo + (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator)), + temperature_dof_handler.end()), + std_cxx1x::bind (&BoussinesqFlowProblem:: + local_assemble_temperature_rhs, + this, + global_T_range, + maximal_velocity, + _1, + _2, + _3), + std_cxx1x::bind (&BoussinesqFlowProblem:: + copy_local_to_global_temperature_rhs, + this, + _1), + Assembly::Scratch:: + TemperatureRHS (temperature_fe, stokes_fe, quadrature_formula), + Assembly::CopyData:: + TemperatureRHS (temperature_fe)); temperature_rhs.compress(); } @@ -1320,29 +1818,35 @@ void BoussinesqFlowProblem::solve () SolverControl solver_control (stokes_matrix.m(), 1e-6*stokes_rhs.l2_norm()); - SolverGMRES - gmres (solver_control, - SolverGMRES::AdditionalData(100)); + SolverBicgstab + bicgstab (solver_control, false); + + TrilinosWrappers::MPI::BlockVector + distributed_stokes_solution (stokes_partitioner); + distributed_stokes_solution = stokes_solution; // now treat the hanging nodes correctly. - const unsigned int start = stokes_solution.block(1).local_range().first + - stokes_solution.block(0).size(); - const unsigned int end = stokes_solution.block(1).local_range().second + - stokes_solution.block(0).size(); + const unsigned int start = + distributed_stokes_solution.block(1).local_range().first + + distributed_stokes_solution.block(0).size(); + const unsigned int end = + distributed_stokes_solution.block(1).local_range().second + + distributed_stokes_solution.block(0).size(); for (unsigned int i=start; i::solve () 1e-8*temperature_rhs.l2_norm()); SolverCG cg (solver_control); - cg.solve (temperature_matrix, temperature_solution, + TrilinosWrappers::MPI::Vector + distributed_temperature_solution (temperature_partitioner); + distributed_temperature_solution = temperature_solution; + + cg.solve (temperature_matrix, distributed_temperature_solution, temperature_rhs, *T_preconditioner); - TrilinosWrappers::Vector localized_temperature_solution (temperature_solution); - temperature_constraints.distribute (localized_temperature_solution); - temperature_solution = localized_temperature_solution; + temperature_solution = distributed_temperature_solution; + temperature_constraints.distribute (temperature_solution); pcout << " " << solver_control.last_step() << " CG iterations for temperature" << std::endl; computing_timer.exit_section(); - double min_temperature = localized_temperature_solution(0), - max_temperature = localized_temperature_solution(0); + double min_temperature = temperature_solution(0), + max_temperature = temperature_solution(0); for (unsigned int i=1; i (min_temperature, - localized_temperature_solution(i)); + temperature_solution(i)); max_temperature = std::max (max_temperature, - localized_temperature_solution(i)); + temperature_solution(i)); } pcout << " Temperature range: " @@ -1418,8 +1925,6 @@ void BoussinesqFlowProblem::output_results () const ExcInternalError()); Vector joint_solution 
(joint_dof_handler.n_dofs()); - TrilinosWrappers::BlockVector localized_stokes_solution (stokes_solution); - TrilinosWrappers::Vector localized_temperature_solution (temperature_solution); if (Utilities::Trilinos::get_this_mpi_process(trilinos_communicator) == 0) { @@ -1448,7 +1953,7 @@ void BoussinesqFlowProblem::output_results () const local_stokes_dof_indices.size(), ExcInternalError()); joint_solution(local_joint_dof_indices[i]) - = localized_stokes_solution(local_stokes_dof_indices[joint_fe.system_to_base_index(i).second]); + = stokes_solution(local_stokes_dof_indices[joint_fe.system_to_base_index(i).second]); } else { @@ -1459,7 +1964,7 @@ void BoussinesqFlowProblem::output_results () const local_stokes_dof_indices.size(), ExcInternalError()); joint_solution(local_joint_dof_indices[i]) - = localized_temperature_solution(local_temperature_dof_indices[joint_fe.system_to_base_index(i).second]); + = temperature_solution(local_temperature_dof_indices[joint_fe.system_to_base_index(i).second]); } } } @@ -1501,12 +2006,10 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) computing_timer.enter_section ("Refine mesh structure, part 1"); Vector estimated_error_per_cell (triangulation.n_active_cells()); - TrilinosWrappers::Vector localized_temperature_solution (temperature_solution); - KellyErrorEstimator::estimate (temperature_dof_handler, QGauss(temperature_degree+1), typename FunctionMap::type(), - localized_temperature_solution, + temperature_solution, estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_fraction (triangulation, diff --git a/deal.II/examples/step-35/Makefile b/deal.II/examples/step-35/Makefile new file mode 100644 index 0000000000..5df8a9405b --- /dev/null +++ b/deal.II/examples/step-35/Makefile @@ -0,0 +1,154 @@ +# $Id: Makefile 16563 2008-08-15 16:08:28Z bangerth $ + + +# For the small projects Makefile, you basically need to fill in only +# four fields. +# +# The first is the name of the application. It is assumed that the +# application name is the same as the base file name of the single C++ +# file from which the application is generated. +target = step-35 + +# The second field determines whether you want to run your program in +# debug or optimized mode. The latter is significantly faster, but no +# run-time checking of parameters and internal states is performed, so +# you should set this value to `on' while you develop your program, +# and to `off' when running production computations. +debug-mode = on + + +# As third field, we need to give the path to the top-level deal.II +# directory. You need to adjust this to your needs. Since this path is +# probably the most often needed one in the Makefile internals, it is +# designated by a single-character variable, since that can be +# reference using $D only, i.e. without the parentheses that are +# required for most other parameters, as e.g. in $(target). +D = ../../ + + +# The last field specifies the names of data and other files that +# shall be deleted when calling `make clean'. Object and backup files, +# executables and the like are removed anyway. Here, we give a list of +# files in the various output formats that deal.II supports. +clean-up-files = *gmv *gnuplot *gpl *eps *pov *vtk + + + + +# +# +# Usually, you will not need to change anything beyond this point. 
+# +# +# The next statement tell the `make' program where to find the +# deal.II top level directory and to include the file with the global +# settings +include $D/common/Make.global_options + + +# Since the whole project consists of only one file, we need not +# consider difficult dependencies. We only have to declare the +# libraries which we want to link to the object file, and there need +# to be two sets of libraries: one for the debug mode version of the +# application and one for the optimized mode. Here we have selected +# the versions for 2d. Note that the order in which the libraries are +# given here is important and that your applications won't link +# properly if they are given in another order. +# +# You may need to augment the lists of libraries when compiling your +# program for other dimensions, or when using third party libraries +libs.g = $(lib-deal2-2d.g) \ + $(lib-lac.g) \ + $(lib-base.g) +libs.o = $(lib-deal2-2d.o) \ + $(lib-lac.o) \ + $(lib-base.o) + + +# We now use the variable defined above which switch between debug and +# optimized mode to select the set of libraries to link with. Included +# in the list of libraries is the name of the object file which we +# will produce from the single C++ file. Note that by default we use +# the extension .g.o for object files compiled in debug mode and .o for +# object files in optimized mode (or whatever the local default on your +# system is instead of .o). +ifeq ($(debug-mode),on) + libraries = $(target).g.$(OBJEXT) $(libs.g) +else + libraries = $(target).$(OBJEXT) $(libs.o) +endif + + +# Now comes the first production rule: how to link the single object +# file produced from the single C++ file into the executable. Since +# this is the first rule in the Makefile, it is the one `make' selects +# if you call it without arguments. +$(target) : $(libraries) + @echo ============================ Linking $@ + @$(CXX) -o $@$(EXEEXT) $^ $(LIBS) $(LDFLAGS) + + +# To make running the application somewhat independent of the actual +# program name, we usually declare a rule `run' which simply runs the +# program. You can then run it by typing `make run'. This is also +# useful if you want to call the executable with arguments which do +# not change frequently. You may then want to add them to the +# following rule: +run: $(target) + @echo ============================ Running $< + @./$(target)$(EXEEXT) + + +# As a last rule to the `make' program, we define what to do when +# cleaning up a directory. This usually involves deleting object files +# and other automatically created files such as the executable itself, +# backup files, and data files. Since the latter are not usually quite +# diverse, you needed to declare them at the top of this file. +clean: + -rm -f *.$(OBJEXT) *~ Makefile.dep $(target)$(EXEEXT) $(clean-up-files) + + +# Since we have not yet stated how to make an object file from a C++ +# file, we should do so now. Since the many flags passed to the +# compiler are usually not of much interest, we suppress the actual +# command line using the `at' sign in the first column of the rules +# and write the string indicating what we do instead. 
+./%.g.$(OBJEXT) : + @echo ==============debug========= $( $@ \ + || (rm -f $@ ; false) + @if test -s $@ ; then : else rm $@ ; fi + + +# To make the dependencies known to `make', we finally have to include +# them: +include Makefile.dep + + diff --git a/deal.II/examples/step-35/include/EqData.h b/deal.II/examples/step-35/include/EqData.h new file mode 100644 index 0000000000..d528e5d16c --- /dev/null +++ b/deal.II/examples/step-35/include/EqData.h @@ -0,0 +1,71 @@ +/* + Header file for the classes that define the exact solution + and driving force. + + These classes are much like any other Function class + so we are not going to comment on them + + by Abner Salgado. +*/ +#ifndef _EQ_DATA_H_ +#define _EQ_DATA_H_ + + +/* + We want to be able to select which component of vector + valued functions we are going to work on +*/ +#include "../include/MultiComponentFunction.h" + + +// this is dealii +#include + + +// We need to define sines and cosines +#include + + + +// This is ugly, we have to remove it +const double PI = std::acos( -1. ); + + + +// The Velocity function +template class Velocity: public Multi_Component_Function { + public: + Velocity(const double initial_time =0.0); + virtual double value(const Point &p, const unsigned int component = 0) const; + virtual Tensor<1,dim> gradient(const Point &p, const unsigned int component=0) const; + virtual void value_list( const std::vector< Point > &points, std::vector &values, + const unsigned int component = 0 ) const; + virtual void gradient_list( const std::vector< Point > &points, std::vector< Tensor<1,dim> > &gradients, + const unsigned int component = 0 ) const; +}; + + + +// The Pressure function +template class Pressure: public Function{ + public: + Pressure(const double initial_time = 0.0); + virtual double value(const Point &p, const unsigned int component = 0) const; + virtual Tensor<1,dim> gradient(const Point &p, const unsigned int component=0) const; + virtual void value_list( const std::vector< Point > &points, std::vector &values, + const unsigned int component = 0 ) const; + virtual void gradient_list( const std::vector< Point > &points, std::vector< Tensor<1,dim> > &gradients, + const unsigned int component = 0 ) const; +}; + + + +// The Force function +template class Force: public Multi_Component_Function{ + public: + Force( const double initial_time =0.0 ); + virtual double value( const Point &p, const unsigned int component = 0 ) const; + virtual void value_list( const std::vector< Point > &points, std::vector &values, const unsigned int component = 0 ) const; +}; + +#endif diff --git a/deal.II/examples/step-35/include/FileReader.h b/deal.II/examples/step-35/include/FileReader.h new file mode 100644 index 0000000000..4047aab37e --- /dev/null +++ b/deal.II/examples/step-35/include/FileReader.h @@ -0,0 +1,70 @@ +// This class is supposed to aid in the reading of parameter files +#ifndef _FILE_READER_H_ +#define _FILE_READER_H_ + + + +#include + + + +#include +#include + + + +using namespace dealii; + +enum Method_Formulation{ + METHOD_STANDARD, + METHOD_ROTATIONAL +}; + +class Data_Storage{ + public: + Data_Storage(); + ~Data_Storage(); + void read_data( char *filename ); + void print_usage(); + void print_status() const; + // The data itself + //// The type of method we want to use + Method_Formulation form; + //// physical data + double initial_time, + final_time, + Reynolds; + //// Time stepping data + double initial_dt, + final_dt, + dt_decrement; + //// Space discretization data + unsigned int n_of_global_refines, + 
pressure_degree; + //// Data to solve the velocity + unsigned int vel_max_iterations, + vel_Krylov_size, + vel_off_diagonals, + vel_update_prec; + double vel_eps, + vel_diag_strength; + //// Data to solve the projection + unsigned int proj_max_iterations, + proj_off_diagonals; + double proj_eps, + proj_diag_strength; + //// Data to do the pressure update step + unsigned int pres_max_iterations, + pres_off_diagonals; + double pres_eps, + pres_diag_strength; + //// Verbosity + bool verbose; + //// Frequency of the outputted data + unsigned int output; + + protected: + ParameterHandler prm; +}; + +#endif diff --git a/deal.II/examples/step-35/include/MultiComponentFunction.h b/deal.II/examples/step-35/include/MultiComponentFunction.h new file mode 100644 index 0000000000..22ba2939f4 --- /dev/null +++ b/deal.II/examples/step-35/include/MultiComponentFunction.h @@ -0,0 +1,51 @@ +/* + Header file for Multi Component Functions. + The whole purpose of this class to exist is so we do not have to write the function + void set_component( const unsigned int d ) + twice + + by Abner Salgado. +*/ +#ifndef _MULTI_COMPONENT_FUNCTION_H_ +#define _MULTI_COMPONENT_FUNCTION_H_ + + +/* + This is basically a workaround for vector valued functions + when we just want one of its components + so they are derived from the Function class +*/ +#include + + +using namespace dealii; + + +/* + The only funcitonality this class has is that it provides a common wrapper + for vector valued functions for the case when you only want to ask them for + one of their components +*/ +template class Multi_Component_Function: public Function { + public: + /* + Constructor. It does not do anything interesting but set the initial + time for the function to evaluate its values + */ + Multi_Component_Function( const double initial_time = 0. ); + /* + This is the whole reason this class exists. + It wouldn't seem logical to have this written twice. One for the + Velocity function and one for the Force function. + So we are instead going to derive these from this class + and so adding this functionality of selecting which component you + want to give the value of + */ + void set_component(const unsigned int d ); + protected: + // The current component we are working on + unsigned int component; +}; + + +#endif diff --git a/deal.II/examples/step-35/include/NavierStokes.h b/deal.II/examples/step-35/include/NavierStokes.h new file mode 100644 index 0000000000..13c8bfc7f3 --- /dev/null +++ b/deal.II/examples/step-35/include/NavierStokes.h @@ -0,0 +1,278 @@ +/* + Definition of the Projection Method for constant density + Navier Stokes + by Abner Salgado. +*/ +#ifndef _NAVIER_STOKES_H_ +#define _NAVIER_STOKES_H_ + + + + +/* + First we include the local files, where we have the definitions of + 1.- The exact solution + 2.- The class that reads runtime parameters from a file +*/ +#include "EqData.h" +#include "FileReader.h" + + + +/* + These includes are to get the quadrature + and to handle the convergence table +*/ +#include +#include + + + +/* + The includes needed to manage multithreading +*/ +#include +#include + + + +/* + Grid management includes. + Triangulation handler + Grid generator & refinement, accessor and iterators +*/ +#include +#include +#include +#include +#include +#include +#include + + + +/* + Degrees of freedom management includes + Handling, accessing, computing useful stuff from and for them and renumbering. 
+*/ +#include +#include +#include +#include +#include + + + +/* + Finite element management includes + Definition of finite elements and extraction of info from them + and from finite element functions. +*/ +#include +#include +#include + + + +/* + Linear algebra includes + Sparse matrices, vectors, preconditioners and solvers. +*/ +#include +#include +#include +#include +#include +#include +#include + + + +/* + Numerical algorithms includes + Assembly of right hand sides, computation of errors, output of the solution, etc +*/ +#include +#include +#include + + +/* + This class is the core of all the program + It is the implementation of the projection method for the Navier-Stokes equations. +*/ +template class Navier_Stokes_Projection{ + public: + /* + Constructor. It takes as a parameter a reference to a Data_Storage class, which is + basically a container of all the data this class needs to be defined (and some other stuff) + Here we basically just copy the needed data and/or assign the values that are needed. + */ + Navier_Stokes_Projection( const Data_Storage &data ); + /* + Destructor. Completely trivial. + */ + ~Navier_Stokes_Projection(); + /* + This function creates the triangulation and refines it the required number of times + It also initializes all the quantities that are mesh dependent but not time stepping dependent + i.e. the size of matrices and vectors. + */ + void Create_Triangulation( const unsigned int n_of_refines ); + /* + Having created a mesh and once the time step is set, the next step is to initialize the method + i.e. compute the matrices that are never going to change, load the initial data, etc. + This is what this function does. + */ + void Initialize(); + /* + This is the time marching function, which starting at t_0 advances in time using the projection method + with time step dt until T. + The boolean parameter that it takes is to enable information about what the function is doing at the present + moment, i.e. diffusion, projection substep; updating preconditioners etc. + This is useful mostly for debuggin purposes and so it is by default set to false + */ + void run( const bool verbose = false, const unsigned int n_of_plots = 10 ); + /* + Having reached the final time T, we want to measure the error that we have made. + This method is responsible for that. Saves the results in a ConvergenceTable object + which later we can print or compute things with it + */ + void Post_Process(); + /* + The whole reason for this class, and all this code after all, is to make convergence tests + with respect to the time discretization. For this reason we need to be able to vary the time step + we are going to work with. 
This method sets up the time step that is to be used in the given run + */ + void set_dt( const double ddt ); + + protected: + // The type of method + Method_Formulation type; + + // Discretization data + //// The polynomial degree + unsigned int deg; + //// The time step + double dt; + + // Physical data: + //// initial time, final time, Reynolds number + double t_0, T, Re; + //// The external driving force + Force rhs; + //// The exact velocity (to apply boundary values) + Velocity vel_exact; + //// The boundary conditions + std::map boundary_values; + + // Finite Element Spaces data + //// The mesh + Triangulation triangulation; + //// The DoF handlers + DoFHandler dof_handler_velocity, dof_handler_pressure; + //// The polynomial spaces + FE_Q fe_velocity, fe_pressure; + //// The quadrature formulae + QGauss quadrature_pressure, quadrature_velocity; + + /* + Linear Algebra Data + The sparsity patterns where the matrices will live + */ + SparsityPattern spar_pattern_velocity, spar_pattern_pressure, spar_pattern_pres_vel; + /* + The actual matrices. The projection matrix never changes. + For the velocity a part of the matrix never changes (neither in time nor for each component), + namely the part related with the time derivative and the diffusion + but, the advection part changes and so we need dim+1 matrices. One to store + the constant part, and dim for each matrix at each iteration + */ + SparseMatrix vel_Laplace_plus_Mass, vel_it_matrix[dim], vel_Mass, vel_Laplace, + pres_Laplace, pres_Mass, pres_Diff[dim]; + /* + We need to regularize the Laplace operator for the pressure + for that we use a constraint + */ + ConstraintMatrix pres_regularization; + //// The solutions at times n and (n-1) + Vector pres_n, pres_n_minus_1, phi_n, phi_n_minus_1, u_n[dim], u_n_minus_1[dim], + //// The time extrapolation of the velocity, used to make the semi-implicit time discretization + //// of the advection term + u_star[dim], + //// Right hand side for the component of the momentum equation + force[dim], + //// Temporary arrays + v_tmp, pres_tmp; + //// The preconditioners +// SparseILU prec_velocity[dim]; + SparseDirectUMFPACK prec_velocity[dim], prec_mass, prec_pressure; + + // The convergence table + ConvergenceTable convergence_table; + + // Exception we will throw if the time step is invalid + DeclException2( ExcInvalidTimeStep, double, double, <<" The time step "< Force::Force( const double initial_time ): Multi_Component_Function( initial_time ){ +} + + + +template void Force::value_list( const std::vector > &points, std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); +} + + + +template inline double Force::value(const Point &p, const unsigned int) const{ + double t = FunctionTime::get_time(), + cosx = std::cos( p(0) ), + sinx = std::sin( p(0) ), + cosy = std::cos( p(1) ), + siny = std::sin( p(1) ), + cost = std::cos(t), + sint = std::sin(t), + return_value = 0.; + switch( Multi_Component_Function::component ){ + case 0: + // y*sin(t)-x*cos(t)^2+cos(x)*sin(y)*sin(t) + return_value = p(1)*sint - p(0)*cost*cost + cosx*siny*sint; + break; + case 1: + // -x*sin(t)-y*cos(t)^2+sin(x)*cos(y)*sin(t) + return_value = -p(0)*sint - p(1)*cost*cost + sinx*cosy*sint ; + + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; +} + + + +// Velocity function methods +template Velocity::Velocity(const 
double initial_time): Multi_Component_Function( initial_time ){ +} + + + +template void Velocity::value_list( const std::vector > &points, std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); +} + + + +template inline double Velocity::value(const Point &p, const unsigned int) const{ + double return_value = std::cos( Function::get_time() ); + switch( Multi_Component_Function::component ){ + case 0: + // -y*cos(t) + return_value *= -p(1); + break; + case 1: + // x*cos(t) + return_value *= p(0); + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; +} + + + +template inline Tensor<1,dim> Velocity::gradient(const Point &p, const unsigned int) const{ + Tensor<1,dim> return_value; + switch( Multi_Component_Function::component ){ + // [0, -cos(t)] + case 0: + return_value[0] = 0.; + return_value[1] = -std::cos( Function::get_time() ); + break; + case 1: + // [cos(t), 0] + return_value[0] = std::cos( Function::get_time() ); + return_value[1] = 0.; + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; +} + + + +template void Velocity::gradient_list( const std::vector > &points, std::vector< Tensor<1,dim> > &gradients, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( gradients.size() == n_points, ExcDimensionMismatch( gradients.size(), n_points ) ); + for (unsigned int i=0; i::gradient( points[i] ); +} + + + +// Pressure function methods +template Pressure::Pressure(const double initial_time): Function(1,initial_time){} + + + +template inline double Pressure::value(const Point &p, const unsigned int) const{ + // sin(x-y+t) + return std::sin( p(0) )*std::sin( p(1) )*std::sin( Function::get_time() ); +} + + + +template inline Tensor<1,dim> Pressure::gradient(const Point &p, const unsigned int) const{ + // [cos(x)*sin(y)*sin(t), sin(x)*cos(y)*sin(t)] + return Point( std::cos( p(0) )*std::sin( p(1) )*std::sin( Function::get_time() ), + std::sin( p(0) )*std::cos( p(1) )*std::sin( Function::get_time() ) ); +} + + + +template void Pressure::value_list( const std::vector > &points, std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); +} + + + +template inline void Pressure::gradient_list( const std::vector > &points, std::vector< Tensor<1,dim> > &gradients, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( gradients.size() == n_points, ExcDimensionMismatch( gradients.size(), n_points ) ); + for (unsigned int i=0; i::gradient( points[i] ); +} + + + +// explicit template instantiation +template class Force; +template class Velocity; +template class Pressure; diff --git a/deal.II/examples/step-35/source/EqData_square.ccc b/deal.II/examples/step-35/source/EqData_square.ccc new file mode 100644 index 0000000000..2f52b5acf9 --- /dev/null +++ b/deal.II/examples/step-35/source/EqData_square.ccc @@ -0,0 +1,211 @@ +/* + Implementation of the classes that represent the exact solution + and external force. + + There's not much to say here. All these classes just represent + mathematical formulae + + by Abner Salgado. 
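  Editorial note (added here, not in the original file): read off from the
  value() and gradient() implementations below, the manufactured solution
  implemented in this file appears to be

     u(x,y,t) = (  \pi \sin(2\pi y) \sin^2(\pi x) \sin t ,
                  -\pi \sin(2\pi x) \sin^2(\pi y) \sin t ),
     p(x,y,t) = \cos(\pi x) \sin(\pi y) \sin t ,

  which is divergence free, since
     \partial_x u_1 = \pi^2 \sin(2\pi x) \sin(2\pi y) \sin t = -\partial_y u_2 ,
  and the Force class then appears to return the matching right hand side of
  the momentum equation for this pair.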
+*/ + + + +#include "../include/EqData.h" + + + +// Force function methods +template Force::Force( const double initial_time ): Multi_Component_Function( initial_time ){ +} + + + +template void Force::value_list( const std::vector > &points, std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); +} + + + +template inline double Force::value(const Point &p, const unsigned int) const{ + double t = FunctionTime::get_time(), + sin_2pi_x = std::sin( 2.*PI*p(0) ), + sin_2pi_y = std::sin( 2.*PI*p(1) ), + cos_2pi_x = std::cos( 2.*PI*p(0) ), + cos_2pi_y = std::cos( 2.*PI*p(1) ), + sin_pi_x = std::sin( PI*p(0) ), + sin_pi_y = std::sin( PI*p(1) ), + cos_pi_x = std::cos( PI*p(0) ), + cos_pi_y = std::cos( PI*p(1) ), + cos_t = std::cos( t ), + sin_t = std::sin( t ), + return_value = 0.; + switch( Multi_Component_Function::component ){ + case 0: + // Pi*sin(2*Pi*y)*sin(Pi*x)^2*cos(t) + // -2*Pi^3*sin(2*Pi*y)*cos(Pi*x)^2*sin(t) + // +6*Pi^3*sin(2*Pi*y)*sin(Pi*x)^2*sin(t) + // +2*Pi^3*sin(2*Pi*y)^2*sin(Pi*x)^3*sin(t)^2*cos(Pi*x) + // -2*Pi^3*sin(2*Pi*x)*sin(Pi*y)^2*sin(t)^2*cos(2*Pi*y)*sin(Pi*x)^2 + // -Pi*sin(Pi*x)*sin(Pi*y)*sin(t) + return_value = PI*sin_2pi_y*sin_pi_x*sin_pi_x*cos_t + - 2.*PI*PI*PI*sin_2pi_y*cos_pi_x*cos_pi_x*sin_t + + 6.*PI*PI*PI*sin_2pi_y*sin_pi_x*sin_pi_x*sin_t + + 2.*PI*PI*PI*sin_2pi_y*sin_2pi_y*sin_pi_x*sin_pi_x*sin_pi_x*sin_t*sin_t*cos_pi_x + - 2.*PI*PI*PI*sin_2pi_x*sin_pi_y*sin_pi_y*sin_t*sin_t*cos_2pi_y*sin_pi_x*sin_pi_x + - PI*sin_pi_x*sin_pi_y*sin_t; + + break; + case 1: + // -Pi*sin(2*Pi*x)*sin(Pi*y)^2*cos(t) + // -6*Pi^3*sin(2*Pi*x)*sin(Pi*y)^2*sin(t) + // +2*Pi^3*sin(2*Pi*x)*cos(Pi*y)^2*sin(t) + // +Pi*cos(Pi*x)*cos(Pi*y)*sin(t) + // -2*Pi^3*sin(2*Pi*y)*sin(Pi*x)^2*sin(t)^2*cos(2*Pi*x)*sin(Pi*y)^2 + // +2*Pi^3*sin(2*Pi*x)^2*sin(Pi*y)^3*sin(t)^2*cos(Pi*y) + return_value = -PI*sin_2pi_x*sin_pi_y*sin_pi_y*cos_t + - 6.*PI*PI*PI*sin_2pi_x*sin_pi_y*sin_pi_y*sin_t + + 2.*PI*PI*PI*sin_2pi_x*cos_pi_y*cos_pi_y*sin_t + + PI*cos_pi_x*cos_pi_y*sin_t + - 2.*PI*PI*PI*sin_2pi_y*sin_pi_x*sin_pi_x*sin_t*sin_t*cos_2pi_x*sin_pi_y*sin_pi_y + + 2.*PI*PI*PI*sin_2pi_x*sin_2pi_x*sin_pi_y*sin_pi_y*sin_pi_y*sin_t*sin_t*cos_pi_y; + + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; +} + + + +// Velocity function methods +template Velocity::Velocity(const double initial_time): Multi_Component_Function( initial_time ){ +} + + + +template void Velocity::value_list( const std::vector > &points, std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); +} + + + +template inline double Velocity::value(const Point &p, const unsigned int) const{ + double sin_2pi_x = std::sin( 2.*PI*p(0) ), + sin_2pi_y = std::sin( 2.*PI*p(1) ), + sin_pi_x = std::sin( PI*p(0) ), + sin_pi_y = std::sin( PI*p(1) ), + sin_t = std::sin( Function::get_time() ), + return_value = 0.; + switch( Multi_Component_Function::component ){ + case 0: + // pi*sin(2*pi*y)*sin(pi*x)^2*sin(t) + return_value = PI*sin_2pi_y*sin_pi_x*sin_pi_x*sin_t; + break; + case 1: + // -pi*sin(2*pi*x)*sin(pi*y)^2*sin(t) + return_value = -PI*sin_2pi_x*sin_pi_y*sin_pi_y*sin_t; + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; +} + + + +template 
inline Tensor<1,dim> Velocity::gradient(const Point &p, const unsigned int) const{ + Tensor<1,dim> return_value; + double sin_2pi_x = std::sin( 2.*PI*p(0) ), + sin_2pi_y = std::sin( 2.*PI*p(1) ), + cos_2pi_x = std::cos( 2.*PI*p(0) ), + cos_2pi_y = std::cos( 2.*PI*p(1) ), + sin_pi_x = std::sin( PI*p(0) ), + sin_pi_y = std::sin( PI*p(1) ), + cos_pi_x = std::cos( PI*p(0) ), + cos_pi_y = std::cos( PI*p(1) ), + sin_t = std::sin( Function::get_time() ); + switch( Multi_Component_Function::component ){ + // [2*Pi^2*sin(2*Pi*y)*sin(Pi*x)*sin(t)*cos(Pi*x), + // 2*Pi^2*cos(2*Pi*y)*sin(Pi*x)^2*sin(t)] + case 0: + return_value[0] = 2.*PI*PI*sin_2pi_y*sin_pi_x*sin_t*cos_pi_x; + return_value[1] = 2.*PI*PI*cos_2pi_y*sin_pi_x*sin_pi_x*sin_t; + break; + case 1: + // [-2*Pi^2*cos(2*Pi*x)*sin(Pi*y)^2*sin(t), + // -2*Pi^2*sin(2*Pi*x)*sin(Pi*y)*sin(t)*cos(Pi*y)] + return_value[0] = -2.*PI*PI*cos_2pi_x*sin_pi_y*sin_pi_y*sin_t; + return_value[1] = -2.*PI*PI*sin_2pi_x*sin_pi_y*sin_t*cos_pi_y; + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; +} + + + +template void Velocity::gradient_list( const std::vector > &points, std::vector< Tensor<1,dim> > &gradients, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( gradients.size() == n_points, ExcDimensionMismatch( gradients.size(), n_points ) ); + for (unsigned int i=0; i::gradient( points[i] ); +} + + + +// Pressure function methods +template Pressure::Pressure(const double initial_time): Function(1,initial_time){} + + + +template inline double Pressure::value(const Point &p, const unsigned int) const{ + // cos(pi*x)*sin(pi*y)*sin(t) + return std::cos( PI*p(0) )*std::sin( PI*p(1) )*std::sin( Function::get_time() ); +} + + + +template inline Tensor<1,dim> Pressure::gradient(const Point &p, const unsigned int) const{ + // || -sin(Pi*x)*Pi*sin(Pi*y)*sin(t) || + // || cos(Pi*x)*cos(Pi*y)*Pi*sin(t) || + return ( PI*std::sin( Function::get_time() ) )* Point( -std::sin( PI*p(0) )*std::sin( PI*p(1) ), + std::cos( PI*p(0) )*std::cos( PI*p(1) ) ) ; +} + + + +template void Pressure::value_list( const std::vector > &points, std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); +} + + + +template inline void Pressure::gradient_list( const std::vector > &points, std::vector< Tensor<1,dim> > &gradients, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( gradients.size() == n_points, ExcDimensionMismatch( gradients.size(), n_points ) ); + for (unsigned int i=0; i::gradient( points[i] ); +} + + + +// explicit template instantiation +template class Force; +template class Velocity; +template class Pressure; diff --git a/deal.II/examples/step-35/source/FileReader.cc b/deal.II/examples/step-35/source/FileReader.cc new file mode 100644 index 0000000000..ce58a8c36d --- /dev/null +++ b/deal.II/examples/step-35/source/FileReader.cc @@ -0,0 +1,187 @@ +#include "../include/FileReader.h" + + + +// Constructor. Here we set up the +// Data Format we are going to use +Data_Storage::Data_Storage(){ + prm.declare_entry( "Method_Form", "rotational", Patterns::Selection( "rotational|standard" ), + " Used to select the type of method that we are going to use. " ); + + // Physical data of the problem + prm.enter_subsection( "Physical data" ); + prm.declare_entry( "initial_time", "0.", Patterns::Double( 0. 
), " The initial time of the simulation. " ); + prm.declare_entry( "final_time", "1.", Patterns::Double( 0. ), " The final time of the simulation. " ); + prm.declare_entry( "Reynolds", "1.", Patterns::Double( 0. ), " The Reynolds number. " ); + prm.leave_subsection(); + + // Time stepping data of the problem + prm.enter_subsection( "Time step data" ); + prm.declare_entry( "initial_dt", "0.1", Patterns::Double( 0. ), " The initial time step size. " ); + prm.declare_entry( "final_dt", "5e-4", Patterns::Double( 0. ), " The final time step size. " ); + prm.declare_entry( "dt_decrement", "2.", Patterns::Double( 1.5 ), " The factor by which the time step will be divided. " ); + prm.leave_subsection(); + + // Space discretization data + prm.enter_subsection( "Space discretization" ); + prm.declare_entry( "n_of_refines", "5", Patterns::Integer( 1, 15), " The number of global refines we do on the mesh. " ); + prm.declare_entry( "pressure_fe_degree", "1", Patterns::Integer( 1, 5 ), " The polynomial degree for the pressure space. " ); + prm.leave_subsection(); + + // Velocity solution data + prm.enter_subsection( "Data solve velocity" ); + prm.declare_entry( "max_iterations", "1000", Patterns::Integer( 1, 1000 ), " The maximal number of iterations GMRES must make. " ); + prm.declare_entry( "eps", "1e-12", Patterns::Double( 0. ), " The stopping criterion. " ); + prm.declare_entry( "Krylov_size", "30", Patterns::Integer(1), " The size of the Krylov subspace to be used. " ); + prm.declare_entry( "off_diagonals", "60", Patterns::Integer(0), " The number of off-diagonal elements ILU must compute. " ); + prm.declare_entry( "diag_strength", "0.01", Patterns::Double( 0. ), " Diagonal strengthening coefficient. " ); + prm.declare_entry( "update_prec", "15", Patterns::Integer(1), " This number indicates how often we need to update the preconditioner" ); + prm.leave_subsection(); + + // Projection step data + prm.enter_subsection( "Data solve projection" ); + prm.declare_entry( "max_iterations", "1000", Patterns::Integer( 1, 1000 ), " The maximal number of iterations CG must make. " ); + prm.declare_entry( "eps", "1e-12", Patterns::Double( 0. ), " The stopping criterion. " ); + prm.declare_entry( "off_diagonals", "100", Patterns::Integer(1), " The number of off-diagonal elements ILU must compute" ); + prm.declare_entry( "diag_strength", "0.1", Patterns::Double( 0. ), " Diagonal strengthening coefficient. " ); + prm.leave_subsection(); + + // Pressure update data + prm.enter_subsection( "Data solve pressure update" ); + prm.declare_entry( "max_iterations", "1000", Patterns::Integer( 1, 1000 ), " The maximal number of iterations CG must make. " ); + prm.declare_entry( "eps", "1e-12", Patterns::Double( 0. ), " The stopping criterion. " ); + prm.declare_entry( "off_diagonals", "10", Patterns::Integer(0), " The number of off-diagonal elements that ILU must compute" ); + prm.declare_entry( "diag_strength", "0.", Patterns::Double(0), " Diagonal strengthening coefficient" ); + prm.leave_subsection(); + + // Verbosity of output + prm.declare_entry( "verbose", "true", Patterns::Bool(), " This indicates whether the output of the solution process should be verbose. " ); + + // How often we want the data to be outputted + prm.declare_entry( "output", "10", Patterns::Integer(1), " This indicates between how many time steps we print the solution. " ); +} + + + +// Destructor. Does nothing +Data_Storage::~Data_Storage(){ +} + + + +// Here is where all happens. 
We read all the data from the indicated file +void Data_Storage::read_data( char *filename ){ + std::ifstream file; + file.open( filename ); + if( not file ) + throw ExcFileNotOpen( filename ); + prm.read_input( file ); + + std::string token = prm.get( "Method_Form" ); + if( token == std::string("rotational") ) + form = METHOD_ROTATIONAL; + else + form = METHOD_STANDARD; + + // Physical data of the problem + prm.enter_subsection( "Physical data" ); + initial_time = prm.get_double( "initial_time" ); + final_time = prm.get_double( "final_time" ); + Reynolds = prm.get_double( "Reynolds" ); + prm.leave_subsection(); + + // Time stepping data of the problem + prm.enter_subsection( "Time step data" ); + initial_dt = prm.get_double( "initial_dt" ); + final_dt = prm.get_double( "final_dt" ); + dt_decrement = prm.get_double( "dt_decrement" ); + prm.leave_subsection(); + + // Space discretization data + prm.enter_subsection( "Space discretization" ); + n_of_global_refines = prm.get_integer( "n_of_refines" ); + pressure_degree = prm.get_integer( "pressure_fe_degree" ); + prm.leave_subsection(); + + // Velocity solution data + prm.enter_subsection( "Data solve velocity" ); + vel_max_iterations = prm.get_double( "max_iterations" ); + vel_eps = prm.get_double( "eps" ); + vel_Krylov_size = prm.get_integer( "Krylov_size" ); + vel_off_diagonals = prm.get_integer( "off_diagonals" ); + vel_diag_strength = prm.get_double( "diag_strength" ); + vel_update_prec = prm.get_integer( "update_prec" ); + prm.leave_subsection(); + + // Projection step data + prm.enter_subsection( "Data solve projection" ); + proj_max_iterations = prm.get_integer( "max_iterations" ); + proj_eps = prm.get_double( "eps" ); + proj_off_diagonals = prm.get_integer( "off_diagonals" ); + proj_diag_strength = prm.get_double( "diag_strength" ); + prm.leave_subsection(); + + // Pressure update data + prm.enter_subsection( "Data solve pressure update" ); + pres_max_iterations = prm.get_integer( "max_iterations" ); + pres_eps = prm.get_double( "eps" ); + pres_off_diagonals = prm.get_integer( "off_diagonals" ); + pres_diag_strength = prm.get_double( "diag_strength" ); + prm.leave_subsection(); + + // Verbosity + verbose = prm.get_bool( "verbose" ); + + // Output frequency + output = prm.get_integer( "output" ); + + file.close(); +} + + + +// Prints the current values of the data +// Mostly (if not only) used for debugging purposes +void Data_Storage::print_status() const{ + std::cout<<"Method Form = "<<( (form == METHOD_ROTATIONAL)?"rotational":"standard" )< Multi_Component_Function::Multi_Component_Function( const double initial_time ): + Function( 1, initial_time ), component(0) { +} + + + +// Set Component Function: Check that it is in range and then set it +template void Multi_Component_Function::set_component(const unsigned int d ){ + // Check if the requested component is correct + Assert( d= 2, ExcNotImplemented() ); + component = d; +} + + + +// explicit template instantiation +template class Multi_Component_Function; diff --git a/deal.II/examples/step-35/source/NavierStokes.cc b/deal.II/examples/step-35/source/NavierStokes.cc new file mode 100644 index 0000000000..88961bd709 --- /dev/null +++ b/deal.II/examples/step-35/source/NavierStokes.cc @@ -0,0 +1,792 @@ +/* + Implementation of the Navier_Stokes_projection class + by Abner Salgado. 
+*/ +#include "../include/NavierStokes.h" + + + +#include + +/// debug +bool show; +double Solve_time; +///---- + + +// Constructor +template Navier_Stokes_Projection::Navier_Stokes_Projection( const Data_Storage &data): + type( data.form ), deg( data.pressure_degree ), dt( data.initial_dt ), t_0( data.initial_time ), + T( data.final_time ), Re( data.Reynolds ), rhs( data.initial_time ), vel_exact( data.initial_time ), + dof_handler_velocity(triangulation), dof_handler_pressure(triangulation), + fe_velocity(deg+1), fe_pressure(deg), quadrature_pressure(deg+1), quadrature_velocity(deg+2), + vel_max_its( data.vel_max_iterations ), vel_Krylov_size( data.vel_Krylov_size ), + vel_off_diagonals( data.vel_off_diagonals ), + vel_update_prec( data.vel_update_prec ), vel_eps( data.vel_eps ), vel_diag_strength( data.vel_diag_strength), + proj_max_its( data.proj_max_iterations ), proj_off_diagonals( data.proj_off_diagonals ), + proj_eps( data.proj_eps ), proj_diag_strength( data.proj_diag_strength ), + pres_max_its( data.pres_max_iterations), pres_off_diagonals( data.pres_off_diagonals ), + pres_eps( data.pres_eps ), pres_diag_strength( data.pres_diag_strength ) +{ + // After having initialized the bunch of data we do nothing + // NOTE TO SELF: Do I need to do this check? + if(deg < 1) + std::cout<<" WARNING: The chosen pair of finite element spaces is not stable."< .5*T ) ), ExcInvalidTimeStep( dt, .5*T ) ); +} + + + +// Destructor +template Navier_Stokes_Projection::~Navier_Stokes_Projection(){ + dof_handler_velocity.clear(); + dof_handler_pressure.clear(); +} + + + +// Set time step +template void Navier_Stokes_Projection::set_dt( const double ddt ){ + // We just check that it is within the permitted limits + AssertThrow( not ( ( ddt <= 0. ) or ( ddt > .5*T ) ), ExcInvalidTimeStep( ddt, .5*T ) ); + dt = ddt; +} + + + +// Initialization of the velocity matrices and assembly of those that do not depend on dt +template void Navier_Stokes_Projection::init_velocity_matrices(){ + //// Init the sparsity pattern for the velocity + spar_pattern_velocity.reinit( dof_handler_velocity.n_dofs(), dof_handler_velocity.n_dofs(), + dof_handler_velocity.max_couplings_between_dofs() ); + DoFTools::make_sparsity_pattern( dof_handler_velocity, spar_pattern_velocity ); + spar_pattern_velocity.compress(); + + //// Init the matrices for the velocity + vel_Laplace_plus_Mass.reinit( spar_pattern_velocity ); + for( unsigned int d=0; d void Navier_Stokes_Projection::init_pressure_matrices(){ + //// Init the sparsity pattern for the pressure + spar_pattern_pressure.reinit( dof_handler_pressure.n_dofs(), dof_handler_pressure.n_dofs(), + dof_handler_pressure.max_couplings_between_dofs() ); + DoFTools::make_sparsity_pattern( dof_handler_pressure, spar_pattern_pressure ); + + // Before we close the sparsity pattern, we need to + // init the constraints for the Laplace operator on the pressure space + pres_regularization.clear(); +// DoFTools::make_hanging_node_constraints( dof_handler_pressure, pres_regularization ); //?? 
+ + // Add the only constraint that we have + pres_regularization.add_line(0); + + // close it + pres_regularization.close(); + + // condense the sparsity pattern + pres_regularization.condense( spar_pattern_pressure ); + + // Compress the sparsity pattern for the pressure + spar_pattern_pressure.compress(); + + //// Init the matrices for the pressure + pres_Laplace.reinit( spar_pattern_pressure ); + pres_Mass.reinit( spar_pattern_pressure ); + + // Now we assemble the matrices + // The Laplace operator is the projection matrix + MatrixCreator::create_laplace_matrix( dof_handler_pressure, quadrature_pressure, pres_Laplace ); + // The pressure mass matrix to do the pressure update + MatrixCreator::create_mass_matrix( dof_handler_pressure, quadrature_pressure, pres_Mass ); + + // Finally we condense the Laplace operator + pres_regularization.condense( pres_Laplace ); +} + + + +template void Navier_Stokes_Projection::init_gradient_operator(){ + //// Init the sparsity pattern for the gradient operator + spar_pattern_pres_vel.reinit( dof_handler_velocity.n_dofs(), dof_handler_pressure.n_dofs(), + dof_handler_velocity.max_couplings_between_dofs() ); + DoFTools::make_sparsity_pattern( dof_handler_velocity, dof_handler_pressure, spar_pattern_pres_vel ); + spar_pattern_pres_vel.compress(); + + /* + To assemble each component of the gradient operator + we need to make a loop over all cells and compute the products of + velocity * d_#(pressure) + where # is the current space component. For this reason we need two cell + iterators, one for the velocity and one for the pressure space. + */ + typename DoFHandler::active_cell_iterator cell_init = dof_handler_velocity.begin_active(), + cell_end = dof_handler_velocity.end(), + cell, + bogus_cell_init = dof_handler_pressure.begin_active(), + bogus_cell; + /* + The FEValues extractors. + For the velocity we need values, + For the pressure we only need gradients. + */ + FEValues fe_values_velocity( fe_velocity, quadrature_velocity, update_values | update_JxW_values ), + fe_values_pressure( fe_pressure, quadrature_velocity, update_gradients ); + + // Usual, and useful, abreviations + const unsigned int vel_dofs_per_cell = fe_velocity.dofs_per_cell, + pres_dofs_per_cell = fe_pressure.dofs_per_cell, + n_q_points = quadrature_velocity.size(); + + // The local gradient operator + FullMatrix local_grad( vel_dofs_per_cell, pres_dofs_per_cell ); + + // Local to global DoF's map + std::vector vel_local_dof_indices( vel_dofs_per_cell ), pres_local_dof_indices( pres_dofs_per_cell ); + + for( unsigned int d=0; dget_dof_indices( vel_local_dof_indices ); + bogus_cell->get_dof_indices( pres_local_dof_indices ); + + // local contributions + local_grad = 0.; + for( unsigned int q=0; q void Navier_Stokes_Projection::Create_Triangulation( const unsigned int n_of_refines ){ + // A disk + GridGenerator::hyper_ball( triangulation ); + static const HyperBallBoundary boundary; + triangulation.set_boundary( 0, boundary ); + +/* // Our domain is a unit square. 
+ GridGenerator::hyper_cube(triangulation);*/ + triangulation.refine_global( n_of_refines ); + std::cout<<" Number of active cells: "< void Navier_Stokes_Projection::plot_solution( const unsigned int step ){ + // This only works in 2d + Assert( dim==2, ExcNotImplemented() ); + + // We need to assemble the vorticy + FEValues fe_values( fe_velocity, quadrature_velocity, update_values | update_gradients | update_JxW_values ); + const unsigned int dofs_per_cell = fe_velocity.n_dofs_per_cell(), + n_q_points = quadrature_velocity.size(); + std::vector< Tensor<1,dim> > grad_vel_1( n_q_points ), grad_vel_2( n_q_points ); + std::vector< unsigned int> local_dof_indices( dofs_per_cell ); + Vector local_rhs( dofs_per_cell ); + double vorticity; + + typename DoFHandler::active_cell_iterator cell = dof_handler_velocity.begin_active(), + cend = dof_handler_velocity.end(); + + force[0] = 0.; + //We start the usual loop + for( ; cell not_eq cend; ++cell ){ + // reinit all the needed stuff + local_rhs = 0.; + fe_values.reinit( cell ); + cell->get_dof_indices( local_dof_indices ); + + // get the gradients of each function + fe_values.get_function_gradients( u_n[0], grad_vel_1 ); + fe_values.get_function_gradients( u_n[1], grad_vel_2 ); + + // usual loop over quad points and local dofs + for( unsigned int q=0; q cg( solver_control ); + static bool is_prec_initted = false; + static SparseILU prec; + if( not is_prec_initted ){ + prec.initialize( vel_Mass, SparseILU::AdditionalData( 1e-5, 70 ) ); + is_prec_initted = true; + } + cg.solve( vel_Mass, force[1], force[0], prec ); + + // Once we have the vorticity we can output it + DataOut data_out; + data_out.attach_dof_handler( dof_handler_velocity ); + data_out.add_data_vector( force[1], "vorticity" ); + data_out.build_patches(); + std::ostringstream filename; + filename<<"vorticity"<; diff --git a/deal.II/examples/step-35/source/main.cc b/deal.II/examples/step-35/source/main.cc new file mode 100644 index 0000000000..69d97384a5 --- /dev/null +++ b/deal.II/examples/step-35/source/main.cc @@ -0,0 +1,66 @@ +#include "../include/NavierStokes.h" + + +#include +#include + + + +int main( int argc, char **argv ){ + try{ + Data_Storage data; + if( argc<2 ){ + std::cout< test( data ); + test.Create_Triangulation( data.n_of_global_refines ); + timeval init_time, end_time; + gettimeofday( &init_time, 0 ); + for( double dt = data.initial_dt; dt >= data.final_dt; dt /= data.dt_decrement ){ + std::cout<<" dt = "< +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +// Finally we import all the dealii names to the global namespace +using namespace dealii; + + + +// @sect3{Run Time parameters} +// Since our method has several options that can be fine-tuned we decided to group all these into +// an external file, so that these can be determined at run-time.
    +// First, the formulation of the method, which we set as a enum. +// Next, we declare a class that is going to read and store all the parameters that our program +// needs to run. +namespace RunTimeParameters{ + enum Method_Formulation{ + METHOD_STANDARD, + METHOD_ROTATIONAL + }; + + class Data_Storage{ + public: + Data_Storage(); + ~Data_Storage(); + void read_data( const char *filename ); + Method_Formulation form; + double initial_time, + final_time, + Reynolds; + double initial_dt, + final_dt, + dt_decrement; + unsigned int n_of_global_refines, + pressure_degree; + unsigned int vel_max_iterations, + vel_Krylov_size, + vel_off_diagonals, + vel_update_prec; + double vel_eps, + vel_diag_strength; + unsigned int proj_max_iterations, + proj_off_diagonals; + double proj_eps, + proj_diag_strength; + unsigned int pres_max_iterations, + pres_off_diagonals; + double pres_eps, + pres_diag_strength; + bool verbose; + unsigned int output; + + protected: + ParameterHandler prm; + }; + +// In the constructor of this class we declare all the parameters. +// The details of how this works have been discussed somewhere else *** +// so let's not elaborate on that + + Data_Storage::Data_Storage(){ + prm.declare_entry( "Method_Form", "rotational", Patterns::Selection( "rotational|standard" ), + " Used to select the type of method that we are going to use. " ); + prm.enter_subsection( "Physical data" ); + prm.declare_entry( "initial_time", "0.", Patterns::Double( 0. ), " The initial time of the simulation. " ); + prm.declare_entry( "final_time", "1.", Patterns::Double( 0. ), " The final time of the simulation. " ); + prm.declare_entry( "Reynolds", "1.", Patterns::Double( 0. ), " The Reynolds number. " ); + prm.leave_subsection(); + + prm.enter_subsection( "Time step data" ); + prm.declare_entry( "initial_dt", "0.1", Patterns::Double( 0. ), " The initial time step size. " ); + prm.declare_entry( "final_dt", "5e-4", Patterns::Double( 0. ), " The final time step size. " ); + prm.declare_entry( "dt_decrement", "2.", Patterns::Double( 1.5 ), + " The factor by which the time step will be divided. " ); + prm.leave_subsection(); + + prm.enter_subsection( "Space discretization" ); + prm.declare_entry( "n_of_refines", "5", Patterns::Integer( 1, 15), + " The number of global refines we do on the mesh. " ); + prm.declare_entry( "pressure_fe_degree", "1", Patterns::Integer( 1, 5 ), + " The polynomial degree for the pressure space. " ); + prm.leave_subsection(); + + prm.enter_subsection( "Data solve velocity" ); + prm.declare_entry( "max_iterations", "1000", Patterns::Integer( 1, 1000 ), + " The maximal number of iterations GMRES must make. " ); + prm.declare_entry( "eps", "1e-12", Patterns::Double( 0. ), " The stopping criterion. " ); + prm.declare_entry( "Krylov_size", "30", Patterns::Integer(1), " The size of the Krylov subspace to be used. " ); + prm.declare_entry( "off_diagonals", "60", Patterns::Integer(0), + " The number of off-diagonal elements ILU must compute. " ); + prm.declare_entry( "diag_strength", "0.01", Patterns::Double( 0. ), + " Diagonal strengthening coefficient. " ); + prm.declare_entry( "update_prec", "15", Patterns::Integer(1), + " This number indicates how often we need to update the preconditioner" ); + prm.leave_subsection(); + + prm.enter_subsection( "Data solve projection" ); + prm.declare_entry( "max_iterations", "1000", Patterns::Integer( 1, 1000 ), + " The maximal number of iterations CG must make. " ); + prm.declare_entry( "eps", "1e-12", Patterns::Double( 0. 
), " The stopping criterion. " ); + prm.declare_entry( "off_diagonals", "100", Patterns::Integer(1), + " The number of off-diagonal elements ILU must compute" ); + prm.declare_entry( "diag_strength", "0.1", Patterns::Double( 0. ), " Diagonal strengthening coefficient. " ); + prm.leave_subsection(); + + prm.enter_subsection( "Data solve pressure update" ); + prm.declare_entry( "max_iterations", "1000", Patterns::Integer( 1, 1000 ), + " The maximal number of iterations CG must make. " ); + prm.declare_entry( "eps", "1e-12", Patterns::Double( 0. ), " The stopping criterion. " ); + prm.declare_entry( "off_diagonals", "10", Patterns::Integer(0), + " The number of off-diagonal elements that ILU must compute" ); + prm.declare_entry( "diag_strength", "0.", Patterns::Double(0), " Diagonal strengthening coefficient" ); + prm.leave_subsection(); + + prm.declare_entry( "verbose", "true", Patterns::Bool(), + " This indicates whether the output of the solution process should be verbose. " ); + + prm.declare_entry( "output", "10", Patterns::Integer(1), + " This indicates between how many time steps we print the solution. " ); + } + + Data_Storage::~Data_Storage(){} + + void Data_Storage::read_data( const char *filename ){ + std::ifstream file( filename ); + if( not file ) + throw ExcFileNotOpen( filename ); + prm.read_input( file ); + + std::string token = prm.get( "Method_Form" ); + if( token == std::string( "rotational" ) ) + form = METHOD_ROTATIONAL; + else + form = METHOD_STANDARD; + + prm.enter_subsection( "Physical data" ); + initial_time = prm.get_double( "initial_time" ); + final_time = prm.get_double( "final_time" ); + Reynolds = prm.get_double( "Reynolds" ); + prm.leave_subsection(); + + prm.enter_subsection( "Time step data" ); + initial_dt = prm.get_double( "initial_dt" ); + final_dt = prm.get_double( "final_dt" ); + dt_decrement = prm.get_double( "dt_decrement" ); + prm.leave_subsection(); + + prm.enter_subsection( "Space discretization" ); + n_of_global_refines = prm.get_integer( "n_of_refines" ); + pressure_degree = prm.get_integer( "pressure_fe_degree" ); + prm.leave_subsection(); + + prm.enter_subsection( "Data solve velocity" ); + vel_max_iterations = prm.get_double( "max_iterations" ); + vel_eps = prm.get_double( "eps" ); + vel_Krylov_size = prm.get_integer( "Krylov_size" ); + vel_off_diagonals = prm.get_integer( "off_diagonals" ); + vel_diag_strength = prm.get_double( "diag_strength" ); + vel_update_prec = prm.get_integer( "update_prec" ); + prm.leave_subsection(); + + prm.enter_subsection( "Data solve projection" ); + proj_max_iterations = prm.get_integer( "max_iterations" ); + proj_eps = prm.get_double( "eps" ); + proj_off_diagonals = prm.get_integer( "off_diagonals" ); + proj_diag_strength = prm.get_double( "diag_strength" ); + prm.leave_subsection(); + + prm.enter_subsection( "Data solve pressure update" ); + pres_max_iterations = prm.get_integer( "max_iterations" ); + pres_eps = prm.get_double( "eps" ); + pres_off_diagonals = prm.get_integer( "off_diagonals" ); + pres_diag_strength = prm.get_double( "diag_strength" ); + prm.leave_subsection(); + + verbose = prm.get_bool( "verbose" ); + + output = prm.get_integer( "output" ); + + file.close(); + } +} + + + +// @sect3{The Equation Data} +// Here we declare the initial and boundary conditions, as well as the right hand side. +namespace EquationData{ + // Because of our implementation, we do not take advantage of the capabilities of the library + // to handle vector valued problems. 
Whether this is a good or bad idea is another issue. The + // point here is that we want to be able to write an interface for the equation data that is + // somehow dimension indenpendent. To be able to do that, our functions should be able to know + // on which space component we are currently working, and we should be able to have a + // common interface to do that. The following class is an attempt in that direction. + template class MultiComponentFunction: public Function{ + public: + MultiComponentFunction( const double initial_time = 0. ); + void set_component( const unsigned int d ); + protected: + unsigned int comp; + }; + + template MultiComponentFunction::MultiComponentFunction( const double initial_time ): + Function( 1, initial_time ), comp(0){} + + template void MultiComponentFunction::set_component(const unsigned int d ){ + Assert( d class Velocity: public MultiComponentFunction{ + public: + Velocity( const double initial_time = 0.0 ); + virtual double value( const Point &p, const unsigned int component = 0 ) const; + virtual Tensor<1,dim> gradient( const Point &p, const unsigned int component = 0 ) const; + virtual void value_list( const std::vector< Point > &points, std::vector &values, + const unsigned int component = 0 ) const; + virtual void gradient_list( const std::vector< Point > &points, + std::vector< Tensor<1,dim> > &gradients, + const unsigned int component = 0 ) const; + }; + + template Velocity::Velocity( const double initial_time ): + MultiComponentFunction( initial_time ){} + + template void Velocity::value_list( const std::vector > &points, + std::vector &values, const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); + } + + template inline double Velocity::value( const Point &p, const unsigned int ) const{ + double return_value = std::cos( Function::get_time() ); + switch( MultiComponentFunction::comp ){ + case 0: + return_value *= -p(1); + break; + case 1: + return_value *= p(0); + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; + } + + template inline Tensor<1,dim> Velocity::gradient( const Point &p, const unsigned int ) const{ + Tensor<1,dim> return_value; + switch( MultiComponentFunction::comp ){ + case 0: + return_value[0] = 0.; + return_value[1] = -std::cos( Function::get_time() ); + break; + case 1: + return_value[0] = std::cos( Function::get_time() ); + return_value[1] = 0.; + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; + } + + template void Velocity::gradient_list( const std::vector > &points, + std::vector< Tensor<1,dim> > &gradients, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( gradients.size() == n_points, ExcDimensionMismatch( gradients.size(), n_points ) ); + for( unsigned int i=0; i::gradient( points[i] ); + } + + template class Pressure: public Function{ + public: + Pressure( const double initial_time = 0.0 ); + virtual double value( const Point &p, const unsigned int component = 0 ) const; + virtual Tensor<1,dim> gradient( const Point &p, const unsigned int component = 0 ) const; + virtual void value_list( const std::vector< Point > &points, std::vector &values, + const unsigned int component = 0 ) const; + virtual void gradient_list( const std::vector< Point > &points, std::vector< Tensor<1,dim> > &gradients, + const unsigned int component = 0 ) const; + }; + + 
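+  // As an illustrative sketch only (this exact code does not appear in the
+  // program): the point of the MultiComponentFunction interface declared
+  // above is that the solver can first select a space component and then
+  // treat the object like any other scalar function, for example when
+  // interpolating the exact velocity one component at a time:
+  //
+  //   EquationData::Velocity<dim> vel_exact (t_0);
+  //   for (unsigned int d=0; d<dim; ++d)
+  //     {
+  //       vel_exact.set_component (d);
+  //       VectorTools::interpolate (dof_handler_velocity, vel_exact, u_n[d]);
+  //     }
+  //
+  // Here dof_handler_velocity, u_n[d] and t_0 refer to members of the
+  // solver class introduced further down.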
template Pressure::Pressure( const double initial_time ): Function( 1, initial_time ){} + + template inline double Pressure::value( const Point &p, const unsigned int ) const{ + return std::sin( p(0) )*std::sin( p(1) )*std::sin( Function::get_time() ); + } + + template inline Tensor<1,dim> Pressure::gradient( const Point &p, const unsigned int ) const{ + return Point( std::cos( p(0) )*std::sin( p(1) )*std::sin( Function::get_time() ), + std::sin( p(0) )*std::cos( p(1) )*std::sin( Function::get_time() ) ); + } + + template void Pressure::value_list( const std::vector > &points, + std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); + } + + template inline void Pressure::gradient_list( const std::vector > &points, + std::vector< Tensor<1,dim> > &gradients, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( gradients.size() == n_points, ExcDimensionMismatch( gradients.size(), n_points ) ); + for (unsigned int i=0; i::gradient( points[i] ); + } + + template class Force: public MultiComponentFunction{ + public: + Force( const double initial_time =0.0 ); + virtual double value( const Point &p, const unsigned int component = 0 ) const; + virtual void value_list( const std::vector< Point > &points, std::vector &values, + const unsigned int component = 0 ) const; + }; + + template Force::Force( const double initial_time ): + MultiComponentFunction( initial_time ){} + + template void Force::value_list( const std::vector > &points, std::vector &values, + const unsigned int ) const{ + const unsigned int n_points = points.size(); + Assert( values.size() == n_points, ExcDimensionMismatch( values.size(), n_points ) ); + for (unsigned int i=0; i::value( points[i] ); + } + + template inline double Force::value( const Point &p, const unsigned int ) const{ + double t = Function::get_time(), + cosx = std::cos( p(0) ), + sinx = std::sin( p(0) ), + cosy = std::cos( p(1) ), + siny = std::sin( p(1) ), + cost = std::cos(t), + sint = std::sin(t), + return_value = 0.; + switch( MultiComponentFunction::comp ){ + case 0: + return_value = p(1)*sint - p(0)*cost*cost + cosx*siny*sint; + break; + case 1: + return_value = -p(0)*sint - p(1)*cost*cost + sinx*cosy*sint ; + + break; + default: + Assert( false, ExcNotImplemented() ); + }; + return return_value; + } +} + + + +// @sect3{The Navier_Stokes_Projection class} +// This is the main class of the program. It implements the various avatars of the projection +// methods for Navier-Stokes equations. +// The names for all the methods and attributes are self-explanatory. 
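+// In rough outline (a sketch of the expected sequence of calls, not a
+// verbatim copy of the run() implementation), every time step executes
+//
+//   interpolate_velocity ();        // extrapolate the velocity to u_star
+//   diffusion_step (reinit_prec);   // momentum (diffusion/advection) solve
+//   projection_step (reinit_prec);  // Poisson solve for the increment phi
+//   update_pressure (reinit_prec);  // standard or rotational pressure update
+//
+// where reinit_prec indicates whether the preconditioners are rebuilt in
+// that particular step.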
+template class Navier_Stokes_Projection{ + public: + Navier_Stokes_Projection( const RunTimeParameters::Data_Storage &data ); + ~Navier_Stokes_Projection(); + void run( const bool verbose = false, const unsigned int n_of_plots = 10 ); +/// + void Initialize(); + void set_dt( const double ddt ); + void Post_Process(); +/// + protected: + RunTimeParameters::Method_Formulation type; + + unsigned int deg; + double dt; + + double t_0, T, Re; + EquationData::Force rhs; + + EquationData::Velocity vel_exact; + std::map boundary_values; + + Triangulation triangulation; + DoFHandler dof_handler_velocity, dof_handler_pressure; + FE_Q fe_velocity, fe_pressure; + QGauss quadrature_pressure, quadrature_velocity; + + SparsityPattern spar_pattern_velocity, spar_pattern_pressure, spar_pattern_pres_vel; + SparseMatrix vel_Laplace_plus_Mass, vel_it_matrix[dim], vel_Mass, vel_Laplace, + vel_Advection, + pres_Laplace, pres_Mass, pres_Diff[dim]; + ConstraintMatrix pres_regularization; + Vector pres_n, pres_n_minus_1, phi_n, phi_n_minus_1, u_n[dim], u_n_minus_1[dim], + u_star[dim], + force[dim], + v_tmp, pres_tmp; + + SparseILU prec_velocity[dim]; + SparseDirectUMFPACK prec_mass, prec_pressure; + + ConvergenceTable convergence_table; + + DeclException2( ExcInvalidTimeStep, double, double, <<" The time step "<::active_cell_iterator, + typename DoFHandler::active_cell_iterator + > IteratorTuple; + typedef parallel::internal::SynchronousIterators SIterators; + struct InitGradPerTaskData{ + unsigned int d, vel_dpc, pres_dpc; + FullMatrix local_grad; + std::vector vel_local_dof_indices, pres_local_dof_indices; + InitGradPerTaskData( const unsigned int dd, const unsigned int vdpc, const unsigned int pdpc ): + d(dd), vel_dpc( vdpc ), pres_dpc( pdpc ), + local_grad( vdpc, pdpc ), vel_local_dof_indices( vdpc ), + pres_local_dof_indices( pdpc ){} + }; + struct InitGradScratchData{ + unsigned int nqp; + FEValues fe_val_vel, fe_val_pres; + InitGradScratchData( const FE_Q &fe_v, const FE_Q &fe_p, const QGauss &quad, + const UpdateFlags flags_v, const UpdateFlags flags_p ) : + nqp( quad.size() ), fe_val_vel( fe_v, quad, flags_v ), + fe_val_pres( fe_p, quad, flags_p ){} + InitGradScratchData( const InitGradScratchData &data ): nqp( data.nqp ), + fe_val_vel( data.fe_val_vel.get_fe(), data.fe_val_vel.get_quadrature(), + data.fe_val_vel.get_update_flags() ), + fe_val_pres( data.fe_val_pres.get_fe(), data.fe_val_pres.get_quadrature(), + data.fe_val_pres.get_update_flags() ) {} + }; + void assemble_one_cell_of_gradient( const SIterators &SI, InitGradScratchData &scratch, + InitGradPerTaskData &data ); + void copy_gradient_local_to_global( const InitGradPerTaskData &data ); + + inline void assemble_advection_term(); + struct AdvectionPerTaskData{ + FullMatrix local_advection; + std::vector local_dof_indices; + AdvectionPerTaskData( const unsigned int dpc ): local_advection( dpc, dpc ), local_dof_indices( dpc ) {} + }; + struct AdvectionScratchData{ + unsigned int nqp, dpc; + std::vector< Point > u_star_local; + std::vector< Tensor<1,dim> > grad_u_star; + std::vector u_star_tmp; + FEValues fe_val; + AdvectionScratchData( const FE_Q &fe, const QGauss &quad, const UpdateFlags flags ): + nqp( quad.size() ), dpc( fe.dofs_per_cell ), + u_star_local( nqp ), grad_u_star( nqp ), u_star_tmp( nqp ), + fe_val( fe, quad, flags ){} + AdvectionScratchData( const AdvectionScratchData &data ): nqp( data.nqp ), dpc( data.dpc ), + u_star_local( nqp ), grad_u_star( nqp ), u_star_tmp( nqp ), + fe_val( data.fe_val.get_fe(), data.fe_val.get_quadrature(), 
+ data.fe_val.get_update_flags() ) {} + }; + + void assemble_one_cell_of_advection( const typename DoFHandler::active_cell_iterator &cell, + AdvectionScratchData &scratch, AdvectionPerTaskData &data ); + void copy_advection_local_to_global( const AdvectionPerTaskData &data ); + inline void diffusion_component_solve( const unsigned int d ); + + inline void plot_solution( const unsigned int step ); +}; + + +template Navier_Stokes_Projection::~Navier_Stokes_Projection(){ + dof_handler_velocity.clear(); + dof_handler_pressure.clear(); +} + +template void Navier_Stokes_Projection::set_dt( const double ddt ){ + AssertThrow( not ( ( ddt <= 0. ) or ( ddt > .5*T ) ), ExcInvalidTimeStep( ddt, .5*T ) ); + dt = ddt; +} + + +// @sect4{ Navier_Stokes_Projection::Navier_Stokes_Projection } +// In the constructor, we just read all the data from the Data_Storage +// object that is passed as an argument, verify that the read data is reasonable +// and, finally, create the triangulation and load the initial data. +template Navier_Stokes_Projection::Navier_Stokes_Projection( + const RunTimeParameters::Data_Storage &data ): + type( data.form ), deg( data.pressure_degree ), dt( data.initial_dt ), t_0( data.initial_time ), + T( data.final_time ), Re( data.Reynolds ), rhs( data.initial_time ), + vel_exact( data.initial_time ), dof_handler_velocity( triangulation ), + dof_handler_pressure( triangulation ), fe_velocity( deg+1 ), fe_pressure( deg ), + quadrature_pressure( deg+1 ), quadrature_velocity( deg+2 ), + vel_max_its( data.vel_max_iterations ), vel_Krylov_size( data.vel_Krylov_size ), + vel_off_diagonals( data.vel_off_diagonals ), + vel_update_prec( data.vel_update_prec ), vel_eps( data.vel_eps ), + vel_diag_strength( data.vel_diag_strength), + proj_max_its( data.proj_max_iterations ), proj_off_diagonals( data.proj_off_diagonals ), + proj_eps( data.proj_eps ), proj_diag_strength( data.proj_diag_strength ), + pres_max_its( data.pres_max_iterations), pres_off_diagonals( data.pres_off_diagonals ), + pres_eps( data.pres_eps ), pres_diag_strength( data.pres_diag_strength ) +{ + if(deg < 1) + std::cout<<" WARNING: The chosen pair of finite element spaces is not stable."< .5*T ) ), ExcInvalidTimeStep( dt, .5*T ) ); + + Create_Triangulation( data.n_of_global_refines ); + Initialize(); +} + + +// @sect4{ Navier_Stokes_Projection::Create_Triangulation } +// The method that creates the triangulation and refines it the needed number of times. +// After creating the triangulation, it creates the mesh dependent data, i.e. it distributes +// degrees of freedom and renumbers them, and initializes the matrices and vectors +// that we will use. +template void Navier_Stokes_Projection::Create_Triangulation( const unsigned int n_of_refines ){ + GridGenerator::hyper_ball( triangulation ); + static const HyperBallBoundary boundary; + triangulation.set_boundary( 0, boundary ); + + triangulation.refine_global( n_of_refines ); + std::cout<<" Number of active cells: "< void Navier_Stokes_Projection::interpolate_velocity(){ + for( unsigned int d=0; dNavier_Stokes_Projection::diffusion_step} +// The implementation of a diffusion step. 
+template void Navier_Stokes_Projection::diffusion_step( const bool reinit_prec ){ + pres_tmp = pres_n; + pres_tmp.add(4./3., phi_n, -1./3., phi_n_minus_1); + pres_tmp *= -1.; + + assemble_advection_term(); + + for( unsigned int d=0; d tasks; + for(unsigned int d=0; d::AdditionalData( vel_diag_strength, vel_off_diagonals ) ); + tasks += Threads::new_task( &Navier_Stokes_Projection::diffusion_component_solve, *this, d ); + } + tasks.join_all(); +} + +template void Navier_Stokes_Projection::diffusion_component_solve( const unsigned int d){ + SolverControl solver_control( vel_max_its, vel_eps*force[d].l2_norm() ); + SolverGMRES<> gmres( solver_control, SolverGMRES<>::AdditionalData() ); + gmres.solve( vel_it_matrix[d], u_n[d], force[d], prec_velocity[d] ); +} + + +// @sect4{ The Navier_Stokes_Projection::assemble_advection_term method and related} +template void Navier_Stokes_Projection::assemble_advection_term(){ + vel_Advection = 0.; + AdvectionPerTaskData data( fe_velocity.dofs_per_cell ); + AdvectionScratchData scratch( fe_velocity, quadrature_velocity, + update_values | update_JxW_values | update_gradients ); + WorkStream::run( dof_handler_velocity.begin_active(), dof_handler_velocity.end(), *this, + &Navier_Stokes_Projection::assemble_one_cell_of_advection, + &Navier_Stokes_Projection::copy_advection_local_to_global, scratch, data); +} + +template void Navier_Stokes_Projection::assemble_one_cell_of_advection( + const typename DoFHandler::active_cell_iterator &cell, + AdvectionScratchData &scratch, AdvectionPerTaskData &data ){ + scratch.fe_val.reinit(cell); + cell->get_dof_indices( data.local_dof_indices ); + for( unsigned int d=0; d void Navier_Stokes_Projection::copy_advection_local_to_global( + const AdvectionPerTaskData &data ){ + for( unsigned int i=0; iNavier_Stokes_Projection::projection_step} +// This implements the projection step. +template void Navier_Stokes_Projection::projection_step( const bool reinit_prec ){ + if( reinit_prec ) + prec_pressure.initialize( pres_Laplace ); + + pres_tmp = 0.; + for( unsigned d=0; dNavier_Stokes_Projection::update_pressure } +// This is the pressure update step of the projection method. It implements the +// standard formulation of the method, that is +// $$ +// p^{n+1} = p^n + \phi^{n+1}, +// $$ +// or the rotational form, which is +// $$ +// p^{n+1} = p^n + \phi^{n+1} - \frac{1}{Re} \nabla\cdot u^{n+1}. +// $$ +template void Navier_Stokes_Projection::update_pressure( const bool reinit_prec ){ + pres_n_minus_1 = pres_n; + switch( type ){ + case RunTimeParameters::METHOD_STANDARD: + pres_n += phi_n; + break; + case RunTimeParameters::METHOD_ROTATIONAL: + if( reinit_prec ) + prec_mass.initialize( pres_Mass ); + pres_n = pres_tmp; + prec_mass.solve( pres_n ); + pres_n.sadd(1./Re, 1., pres_n_minus_1, 1., phi_n ); + break; + default: + Assert( false, ExcNotImplemented() ); + }; +} + + +// @sect4{ Navier_Stokes_Projection::plot_solution } +// At this stage, we only output the vorticity of the flow. This only works in 2d and +// WILL be changed. 
+/// +template void Navier_Stokes_Projection::plot_solution( const unsigned int step ){ + const FESystem joint_fe( fe_velocity, dim, fe_pressure, 1 ); + DoFHandler joint_dof_handler( triangulation ); + joint_dof_handler.distribute_dofs( joint_fe ); + Assert( joint_dof_handler.n_dofs() == dim*dof_handler_velocity.n_dofs() + dof_handler_pressure.n_dofs(), + ExcInternalError() ); + static Vector joint_solution( joint_dof_handler.n_dofs() ); + std::vector loc_joint_dof_indices( joint_fe.dofs_per_cell ), + loc_vel_dof_indices( fe_velocity.dofs_per_cell ), + loc_pres_dof_indices( fe_pressure.dofs_per_cell ); + typename DoFHandler::active_cell_iterator + joint_cell = joint_dof_handler.begin_active(), + joint_endc = joint_dof_handler.end(), + vel_cell = dof_handler_velocity.begin_active(), + pres_cell = dof_handler_pressure.begin_active(); + for( ; joint_cell not_eq joint_endc; ++joint_cell, ++vel_cell, ++pres_cell ){ + joint_cell->get_dof_indices( loc_joint_dof_indices ); + vel_cell->get_dof_indices( loc_vel_dof_indices ), + pres_cell->get_dof_indices( loc_pres_dof_indices ); + for( unsigned int i=0; i joint_solution_names( dim, "v" ); + joint_solution_names.push_back( "p" ); + + DataOut data_out; + data_out.attach_dof_handler (joint_dof_handler); + + std::vector< DataComponentInterpretation::DataComponentInterpretation > + component_interpretation( dim+1, DataComponentInterpretation::component_is_part_of_vector ); + component_interpretation[dim] = DataComponentInterpretation::component_is_scalar; + + data_out.add_data_vector( joint_solution, joint_solution_names, DataOut::type_dof_data, + component_interpretation ); + + data_out.build_patches( deg + 1 ); + + std::ostringstream filename; + filename<<"solution-"<Navier_Stokes_Projection::Post_Process} +// Having reached the final time T, we want to measure the error that we have made. +// This method is responsible for that. Saves the results in a ConvergenceTable +// object which later we can print or compute things with it.
    +// The way we compute the errors is very similar to previous tutorials. However, we need the +// pressure to have mean value zero, so we compute its mean value and subtract it from the computed +// pressure. +template void Navier_Stokes_Projection::Post_Process(){ + double tmp, vel_err_L2=0., vel_err_H1=0., pres_err_L2; + + Vector differences( triangulation.n_active_cells() ); + + vel_exact.set_time(T); + for( unsigned int d=0; d pres_exact(T); + differences = 0.; + VectorTools::integrate_difference( dof_handler_pressure, pres_n, pres_exact, + differences, quadrature_pressure, VectorTools::L2_norm ); + pres_err_L2 = differences.l2_norm(); + + convergence_table.add_value( "dt" , dt ); + convergence_table.add_value( "u_L2" , vel_err_L2 ); + convergence_table.add_value( "u_H1" , vel_err_H1 ); + convergence_table.add_value( "pres_L2", pres_err_L2 ); + + convergence_table.set_precision( "dt" , 5 ); + convergence_table.set_precision( "u_L2" , 5 ); + convergence_table.set_precision( "u_H1" , 5 ); + convergence_table.set_precision( "pres_L2", 5 ); + + convergence_table.set_scientific( "u_L2" , true ); + convergence_table.set_scientific( "u_H1" , true ); + convergence_table.set_scientific( "pres_L2", true ); + + convergence_table.write_text(std::cout); +} + + + +// @sect3{ The main function } +// The main function looks very much like in all the other tutorial programs. +int main(){ + try{ + RunTimeParameters::Data_Storage data; + data.read_data( "parameter-file.prm" ); + deallog.depth_console( data.verbose?2:0 ); + Navier_Stokes_Projection<2> test( data ); + for( double dt = data.initial_dt; dt >= data.final_dt; dt /= data.dt_decrement ){ + std::cout<<" dt = "< #include - // The following two files provide - // classes and information for - // multi-threaded programs. In the - // first one, the classes and - // functions are declared which we - // need to start new threads and to - // wait for threads to return - // (i.e. the Thread class - // and the spawn functions). The + // The following two files provide classes + // and information for multi-threaded + // programs. In the first one, the classes + // and functions are declared which we need + // to start new threads and to wait for + // threads to return (i.e. the + // Thread class and the + // new_thread functions). The // second file has a class // MultithreadInfo (and a global // object multithread_info of - // that type) which can be used to - // query the number of processors in - // your system, which is often useful - // when deciding how many threads to - // start in parallel. + // that type) which can be used to query the + // number of processors in your system, which + // is often useful when deciding how many + // threads to start in parallel. #include #include @@ -122,6 +120,18 @@ class AdvectionProblem // which denote the first cell on // which it shall operate, and // the one past the last. + // + // The strategy for parallelization we + // choose here is one of the + // possibilities mentioned in detail in + // the @ref threads module in the + // documentation. While it is a + // straightforward way to distribute the + // work for assembling the system onto + // multiple processor cores. As mentioned + // in the module, there are other, and + // possibly better suited, ways to + // achieve the same goal. 
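+      // As a rough sketch of such an alternative (it is not the approach
+      // taken in this program): the WorkStream framework described in the
+      // @ref threads module splits the assembly into a per-cell worker and
+      // a serialized copy-to-global stage, along the lines of
+      //
+      //   WorkStream::run (dof_handler.begin_active(), dof_handler.end(),
+      //                    *this,
+      //                    &AdvectionProblem<dim>::local_assemble_system,
+      //                    &AdvectionProblem<dim>::copy_local_to_global,
+      //                    scratch_data, per_task_data);
+      //
+      // where local_assemble_system, copy_local_to_global, scratch_data and
+      // per_task_data are hypothetical names for the per-cell assembly
+      // function, the copy function, and their scratch and per-task data
+      // objects, respectively.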
void assemble_system (); void assemble_system_interval (const typename DoFHandler::active_cell_iterator &begin, const typename DoFHandler::active_cell_iterator &end); @@ -733,26 +743,39 @@ void AdvectionProblem::assemble_system () // another value. This variable is // also queried by functions inside // the library to determine how - // many threads they shall spawn. + // many threads they shall create. const unsigned int n_threads = multithread_info.n_default_threads; + // It is worth noting, however, that this + // setup determines the load distribution + // onto processor in a static way: it does + // not take into account that some other + // part of our program may also be running + // something in parallel at the same time + // as we get here (this is not the case in + // the current program, but may easily be + // the case in more complex + // applications). A discussion of how to + // deal with this case can be found in the + // @ref threads module. + // // Next, we need an object which is // capable of keeping track of the // threads we created, and allows // us to wait until they all have // finished (to join them in // the language of threads). The - // Threads::ThreadGroup class + // Threads::ThreadGroup class // does this, which is basically // just a container for objects of - // type Threads::Thread that + // type Threads::Thread that // represent a single thread; - // Threads::Thread is what the - // spawn function below will + // Threads::Thread is what the + // Threads::new_thread function below will // return when we start a new // thread. // - // Note that both ThreadGroup - // and Thread have a template + // Note that both Threads::ThreadGroup + // and Threads::Thread have a template // argument that represents the // return type of the function // being called on a separate @@ -769,7 +792,7 @@ void AdvectionProblem::assemble_system () // // If you did not configure for // multi-threading, then the - // spawn function that is + // new_thread function that is // supposed to start a new thread // in parallel only executes the // function which should be run in @@ -781,7 +804,7 @@ void AdvectionProblem::assemble_system () // join that is supposed to // wait for all spawned threads to // return, returns immediately, as - // there can't be threads running. + // there can't be any threads running. Threads::ThreadGroup<> threads; // Now we have to split the range @@ -848,24 +871,25 @@ void AdvectionProblem::assemble_system () // using the following sequence of // function calls: for (unsigned int thread=0; thread::assemble_system_interval) - (thread_ranges[thread].first, - thread_ranges[thread].second); + threads += Threads::new_thread (&AdvectionProblem::assemble_system_interval, + *this, + thread_ranges[thread].first, + thread_ranges[thread].second); // The reasons and internal // workings of these functions can // be found in the report on the // subject of multi-threading, // which is available online as // well. Suffice it to say that we - // spawn a new thread that calls + // create a new thread that calls // the assemble_system_interval // function on the present object // (the this pointer), with the // arguments following in the // second set of parentheses passed - // as parameters. The spawn - // function return an object of - // type Threads::Thread, which + // as parameters. The Threads::new_thread + // function returns an object of + // type Threads::Thread, which // we put into the threads // container. 
If a thread exits, // the return value of the function @@ -1471,16 +1495,15 @@ GradientEstimation::estimate (const DoFHandler &dof_handler, = Threads::split_interval (0, dof_handler.get_tria().n_active_cells(), n_threads); - // In the same way as before, we - // use a Threads::ThreadGroup - // object to collect the descriptor - // objects of different - // threads. Note that as the + // In the same way as before, we use a + // Threads::ThreadGroup object + // to collect the descriptor objects of + // different threads. Note that as the // function called is not a member - // function, but rather a static - // function, we need not (and can - // not) pass a this pointer to - // the spawn function in this + // function, but rather a static function, + // we need not (and can not) pass a + // this pointer to the + // new_thread function in this // case. // // Taking pointers to templated @@ -1508,9 +1531,10 @@ GradientEstimation::estimate (const DoFHandler &dof_handler, Vector &) = &GradientEstimation::template estimate_interval; for (unsigned int i=0; i class FullMatrix; *@{ */ +namespace internal +{ + namespace SparseMatrix + { + /** + * Like + * internal::Vector::minimum_parallel_grain_size, + * but now denoting the number of rows of + * a matrix that should be worked on as a + * minimum. + */ + extern unsigned int minimum_parallel_grain_size; + } +} + namespace SparseMatrixIterators { // forward declaration @@ -2019,75 +2034,6 @@ class SparseMatrix : public virtual Subscriptor */ unsigned int max_len; - /** - * Version of vmult() which only - * performs its actions on the - * region defined by - * [begin_row,end_row). This - * function is called by vmult() - * in the case of enabled - * multithreading. - */ - template - void threaded_vmult (OutVector& dst, - const InVector& src, - const unsigned int begin_row, - const unsigned int end_row) const; - - /** - * Version of - * matrix_norm_square() which - * only performs its actions on - * the region defined by - * [begin_row,end_row). This - * function is called by - * matrix_norm_square() in the - * case of enabled - * multithreading. - */ - template - void threaded_matrix_norm_square (const Vector &v, - const unsigned int begin_row, - const unsigned int end_row, - somenumber *partial_sum) const; - - /** - * Version of - * matrix_scalar_product() which - * only performs its actions on - * the region defined by - * [begin_row,end_row). This - * function is called by - * matrix_scalar_product() in the - * case of enabled - * multithreading. - */ - template - void threaded_matrix_scalar_product (const Vector &u, - const Vector &v, - const unsigned int begin_row, - const unsigned int end_row, - somenumber *partial_sum) const; - - /** - * Version of residual() which - * only performs its actions on - * the region defined by - * [begin_row,end_row) - * (these numbers are the - * components of - * interval). This - * function is called by - * residual() in the case of - * enabled multithreading. 
- */ - template - void threaded_residual (Vector &dst, - const Vector &u, - const Vector &b, - const std::pair interval, - somenumber *partial_norm) const; - // make all other sparse matrices // friends template friend class SparseMatrix; diff --git a/deal.II/lac/include/lac/sparse_matrix.templates.h b/deal.II/lac/include/lac/sparse_matrix.templates.h index 84ffb321b1..df34ec9546 100644 --- a/deal.II/lac/include/lac/sparse_matrix.templates.h +++ b/deal.II/lac/include/lac/sparse_matrix.templates.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009 by the deal.II authors +// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -14,9 +14,11 @@ #define __deal2__sparse_matrix_templates_h -//TODO[WB]: the threaded functions can now be converted to return a value, rather than use an additional argument - +#include #include +#include +#include +#include #include #include #include @@ -35,12 +37,10 @@ #include #include #include - #include #include +#include -#include -#include DEAL_II_NAMESPACE_OPEN @@ -331,6 +331,59 @@ SparseMatrix::add (const number factor, } +namespace internal +{ + namespace SparseMatrix + { + /** + * Perform a vmult using the SparseMatrix + * data structures, but only using a + * subinterval for the row indices. + * + * In the sequential case, this function + * is called on all rows, in the parallel + * case it may be called on a subrange, + * at the discretion of the task + * scheduler. + */ + template + void vmult_on_subrange (const unsigned int begin_row, + const unsigned int end_row, + const number *values, + const std::size_t *rowstart, + const unsigned int *colnums, + const InVector &src, + OutVector &dst, + const bool add) + { + const number *val_ptr = &values[rowstart[begin_row]]; + const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]]; + typename OutVector::iterator dst_ptr = dst.begin() + begin_row; + + if (add == false) + for (unsigned int row=begin_row; row template @@ -545,91 +598,18 @@ SparseMatrix::vmult (OutVector& dst, Assert(n() == src.size(), ExcDimensionMismatch(n(),src.size())); Assert (!PointerComparison::equal(&src, &dst), ExcSourceEqualsDestination()); - - const unsigned int n_rows = m(); - - // in MT mode: start new threads - // only if the matrix is - // sufficiently large. the limit - // is mostly artificial - if (DEAL_II_USE_MT && - (multithread_info.n_default_threads > 1) && - (n_rows/multithread_info.n_default_threads > 2000)) - { - const unsigned int n_threads = multithread_info.n_default_threads; - - // then spawn threads. 
since - // some compilers have trouble - // finding out which - // 'encapsulate' function to - // take of all those possible - // ones if we simply drop in - // the address of an overloaded - // template member function, - // make it simpler for the - // compiler by giving it the - // correct type right away: - typedef - void (SparseMatrix::*mem_fun_p) - (OutVector &, - const InVector &, - const unsigned int , - const unsigned int) const; - const mem_fun_p comp - = (&SparseMatrix:: - template threaded_vmult); - Threads::ThreadGroup<> threads; - for (unsigned int i=0; i > (*this, comp) (dst, src, - n_rows * i / n_threads, - n_rows * (i+1) / n_threads); - threads.join_all(); - return; - } - else - { - // if not in MT mode or size<2000 - // do it in an oldfashioned way - const number *val_ptr = &val[cols->rowstart[0]]; - const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[0]]; - typename OutVector::iterator dst_ptr = dst.begin(); - for (unsigned int row=0; rowrowstart[row+1]]; - while (val_ptr != val_end_of_row) - s += *val_ptr++ * src(*colnum_ptr++); - *dst_ptr++ = s; - }; - }; -} - - - -template -template -void -SparseMatrix::threaded_vmult (OutVector &dst, - const InVector &src, - const unsigned int begin_row, - const unsigned int end_row) const -{ - // this function should not be called - // when not in parallel mode. - Assert (DEAL_II_USE_MT, ExcInternalError()); - - const number *val_ptr = &val[cols->rowstart[begin_row]]; - const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[begin_row]]; - typename OutVector::iterator dst_ptr = dst.begin() + begin_row; - for (unsigned int row=begin_row; rowrowstart[row+1]]; - while (val_ptr != val_end_of_row) - s += *val_ptr++ * src(*colnum_ptr++); - *dst_ptr++ = s; - }; + parallel::apply_to_subranges (0U, m(), + std_cxx1x::bind (internal::SparseMatrix::vmult_on_subrange + , + _1, _2, + val, + cols->rowstart, + cols->colnums, + std_cxx1x::cref(src), + std_cxx1x::ref(dst), + false), + internal::SparseMatrix::minimum_parallel_grain_size); } @@ -674,18 +654,17 @@ SparseMatrix::vmult_add (OutVector& dst, Assert (!PointerComparison::equal(&src, &dst), ExcSourceEqualsDestination()); - const unsigned int n_rows = m(); - const number *val_ptr = &val[cols->rowstart[0]]; - const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[0]]; - typename OutVector::iterator dst_ptr = dst.begin(); - for (unsigned int row=0; rowrowstart[row+1]]; - while (val_ptr != val_end_of_row) - s += *val_ptr++ * src(*colnum_ptr++); - *dst_ptr++ = s; - }; + parallel::apply_to_subranges (0U, m(), + std_cxx1x::bind (internal::SparseMatrix::vmult_on_subrange + , + _1, _2, + val, + cols->rowstart, + cols->colnums, + std_cxx1x::cref(src), + std_cxx1x::ref(dst), + true), + internal::SparseMatrix::minimum_parallel_grain_size); } @@ -712,6 +691,45 @@ SparseMatrix::Tvmult_add (OutVector& dst, } +namespace internal +{ + namespace SparseMatrix + { + /** + * Perform a vmult using the SparseMatrix + * data structures, but only using a + * subinterval for the row indices. + * + * In the sequential case, this function + * is called on all rows, in the parallel + * case it may be called on a subrange, + * at the discretion of the task + * scheduler. 
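+     *
+     * (Illustration only: the intended driver of this helper is
+     * parallel::accumulate_from_subranges, roughly as in
+     * @code
+     *   parallel::accumulate_from_subranges<somenumber>
+     *     (std_cxx1x::bind (&matrix_norm_sqr_on_subrange<number, Vector<somenumber> >,
+     *                       _1, _2, val, cols->rowstart, cols->colnums,
+     *                       std_cxx1x::cref(v)),
+     *      0, m(), minimum_parallel_grain_size);
+     * @endcode
+     * The authoritative form of this call, including the template
+     * arguments, is the one made by matrix_norm_square() below.)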
+ */ + template + number matrix_norm_sqr_on_subrange (const unsigned int begin_row, + const unsigned int end_row, + const number *values, + const std::size_t *rowstart, + const unsigned int *colnums, + const InVector &v) + { + number norm_sqr=0.; + + for (unsigned int i=begin_row; i::conjugate(s); + } + return norm_sqr; + } + } +} + + template template @@ -723,104 +741,56 @@ SparseMatrix::matrix_norm_square (const Vector& v) const Assert(m() == v.size(), ExcDimensionMismatch(m(),v.size())); Assert(n() == v.size(), ExcDimensionMismatch(n(),v.size())); - const unsigned int n_rows = m(); - - // if in MT mode and size sufficiently - // large: do it in parallel; the limit - // is mostly artificial - if (DEAL_II_USE_MT && - (multithread_info.n_default_threads > 1) && - (n_rows/multithread_info.n_default_threads > 2000)) - { - const unsigned int n_threads = multithread_info.n_default_threads; - - // space for the norms of - // the different parts - std::vector partial_sums (n_threads, 0); - // then spawn threads. since - // some compilers have trouble - // finding out which - // 'encapsulate' function to - // take of all those possible - // ones if we simply drop in - // the address of an overloaded - // template member function, - // make it simpler for the - // compiler by giving it the - // correct type right away: - typedef - void (SparseMatrix::*mem_fun_p) - (const Vector &, - const unsigned int , - const unsigned int , - somenumber *) const; - const mem_fun_p comp - = (&SparseMatrix:: - template threaded_matrix_norm_square); - Threads::ThreadGroup<> threads; - for (unsigned int i=0; i > (*this, comp)(v, - n_rows * i / n_threads, - n_rows * (i+1) / n_threads, - &partial_sums[i]); - - // ... and wait until they're finished - threads.join_all (); - // accumulate the partial results - return std::accumulate (partial_sums.begin(), - partial_sums.end(), - static_cast(0.)); - } - else - { - // if not in MT mode or the matrix is - // too small: do it one-by-one - somenumber sum = 0.; - const number *val_ptr = &val[cols->rowstart[0]]; - const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[0]]; - for (unsigned int row=0; rowrowstart[row+1]]; - while (val_ptr != val_end_of_row) - s += *val_ptr++ * v(*colnum_ptr++); - - sum += v(row) * numbers::NumberTraits::conjugate(s); - } - - return sum; - } + return + parallel::accumulate_from_subranges + (std_cxx1x::bind (internal::SparseMatrix::matrix_norm_sqr_on_subrange + >, + _1, _2, + val, cols->rowstart, cols->colnums, + std_cxx1x::cref(v)), + 0, m(), + internal::SparseMatrix::minimum_parallel_grain_size); } -template -template -void -SparseMatrix:: -threaded_matrix_norm_square (const Vector &v, - const unsigned int begin_row, - const unsigned int end_row, - somenumber *partial_sum) const +namespace internal { - // this function should not be called - // when not in parallel mode. 
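The rewritten vmult(), vmult_add() and matrix_norm_square() above all follow the same two-part pattern: a free function does the work for a half-open range of rows, and a parallel driver (parallel::apply_to_subranges, or parallel::accumulate_from_subranges where a result has to be summed up) lets the TBB task scheduler decide how that row range is split. The following is a minimal sketch of the two pieces; the function name vmult_rows and the exact signature are illustrative rather than the library's verbatim code, but the CSR arrays correspond to SparseMatrix's val, cols->rowstart and cols->colnums, and the loop is the same one that used to live in vmult() and threaded_vmult().

#include <cstddef>

// Multiply rows [begin_row, end_row) of a CSR matrix with src and write the
// result into dst (or add to it, if add==true). This mirrors the role of
// internal::SparseMatrix::vmult_on_subrange.
template <typename number, class InVector, class OutVector>
void vmult_rows (const unsigned int  begin_row,
                 const unsigned int  end_row,
                 const number       *values,
                 const std::size_t  *rowstart,
                 const unsigned int *colnums,
                 const InVector     &src,
                 OutVector          &dst,
                 const bool          add)
{
  const number       *val_ptr    = &values[rowstart[begin_row]];
  const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]];
  typename OutVector::iterator dst_ptr = dst.begin() + begin_row;

  for (unsigned int row = begin_row; row < end_row; ++row, ++dst_ptr)
    {
      typename OutVector::value_type s = 0;
      if (add)
        s = *dst_ptr;

      const number *end_of_row = &values[rowstart[row + 1]];
      while (val_ptr != end_of_row)
        s += *val_ptr++ * src (*colnum_ptr++);
      *dst_ptr = s;
    }
}

// The driver binds everything except the subrange bounds, which the scheduler
// fills in through the placeholders _1 and _2. This is the shape of the calls
// in vmult() and vmult_add() above (illustrative, with template arguments
// abbreviated):
//
//   parallel::apply_to_subranges
//     (0U, m(),
//      std_cxx1x::bind (&vmult_rows<number, InVector, OutVector>,
//                       _1, _2,
//                       val, cols->rowstart, cols->colnums,
//                       std_cxx1x::cref (src), std_cxx1x::ref (dst),
//                       /* add= */ false),
//      internal::SparseMatrix::minimum_parallel_grain_size);

Since vmult() passes add=false and vmult_add() passes add=true, the loop that was previously duplicated in the two functions and their threaded_* helpers now exists only once.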
- Assert (DEAL_II_USE_MT, ExcInternalError()); - - somenumber sum = 0.; - const number *val_ptr = &val[cols->rowstart[begin_row]]; - const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[begin_row]]; - for (unsigned int row=begin_row; row + number matrix_scalar_product_on_subrange (const unsigned int begin_row, + const unsigned int end_row, + const number *values, + const std::size_t *rowstart, + const unsigned int *colnums, + const InVector &u, + const InVector &v) { - somenumber s = 0.; - const number *val_end_of_row = &val[cols->rowstart[row+1]]; - while (val_ptr != val_end_of_row) - s += *val_ptr++ * v(*colnum_ptr++); - - sum += v(row) * numbers::NumberTraits::conjugate(s); - } + number norm_sqr=0.; - *partial_sum = sum; + for (unsigned int i=begin_row; i::conjugate(s); + } + return norm_sqr; + } + } } @@ -836,106 +806,16 @@ SparseMatrix::matrix_scalar_product (const Vector& u, Assert(m() == u.size(), ExcDimensionMismatch(m(),u.size())); Assert(n() == v.size(), ExcDimensionMismatch(n(),v.size())); - const unsigned int n_rows = m(); - - // if in MT mode and size sufficiently - // large: do it in parallel; the limit - // is mostly artificial - if (DEAL_II_USE_MT && - (multithread_info.n_default_threads != 1) && - (n_rows/multithread_info.n_default_threads > 2000)) - { - const unsigned int n_threads = multithread_info.n_default_threads; - - // space for the norms of - // the different parts - std::vector partial_sums (n_threads, 0); - // then spawn threads. since - // some compilers have trouble - // finding out which - // 'encapsulate' function to - // take of all those possible - // ones if we simply drop in - // the address of an overloaded - // template member function, - // make it simpler for the - // compiler by giving it the - // correct type right away: - typedef - void (SparseMatrix::*mem_fun_p) - (const Vector &, - const Vector &, - const unsigned int , - const unsigned int , - somenumber *) const; - const mem_fun_p comp - = (&SparseMatrix:: - template threaded_matrix_scalar_product); - Threads::ThreadGroup<> threads; - for (unsigned int i=0; i > (*this, comp)(u, v, - n_rows * i / n_threads, - n_rows * (i+1) / n_threads, - &partial_sums[i]); - - // ... and wait until they're finished - threads.join_all (); - // accumulate the partial results - return std::accumulate (partial_sums.begin(), - partial_sums.end(), - static_cast(0.)); - } - else - { - // if not in MT mode or the matrix is - // too small: do it one-by-one - somenumber sum = 0.; - const number *val_ptr = &val[cols->rowstart[0]]; - const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[0]]; - for (unsigned int row=0; rowrowstart[row+1]]; - while (val_ptr != val_end_of_row) - s += *val_ptr++ * v(*colnum_ptr++); - - sum += u(row) * numbers::NumberTraits::conjugate(s); - } - - return sum; - } -} - - - -template -template -void -SparseMatrix:: -threaded_matrix_scalar_product (const Vector &u, - const Vector &v, - const unsigned int begin_row, - const unsigned int end_row, - somenumber *partial_sum) const -{ - // this function should not be called - // when not in parallel mode. 
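matrix_norm_square(), matrix_scalar_product() and residual() need one more ingredient than vmult(): each subrange produces a partial sum that has to be reduced to a single number, which is what parallel::accumulate_from_subranges does. Conceptually it behaves like the sketch below; the name accumulate_from_subranges_model and the serial loop are purely illustrative, since the real implementation hands the chunks to the TBB scheduler and only guarantees that ranges are not split below the grain size.

#include <algorithm>

// Serial model of parallel::accumulate_from_subranges<ResultType>(f, begin,
// end, grain_size): evaluate the two-argument functor f on consecutive chunks
// of the index range [begin, end) and add up the partial results.
template <typename ResultType, typename Function>
ResultType accumulate_from_subranges_model (const Function    &f,
                                            const unsigned int begin,
                                            const unsigned int end,
                                            const unsigned int grain_size)
{
  const unsigned int chunk_size = std::max (grain_size, 1U);

  ResultType sum = ResultType();
  for (unsigned int chunk_begin = begin; chunk_begin < end; )
    {
      const unsigned int chunk_end = std::min (chunk_begin + chunk_size, end);
      sum += f (chunk_begin, chunk_end);   // e.g. matrix_norm_sqr_on_subrange
      chunk_begin = chunk_end;
    }
  return sum;
}

In matrix_norm_square() the functor is the bound matrix_norm_sqr_on_subrange with the matrix arrays and the vector already attached via std_cxx1x::bind; matrix_scalar_product() binds two vectors instead of one but is otherwise identical.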
- Assert (DEAL_II_USE_MT, ExcInternalError()); - - somenumber sum = 0.; - const number *val_ptr = &val[cols->rowstart[begin_row]]; - const unsigned int *colnum_ptr = &cols->colnums[cols->rowstart[begin_row]]; - for (unsigned int row=begin_row; rowrowstart[row+1]]; - while (val_ptr != val_end_of_row) - s += *val_ptr++ * v(*colnum_ptr++); - - sum += u(row) * numbers::NumberTraits::conjugate(s); - } - - *partial_sum = sum; + return + parallel::accumulate_from_subranges + (std_cxx1x::bind (internal::SparseMatrix::matrix_scalar_product_on_subrange + >, + _1, _2, + val, cols->rowstart, cols->colnums, + std_cxx1x::cref(u), + std_cxx1x::cref(v)), + 0, m(), + internal::SparseMatrix::minimum_parallel_grain_size); } @@ -1276,6 +1156,49 @@ SparseMatrix::frobenius_norm () const +namespace internal +{ + namespace SparseMatrix + { + /** + * Perform a vmult using the SparseMatrix + * data structures, but only using a + * subinterval for the row indices. + * + * In the sequential case, this function + * is called on all rows, in the parallel + * case it may be called on a subrange, + * at the discretion of the task + * scheduler. + */ + template + number residual_sqr_on_subrange (const unsigned int begin_row, + const unsigned int end_row, + const number *values, + const std::size_t *rowstart, + const unsigned int *colnums, + const InVector &u, + const InVector &b, + OutVector &dst) + { + number norm_sqr=0.; + + for (unsigned int i=begin_row; i::conjugate(s); + } + return norm_sqr; + } + } +} + + template template somenumber @@ -1291,107 +1214,17 @@ SparseMatrix::residual (Vector &dst, Assert (&u != &dst, ExcSourceEqualsDestination()); - const unsigned int n_rows = m(); - - // if in MT mode and size sufficiently - // large: do it in parallel; the limit - // is mostly artificial - if (DEAL_II_USE_MT && - (multithread_info.n_default_threads > 1) && - (n_rows/multithread_info.n_default_threads > 2000)) - { - const unsigned int n_threads = multithread_info.n_default_threads; - - // space for the square norms of - // the different parts - std::vector partial_norms (n_threads, 0); - - // then spawn threads. since - // some compilers have trouble - // finding out which - // 'encapsulate' function to - // take of all those possible - // ones if we simply drop in - // the address of an overloaded - // template member function, - // make it simpler for the - // compiler by giving it the - // correct type right away: - typedef - void (SparseMatrix::*mem_fun_p) - (Vector &, - const Vector &, - const Vector &, - const std::pair, - somenumber *) const; - const mem_fun_p comp_residual = &SparseMatrix:: - template threaded_residual; - Threads::ThreadGroup<> threads; - for (unsigned int i=0; i > (*this, comp_residual)(dst, u, b, - std::pair - (n_rows * i / n_threads, - n_rows * (i+1) / n_threads), - &partial_norms[i]); - - // ... and wait until they're finished - threads.join_all (); - // accumulate the partial results - return std::sqrt(std::accumulate (partial_norms.begin(), - partial_norms.end(), - static_cast(0.))); - } - else - { - somenumber norm=0.; - - for (unsigned int i=0; irowstart[i]; jrowstart[i+1] ;j++) - { - const unsigned int p = cols->colnums[j]; - s -= val[j] * u(p); - } - dst(i) = s; - norm += dst(i)*dst(i); - } - return std::sqrt(norm); - }; -} - - -template -template -void -SparseMatrix::threaded_residual (Vector &dst, - const Vector &u, - const Vector &b, - const std::pair interval, - somenumber *partial_norm) const -{ - // this function should not be called - // when not in parallel mode. 
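residual() combines both pieces: each subrange computes dst(i) = b(i) - (A u)(i) for its rows and returns the sum of squares of those entries, accumulate_from_subranges adds up the partial results, and the caller takes the square root of the total, as in the new function body further below. In isolation the per-row work looks roughly like this; the name residual_sqr_rows is illustrative and, unlike the library version, the sketch assumes real-valued entries and therefore omits the complex conjugation.

#include <cstddef>

// For rows [begin_row, end_row): dst(i) = b(i) - sum_j A(i,j) * u(j), and
// return the accumulated dst(i)^2 for this subrange. Mirrors the role of
// internal::SparseMatrix::residual_sqr_on_subrange.
template <typename number, class InVector, class OutVector>
number residual_sqr_rows (const unsigned int  begin_row,
                          const unsigned int  end_row,
                          const number       *values,
                          const std::size_t  *rowstart,
                          const unsigned int *colnums,
                          const InVector     &u,
                          const InVector     &b,
                          OutVector          &dst)
{
  number norm_sqr = 0;
  for (unsigned int i = begin_row; i < end_row; ++i)
    {
      number s = b (i);
      for (std::size_t j = rowstart[i]; j < rowstart[i + 1]; ++j)
        s -= values[j] * u (colnums[j]);
      dst (i) = s;
      norm_sqr += s * s;
    }
  return norm_sqr;
}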
- Assert (DEAL_II_USE_MT, ExcInternalError()); - - const unsigned int begin_row = interval.first, - end_row = interval.second; - - somenumber norm=0.; - - for (unsigned int i=begin_row; irowstart[i]; jrowstart[i+1] ;j++) - { - const unsigned int p = cols->colnums[j]; - s -= val[j] * u(p); - } - dst(i) = s; - norm += dst(i)*dst(i); - }; - - *partial_norm = norm; + return + std::sqrt (parallel::accumulate_from_subranges + (std_cxx1x::bind (internal::SparseMatrix::residual_sqr_on_subrange + ,Vector >, + _1, _2, + val, cols->rowstart, cols->colnums, + std_cxx1x::cref(u), + std_cxx1x::cref(b), + std_cxx1x::ref(dst)), + 0, m(), + internal::SparseMatrix::minimum_parallel_grain_size)); } diff --git a/deal.II/lac/include/lac/sparse_vanka.templates.h b/deal.II/lac/include/lac/sparse_vanka.templates.h index 1a80a04be1..a0cd767ffa 100644 --- a/deal.II/lac/include/lac/sparse_vanka.templates.h +++ b/deal.II/lac/include/lac/sparse_vanka.templates.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 by the deal.II authors +// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -118,8 +118,9 @@ SparseVanka::compute_inverses () // Now spawn the threads Threads::ThreadGroup<> threads; for (unsigned int i=0; i::vmult (Vector &dst, = &SparseVanka::template apply_preconditioner; Threads::ThreadGroup<> threads; for (unsigned int block=0; block >(*static_cast*>(this), comp) - (dst, src,&dof_masks[block]); + threads += Threads::new_thread (comp, + *static_cast*>(this), + dst, src,&dof_masks[block]); threads.join_all (); } else diff --git a/deal.II/lac/include/lac/swappable_vector.templates.h b/deal.II/lac/include/lac/swappable_vector.templates.h index fb5a85a076..87ed650c4a 100644 --- a/deal.II/lac/include/lac/swappable_vector.templates.h +++ b/deal.II/lac/include/lac/swappable_vector.templates.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006 by the deal.II authors +// Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2008 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -168,7 +168,7 @@ void SwappableVector::alert () // data has not been preloaded so // far, so go on! For this, start // a detached thread - Threads::spawn (*this, &SwappableVector::reload_vector)(true); + Threads::new_thread (&SwappableVector::reload_vector, *this, true); // note that reload_vector also // releases the lock } diff --git a/deal.II/lac/include/lac/vector.h b/deal.II/lac/include/lac/vector.h index cc6aa2ddcc..f3ec986367 100644 --- a/deal.II/lac/include/lac/vector.h +++ b/deal.II/lac/include/lac/vector.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by the deal.II authors +// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. 
Please refer @@ -51,6 +51,33 @@ template class BlockVector; template class VectorView; +namespace internal +{ + namespace Vector + { + /** + * If we do computations on vectors in + * parallel (say, we add two vectors to + * get a third, and we do the loop over + * all elements in parallel), then this + * variable determines the minimum number + * of elements for which it is profitable + * to split a range of elements any + * further to distribute to different + * threads. + * + * This variable is available as a global + * writable variable in order to allow + * the testsuite to also test the + * parallel case. By default, it is set + * to several thousand elements, which is + * a case that the testsuite would not + * normally encounter. + */ + extern unsigned int minimum_parallel_grain_size; + } +} + /*! @addtogroup Vectors @@ -117,6 +144,8 @@ class Vector : public Subscriptor */ typedef typename numbers::NumberTraits::real_type real_type; + public: + /** * @name 1: Basic Object-handling */ diff --git a/deal.II/lac/include/lac/vector.templates.h b/deal.II/lac/include/lac/vector.templates.h index 20555a62b9..5916637e57 100644 --- a/deal.II/lac/include/lac/vector.templates.h +++ b/deal.II/lac/include/lac/vector.templates.h @@ -2,7 +2,7 @@ // $Id$ // Version: $Name$ // -// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007 by the deal.II authors +// Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors // // This file is subject to QPL and may not be distributed // without copyright and license information. Please refer @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -28,6 +29,8 @@ # include #endif +#include + #include #include #include @@ -411,8 +414,12 @@ Vector& Vector::operator -= (const Vector& v) Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - for (unsigned int i=0; i::add (const Number v) { Assert (vec_size!=0, ExcEmptyObject()); - for (unsigned int i=0; i::add (const Vector& v) Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - for (unsigned int i=0; i::add (const Number a, const Vector& v, Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); Assert (vec_size == w.vec_size, ExcDimensionMismatch(vec_size, w.vec_size)); - for (unsigned int i=0; i::sadd (const Number x, Assert (vec_size!=0, ExcEmptyObject()); Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); - for (unsigned int i=0; i::sadd (const Number x, const Number a, Assert (vec_size == v.vec_size, ExcDimensionMismatch(vec_size, v.vec_size)); Assert (vec_size == w.vec_size, ExcDimensionMismatch(vec_size, w.vec_size)); - for (unsigned int i=0; i DEAL_II_NAMESPACE_OPEN + +namespace internal +{ + namespace SparseMatrix + { + // set this value to 1/5 of the value of + // the minimum grain size of + // vectors. this rests on the fact that + // we have to do a lot more work per row + // of a matrix than per element of a + // vector. it could possibly be reduced + // even further but that doesn't appear + // worth it any more for anything but + // very small matrices that we don't care + // that much about anyway. 
+    unsigned int minimum_parallel_grain_size = 200;
+  }
+}
+
+
 #include "sparse_matrix.inst"
+
 DEAL_II_NAMESPACE_CLOSE
diff --git a/deal.II/lac/source/vector.cc b/deal.II/lac/source/vector.cc
index 2834504ba6..f37e3c8b64 100644
--- a/deal.II/lac/source/vector.cc
+++ b/deal.II/lac/source/vector.cc
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 by the deal.II authors
+//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
 //
 //    This file is subject to QPL and may not be distributed
 //    without copyright and license information. Please refer
@@ -16,6 +16,27 @@ DEAL_II_NAMESPACE_OPEN
+
+namespace internal
+{
+  namespace Vector
+  {
+    // set minimum grain size. this value is
+    // roughly in accordance with the curve
+    // in the TBB book (fig 3.2) that shows
+    // run time as a function of grain size
+    // -- there, values from 200 upward are
+    // so that the scheduling overhead
+    // amortizes well (for very large values
+    // in that example, the grain size is too
+    // large to split the work load into
+    // enough chunks and the problem becomes
+    // badly balanced)
+    unsigned int minimum_parallel_grain_size = 1000;
+  }
+}
+
+
 #include "vector.inst"
 
 // do a few functions that currently don't fit the scheme because they have
diff --git a/deal.II/lib/Makefile b/deal.II/lib/Makefile
index 4a76106284..714f6ec82a 100644
--- a/deal.II/lib/Makefile
+++ b/deal.II/lib/Makefile
@@ -134,6 +134,9 @@ clean-base clean-lac clean-1d clean-2d clean-3d:
 
 clean-contrib:
 	-rm contrib/*/*.$(OBJEXT)
+	-rm -r contrib/tbb/*debug
+	-rm -r contrib/tbb/*release
+	-rm libtbb*
 
 clean-bin:
 	-rm bin/detached_ma27
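Both grain sizes are exported as writable globals on purpose: the vector one mainly so that the testsuite can exercise the parallel code paths even with the very small vectors it uses, the matrix one as a value derived from it (one fifth, as explained in the comment above). A test might therefore temporarily lower the threshold along the following lines; the include path and the helper function are hypothetical, and only internal::Vector::minimum_parallel_grain_size itself is part of this change.

#include <lac/vector.h>   // hypothetical include path for Vector and the grain size

void exercise_parallel_vector_ops ()
{
  // Remember the default (1000) and force splitting even for tiny vectors.
  const unsigned int default_grain
    = dealii::internal::Vector::minimum_parallel_grain_size;
  dealii::internal::Vector::minimum_parallel_grain_size = 1;

  dealii::Vector<double> u (32), v (32);
  for (unsigned int i = 0; i < u.size (); ++i)
    {
      u (i) = 1.;
      v (i) = 2.;
    }

  // With the grain size lowered, this now runs through the parallelized
  // implementation of operator-= introduced in this change.
  u -= v;

  dealii::internal::Vector::minimum_parallel_grain_size = default_grain;
}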