From: kormann
Date: Sat, 5 May 2012 15:11:20 +0000 (+0000)
Subject: Merge branch with matrix-free computations. Until r25443.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=04d7ad07ed0f6f7b3848bc302a23b84435d50f46;p=dealii-svn.git
Merge branch with matrix-free computations. Until r25443.
git-svn-id: https://svn.dealii.org/trunk@25492 0785d39b-7218-0410-832d-ea1e28bc413d
---
diff --git a/deal.II/aclocal.m4 b/deal.II/aclocal.m4
index 0f115ea80f..c3a5571bd9 100644
--- a/deal.II/aclocal.m4
+++ b/deal.II/aclocal.m4
@@ -1216,6 +1216,16 @@ AC_DEFUN(DEAL_II_CHECK_CXX1X_COMPONENTS, dnl
[ AC_MSG_RESULT(no); all_cxx1x_classes_available=no ]
)
+ AC_MSG_CHECKING(for std::type_traits)
+ AC_TRY_COMPILE(
+    [#include <type_traits>],
+    [ const bool m0 = std::is_trivial<double>::value;
+      const bool m1 = std::is_standard_layout<double>::value;
+      const bool m2 = std::is_pod<double>::value; ],
+ [ AC_MSG_RESULT(yes) ],
+ [ AC_MSG_RESULT(no); all_cxx1x_classes_available=no ]
+ )
+
CXXFLAGS="${OLD_CXXFLAGS}"
dnl If the above classes and operations are all defined then we can
@@ -1999,12 +2009,12 @@ AC_DEFUN(DEAL_II_CHECK_CPU_OPTIMIZATIONS, dnl
AC_MSG_RESULT(x86 derivate ($withcpu))
case "$GXX_VERSION" in
gcc*)
- dnl Tune for this processor, but only in optimized mode
- dnl (to prevent the effects of possible compiler bugs to affect
- dnl both debug as well as optimized versions)
+ dnl Tune for this processor
+ CXXFLAGSG="$CXXFLAGSG -march=$withcpu"
CXXFLAGSO="$CXXFLAGSO -march=$withcpu"
dnl Also set the mode for f77 compiler
+ F77FLAGSG="$F77FLAGSG -march=$withcpu"
F77FLAGSO="$F77FLAGSO -march=$withcpu"
;;
esac
@@ -2014,18 +2024,19 @@ AC_DEFUN(DEAL_II_CHECK_CPU_OPTIMIZATIONS, dnl
AC_MSG_RESULT(native processor variant)
case "$GXX_VERSION" in
gcc*)
- dnl Tune for this processor, but only in optimized mode
- dnl (to prevent the effects of possible compiler bugs to affect
- dnl both debug as well as optimized versions)
+ dnl Tune for this processor
+ CXXFLAGSG="$CXXFLAGSG -march=native"
CXXFLAGSO="$CXXFLAGSO -march=native"
dnl Also set the mode for f77 compiler
+ F77FLAGSG="$F77FLAGSG -march=native"
F77FLAGSO="$F77FLAGSO -march=native"
;;
intel_icc*)
dnl Same, but for the icc compiler
CXXFLAGSO="$CXXFLAGSO -xhost"
+ CXXFLAGSG="$CXXFLAGSG -xhost"
;;
esac
;;
@@ -2038,6 +2049,155 @@ AC_DEFUN(DEAL_II_CHECK_CPU_OPTIMIZATIONS, dnl
+dnl -------------------------------------------------------------
+dnl Check whether the compiler allows for vectorization and that
+dnl vectorization actually works. For this test, we use compiler
+dnl intrinsics similar to what is used in the deal.II library and
+dnl check whether the arithmetic operations are correctly performed
+dnl on examples where all numbers are exactly represented as
+dnl floating point numbers.
+dnl
+dnl Usage: DEAL_II_COMPILER_VECTORIZATION_LEVEL
+dnl 0 means no vectorization, 1 support for SSE2, 2 support for AVX
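+dnl
+dnl As an illustration (a sketch only, not part of this macro), library
+dnl code can later branch on the detected level, e.g.
+dnl
+dnl   #if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2
+dnl   #  include <immintrin.h>   // AVX intrinsics, 4 doubles per register
+dnl   #elif DEAL_II_COMPILER_VECTORIZATION_LEVEL == 1
+dnl   #  include <emmintrin.h>   // SSE2 intrinsics, 2 doubles per register
+dnl   #endif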
+dnl
+dnl -------------------------------------------------------------
+AC_DEFUN(DEAL_II_DETECT_VECTORIZATION_LEVEL, dnl
+[
+ AC_LANG(C++)
+ CXXFLAGS="$CXXFLAGSG"
+ dnl SSE2 check in debug mode
+ AC_MSG_CHECKING(whether CPU supports SSE2)
+ AC_TRY_RUN(
+ [
+#include <emmintrin.h>
+#include <mm_malloc.h>
+  int main()
+  {
+  __m128d a, b;
+  const unsigned int vector_bytes = sizeof(__m128d);
+  const int n_vectors = vector_bytes/sizeof(double);
+  __m128d * data =
+    reinterpret_cast<__m128d*>(_mm_malloc (2*vector_bytes, vector_bytes));
+  double * ptr = reinterpret_cast<double*>(&a);
+  ptr[0] = (volatile double)(1.0);
+  for (int i=1; i<n_vectors; ++i)
+    ptr[i] = 0.0;
+  b = _mm_set1_pd ((volatile double)(2.25));
+  data[0] = _mm_add_pd (a, b);
+  data[1] = _mm_mul_pd (b, data[0]);
+  ptr = reinterpret_cast<double*>(&data[1]);
+  unsigned int return_value = 0;
+  if (ptr[0] != 7.3125)
+    return_value = 1;
+  for (int i=1; i<n_vectors; ++i)
+    if (ptr[i] != 5.0625)
+      return_value = 1;
+  _mm_free (data);
+  return return_value;
+  }
+  ],
+  [
+  AC_MSG_RESULT(yes)
+  dnl AVX check in debug mode
+  AC_MSG_CHECKING(whether CPU supports AVX)
+  AC_TRY_RUN(
+  [
+#include <immintrin.h>
+#include <mm_malloc.h>
+  int main()
+  {
+  __m256d a, b;
+  const unsigned int vector_bytes = sizeof(__m256d);
+  const int n_vectors = vector_bytes/sizeof(double);
+  __m256d * data =
+    reinterpret_cast<__m256d*>(_mm_malloc (2*vector_bytes, vector_bytes));
+  double * ptr = reinterpret_cast<double*>(&a);
+  ptr[0] = (volatile double)(1.0);
+  for (int i=1; i<n_vectors; ++i)
+    ptr[i] = 0.0;
+  b = _mm256_set1_pd ((volatile double)(2.25));
+  data[0] = _mm256_add_pd (a, b);
+  data[1] = _mm256_mul_pd (b, data[0]);
+  ptr = reinterpret_cast<double*>(&data[1]);
+  unsigned int return_value = 0;
+  if (ptr[0] != 7.3125)
+    return_value = 1;
+  for (int i=1; i<n_vectors; ++i)
+    if (ptr[i] != 5.0625)
+      return_value = 1;
+  _mm_free (data);
+  return return_value;
+  }
+  ],
+  [
+  AC_MSG_RESULT(yes)
+  AC_DEFINE(DEAL_II_COMPILER_VECTORIZATION_LEVEL, 2,
+            [Equal to 2 if the compiler and processor support AVX])
+  ],
+  [
+  AC_MSG_RESULT(no)
+  AC_DEFINE(DEAL_II_COMPILER_VECTORIZATION_LEVEL, 1,
+            [Equal to 1 if the compiler and processor support SSE2])
+  ])
+  ],
+  [
+  AC_DEFINE(DEAL_II_COMPILER_VECTORIZATION_LEVEL, 0,
+            [Equal to 0 if no vectorization is available])
+  AC_MSG_RESULT(no)
+  ])
+])
+
+
+
+dnl -------------------------------------------------------------
+dnl Check whether the compiler allows to use arithmetic operations
+dnl +-*/ on vectorized data types or whether we need to use
+dnl _mm_add_pd for addition and so on. +-*/ is preferred because
+dnl it allows the compiler to choose other optimizations like
+dnl fused multiply add, whereas _mm_add_pd explicitly enforces the
+dnl assembler command.
+dnl
+dnl Usage: DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
+dnl
+dnl -------------------------------------------------------------
+AC_DEFUN(DEAL_II_CHECK_VECTOR_ARITHMETICS, dnl
+[
+  AC_MSG_CHECKING(whether compiler supports vector arithmetics)
+  AC_LANG(C++)
+  CXXFLAGS="$CXXFLAGSG"
+  AC_TRY_COMPILE(
+  [
+#include <emmintrin.h>
+  ],
+  [
+  __m128d a, b;
+  a = _mm_set_sd (1.0);
+  b = _mm_set1_pd (2.1);
+  __m128d c = a + b;
+  __m128d d = b - c;
+  __m128d e = c * a + d;
+  __m128d f = e/a;
+  (void)f;
+  ],
+  [
+  AC_MSG_RESULT(yes)
+  AC_DEFINE(DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS, 1,
+            [Defined if the compiler can use arithmetic operations on
+            vectorized data types])
+  ],
+  [
+  AC_MSG_RESULT(no)
+  ])
+])
+
+
+
dnl -------------------------------------------------------------
dnl In some cases, -threads (or whatever else command line option)
dnl switches on some preprocessor flags. If this is not the case,
@@ -4953,106 +5113,6 @@ AC_DEFUN(DEAL_II_CHECK_ADVANCE_WARNING, dnl
-dnl -------------------------------------------------------------
-dnl Check whether the compiler allows to use arithmetic operations
-dnl +-*/ on vectorized data types or whether we need to use
-dnl _mm_add_pd for addition and so on. +-*/ is preferred because
-dnl it allows the compiler to choose other optimizations like
-dnl fused multiply add, whereas _mm_add_pd explicitly enforces the
-dnl assembler command.
-dnl
-dnl Usage: DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
-dnl
-dnl -------------------------------------------------------------
-AC_DEFUN(DEAL_II_CHECK_VECTOR_ARITHMETICS, dnl
-
-[
- AC_MSG_CHECKING(whether compiler supports vector arithmetics)
- AC_LANG(C++)
- CXXFLAGS="$CXXFLAGSG"
- AC_TRY_COMPILE(
- [
-#include
- ],
- [
- __m128d a, b;
- a = _mm_set_sd (1.0);
- b = _mm_set1_pd (2.1);
- __m128d c = a + b;
- __m128d d = b - c;
- __m128d e = c * a + d;
- __m128d f = e/a;
- (void)f;
- ],
- [
- AC_MSG_RESULT(yes)
- AC_DEFINE(DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS, 1,
- [Defined if the compiler can use arithmetic operations on
- vectorized data types])
- ],
- [
- AC_MSG_RESULT(no)
- ])
-])
-
-
-
-dnl -------------------------------------------------------------
-dnl Check for existence of a strong inline function. This can be used
-dnl to force a compiler to inline some functions also at low optimization
-dnl levels. We use it in vectorized data types, where we want inlining
-dnl also for debug code. If we cannot find a good inlining routine, we
-dnl just use 'inline'.
-dnl
-dnl Usage: DEAL_II_ALWAYS_INLINE
-dnl
-dnl -------------------------------------------------------------
-AC_DEFUN(DEAL_II_CHECK_ALWAYS_INLINE, dnl
-
-[
- if test "$GXX" = "yes" ; then
- dnl force inline for gcc compiler
- TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
- else
- case "$GXX_VERSION" in
- clang*)
- dnl force inline for clang compiler
- TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
- ;;
-
- *)
- dnl for all other compilers, try with __forceinline
- TEMP_ALWAYS_INLINE=__forceinline
- ;;
- esac
- fi
- AC_MSG_CHECKING(for forced inlining)
- AC_LANG(C++)
- CXXFLAGS="$CXXFLAGSG"
- AC_TRY_COMPILE(
- [
- $TEMP_ALWAYS_INLINE
- void f() {};
- ],
- [
- f();
- ],
- [
- AC_MSG_RESULT(yes)
- AC_DEFINE_UNQUOTED(DEAL_II_ALWAYS_INLINE, $TEMP_ALWAYS_INLINE,
- [Forces the compiler to always inline functions, also in
- debug mode])
- ],
- [
- AC_MSG_RESULT(no)
- AC_DEFINE(DEAL_II_ALWAYS_INLINE, inline,
- [Forces the compiler to always inline functions, also in
- debug mode])
- ])
-])
-
-
-
dnl -------------------------------------------------------------
dnl
dnl Usage: DEAL_II_CHECK_MIN_VECTOR_CAPACITY
@@ -7425,6 +7485,7 @@ AC_DEFUN(DEAL_II_WITH_LAPACK, dnl
AC_DEFINE([HAVE_LIBLAPACK], [1],
[Defined if deal.II was configured with LAPACK support])
AC_SUBST(DEAL_II_USE_LAPACK, "yes")
+ USE_CONTRIB_LAPACK='yes'
],
[AC_MSG_ERROR([LAPACK library $lapack not found])]
)
@@ -7531,6 +7592,7 @@ AC_DEFUN(DEAL_II_WITH_BLAS, dnl
],,$F77LIBS)
AC_SUBST(DEAL_II_USE_BLAS, "yes")
AC_SUBST(NEEDS_F77LIBS, "yes")
+ USE_CONTRIB_BLAS='yes'
else
DEAL_II_CHECK_BLAS_FRAMEWORK
if test "x$framework_works" != "xyes"; then
@@ -7544,6 +7606,7 @@ AC_DEFUN(DEAL_II_WITH_BLAS, dnl
AC_SUBST(DEAL_II_USE_BLAS, "yes")
AC_SUBST(NEEDS_F77LIBS, "yes")
+ USE_CONTRIB_BLAS='yes'
fi
fi
fi
diff --git a/deal.II/common/Make.global_options.in b/deal.II/common/Make.global_options.in
index e5bc799f72..cd3121065b 100644
--- a/deal.II/common/Make.global_options.in
+++ b/deal.II/common/Make.global_options.in
@@ -76,6 +76,9 @@ DEAL_II_TRILINOS_VERSION_MINOR = @DEAL_II_TRILINOS_VERSION_MINOR@
DEAL_II_TRILINOS_VERSION_SUBMINOR = @DEAL_II_TRILINOS_VERSION_SUBMINOR@
DEAL_II_TRILINOS_LIBPREFIX = @DEAL_II_TRILINOS_LIBPREFIX@
+USE_CONTRIB_BLAS = @USE_CONTRIB_BLAS@
+USE_CONTRIB_LAPACK = @USE_CONTRIB_LAPACK@
+
USE_CONTRIB_MUMPS = @USE_CONTRIB_MUMPS@
DEAL_II_MUMPS_DIR = @DEAL_II_MUMPS_DIR@
DEAL_II_SCALAPACK_DIR = @DEAL_II_SCALAPACK_DIR@
diff --git a/deal.II/configure b/deal.II/configure
index 407e5fbeb9..0d70c483b9 100755
--- a/deal.II/configure
+++ b/deal.II/configure
@@ -1,5 +1,5 @@
#! /bin/sh
-# From configure.in Revision: 25395 .
+# From configure.in Revision: 25397 .
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.68 for deal.II 7.2.pre.
#
@@ -625,6 +625,8 @@ USE_CONTRIB_METIS
TECPLOT_INCLUDE_DIR
USE_CONTRIB_HSL
HSL_INCLUDE_DIR
+USE_CONTRIB_LAPACK
+USE_CONTRIB_BLAS
NEEDS_F77LIBS
DEAL_II_USE_BLAS
DEAL_II_P4EST_DIR
@@ -4387,6 +4389,31 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }; all_cxx1x_classes_available=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for std::type_traits" >&5
+$as_echo_n "checking for std::type_traits... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <type_traits>
+int
+main ()
+{
+ const bool m0 = std::is_trivial<double>::value;
+ const bool m1 = std::is_standard_layout<double>::value;
+ const bool m2 = std::is_pod<double>::value;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }; all_cxx1x_classes_available=no
+
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
@@ -5126,6 +5153,31 @@ else
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
$as_echo "no" >&6; }; all_cxx1x_classes_available=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for std::type_traits" >&5
+$as_echo_n "checking for std::type_traits... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+#include <type_traits>
+int
+main ()
+{
+ const bool m0 = std::is_trivial<double>::value;
+ const bool m1 = std::is_standard_layout<double>::value;
+ const bool m2 = std::is_pod<double>::value;
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }; all_cxx1x_classes_available=no
+
fi
rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
@@ -8389,116 +8441,6 @@ rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports vector arithmetics" >&5
-$as_echo_n "checking whether compiler supports vector arithmetics... " >&6; }
- ac_ext=cpp
-ac_cpp='$CXXCPP $CPPFLAGS'
-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
-
- CXXFLAGS="$CXXFLAGSG"
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-#include <emmintrin.h>
-
-int
-main ()
-{
-
- __m128d a, b;
- a = _mm_set_sd (1.0);
- b = _mm_set1_pd (2.1);
- __m128d c = a + b;
- __m128d d = b - c;
- __m128d e = c * a + d;
- __m128d f = e/a;
- (void)f;
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_cxx_try_compile "$LINENO"; then :
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-
-$as_echo "#define DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS 1" >>confdefs.h
-
-
-else
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
-
- if test "$GXX" = "yes" ; then
- TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
- else
- case "$GXX_VERSION" in
- clang*)
- TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
- ;;
-
- *)
- TEMP_ALWAYS_INLINE=__forceinline
- ;;
- esac
- fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for forced inlining" >&5
-$as_echo_n "checking for forced inlining... " >&6; }
- ac_ext=cpp
-ac_cpp='$CXXCPP $CPPFLAGS'
-ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
-
- CXXFLAGS="$CXXFLAGSG"
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
- $TEMP_ALWAYS_INLINE
- void f() {};
-
-int
-main ()
-{
-
- f();
-
- ;
- return 0;
-}
-_ACEOF
-if ac_fn_cxx_try_compile "$LINENO"; then :
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-
-cat >>confdefs.h <<_ACEOF
-#define DEAL_II_ALWAYS_INLINE $TEMP_ALWAYS_INLINE
-_ACEOF
-
-
-else
-
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-
-$as_echo "#define DEAL_II_ALWAYS_INLINE inline" >>confdefs.h
-
-
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-
ac_ext=cpp
ac_cpp='$CXXCPP $CPPFLAGS'
ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
@@ -11964,6 +11906,7 @@ fi
NEEDS_F77LIBS="yes"
+ USE_CONTRIB_BLAS='yes'
else
if (echo $target | grep apple-darwin > /dev/null) ; then
@@ -12092,6 +12035,7 @@ fi
NEEDS_F77LIBS="yes"
+ USE_CONTRIB_BLAS='yes'
fi
fi
fi
@@ -12100,6 +12044,8 @@ fi
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for HSL subroutines" >&5
$as_echo_n "checking for HSL subroutines... " >&6; }
hsl_subroutines=""
@@ -13456,6 +13402,7 @@ $as_echo "#define HAVE_LIBLAPACK 1" >>confdefs.h
DEAL_II_USE_LAPACK="yes"
+ USE_CONTRIB_LAPACK='yes'
else
as_fn_error $? "LAPACK library $lapack not found" "$LINENO" 5
@@ -13614,9 +13561,11 @@ $as_echo "PowerPC64" >&6; }
$as_echo "x86 derivate ($withcpu)" >&6; }
case "$GXX_VERSION" in
gcc*)
- CXXFLAGSO="$CXXFLAGSO -march=$withcpu"
+ CXXFLAGSG="$CXXFLAGSG -march=$withcpu"
+ CXXFLAGSO="$CXXFLAGSO -march=$withcpu"
- F77FLAGSO="$F77FLAGSO -march=$withcpu"
+ F77FLAGSG="$F77FLAGSG -march=$withcpu"
+ F77FLAGSO="$F77FLAGSO -march=$withcpu"
;;
esac
;;
@@ -13626,13 +13575,16 @@ $as_echo "x86 derivate ($withcpu)" >&6; }
$as_echo "native processor variant" >&6; }
case "$GXX_VERSION" in
gcc*)
- CXXFLAGSO="$CXXFLAGSO -march=native"
+ CXXFLAGSG="$CXXFLAGSG -march=native"
+ CXXFLAGSO="$CXXFLAGSO -march=native"
- F77FLAGSO="$F77FLAGSO -march=native"
+ F77FLAGSG="$F77FLAGSG -march=native"
+ F77FLAGSO="$F77FLAGSO -march=native"
;;
intel_icc*)
CXXFLAGSO="$CXXFLAGSO -xhost"
+ CXXFLAGSG="$CXXFLAGSG -xhost"
;;
esac
;;
@@ -13709,6 +13661,181 @@ rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ CXXFLAGS="$CXXFLAGSG"
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether CPU supports SSE2" >&5
+$as_echo_n "checking whether CPU supports SSE2... " >&6; }
+ if test "$cross_compiling" = yes; then :
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run test program while cross compiling
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <emmintrin.h>
+#include <mm_malloc.h>
+ int main()
+ {
+ __m128d a, b;
+ const unsigned int vector_bytes = sizeof(__m128d);
+ const int n_vectors = vector_bytes/sizeof(double);
+ __m128d * data =
+ reinterpret_cast<__m128d*>(_mm_malloc (2*vector_bytes, vector_bytes));
+  double * ptr = reinterpret_cast<double*>(&a);
+  ptr[0] = (volatile double)(1.0);
+  for (int i=1; i<n_vectors; ++i)
+    ptr[i] = 0.0;
+  b = _mm_set1_pd ((volatile double)(2.25));
+  data[0] = _mm_add_pd (a, b);
+  data[1] = _mm_mul_pd (b, data[0]);
+  ptr = reinterpret_cast<double*>(&data[1]);
+ unsigned int return_value = 0;
+ if (ptr[0] != 7.3125)
+ return_value = 1;
+  for (int i=1; i<n_vectors; ++i)
+    if (ptr[i] != 5.0625)
+      return_value = 1;
+  _mm_free (data);
+  return return_value;
+  }
+
+_ACEOF
+if ac_fn_cxx_try_run "$LINENO"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether CPU supports AVX" >&5
+$as_echo_n "checking whether CPU supports AVX... " >&6; }
+ if test "$cross_compiling" = yes; then :
+ { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error $? "cannot run test program while cross compiling
+See \`config.log' for more details" "$LINENO" 5; }
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <immintrin.h>
+#include <mm_malloc.h>
+ int main()
+ {
+ __m256d a, b;
+ const unsigned int vector_bytes = sizeof(__m256d);
+ const int n_vectors = vector_bytes/sizeof(double);
+ __m256d * data =
+ reinterpret_cast<__m256d*>(_mm_malloc (2*vector_bytes, vector_bytes));
+  double * ptr = reinterpret_cast<double*>(&a);
+  ptr[0] = (volatile double)(1.0);
+  for (int i=1; i<n_vectors; ++i)
+    ptr[i] = 0.0;
+  b = _mm256_set1_pd ((volatile double)(2.25));
+  data[0] = _mm256_add_pd (a, b);
+  data[1] = _mm256_mul_pd (b, data[0]);
+  ptr = reinterpret_cast<double*>(&data[1]);
+ unsigned int return_value = 0;
+ if (ptr[0] != 7.3125)
+ return_value = 1;
+  for (int i=1; i<n_vectors; ++i)
+    if (ptr[i] != 5.0625)
+      return_value = 1;
+  _mm_free (data);
+  return return_value;
+  }
+
+_ACEOF
+if ac_fn_cxx_try_run "$LINENO"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+$as_echo "#define DEAL_II_COMPILER_VECTORIZATION_LEVEL 2" >>confdefs.h
+
+
+else
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+$as_echo "#define DEAL_II_COMPILER_VECTORIZATION_LEVEL 1" >>confdefs.h
+
+
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+
+else
+
+
+$as_echo "#define DEAL_II_COMPILER_VECTORIZATION_LEVEL 0" >>confdefs.h
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+ conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+
+
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports vector arithmetics" >&5
+$as_echo_n "checking whether compiler supports vector arithmetics... " >&6; }
+ ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+ CXXFLAGS="$CXXFLAGSG"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+#include <emmintrin.h>
+
+int
+main ()
+{
+
+ __m128d a, b;
+ a = _mm_set_sd (1.0);
+ b = _mm_set1_pd (2.1);
+ __m128d c = a + b;
+ __m128d d = b - c;
+ __m128d e = c * a + d;
+ __m128d f = e/a;
+ (void)f;
+
+ ;
+ return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+$as_echo "#define DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS 1" >>confdefs.h
+
+
+else
+
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: " >&5
$as_echo "" >&6; }
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ---------------- configuring other programs -----------------" >&5
diff --git a/deal.II/configure.in b/deal.II/configure.in
index 0402549871..f2e3b1c4dd 100644
--- a/deal.II/configure.in
+++ b/deal.II/configure.in
@@ -245,8 +245,6 @@ DEAL_II_HAVE_BUILTIN_EXPECT
DEAL_II_HAVE_VERBOSE_TERMINATE
DEAL_II_HAVE_GLIBC_STACKTRACE
DEAL_II_HAVE_DEMANGLER
-DEAL_II_CHECK_VECTOR_ARITHMETICS
-DEAL_II_CHECK_ALWAYS_INLINE
DEAL_II_CHECK_MIN_VECTOR_CAPACITY
DEAL_II_CHECK_ABORT
DEAL_II_CHECK_GETRUSAGE
@@ -590,6 +588,8 @@ AC_ARG_WITH(blas,
is given, use -lblas. Default is to use -lblas only
if required by other libraries.],
DEAL_II_WITH_BLAS($withval))
+AC_SUBST(USE_CONTRIB_BLAS)
+AC_SUBST(USE_CONTRIB_LAPACK)
DEAL_II_CONFIGURE_HSL
AC_SUBST(USE_CONTRIB_HSL)
@@ -696,10 +696,15 @@ AC_MSG_RESULT(---------------- checking compiler flags --------------------)
dnl Possibly add some flags if optimizations are requested
DEAL_II_CHECK_CPU_OPTIMIZATIONS
-dnl Last check: test whether CXXFLAGS and F77FLAGS are ok
+dnl Test whether CXXFLAGS and F77FLAGS are ok
DEAL_II_CHECK_CXXFLAGS_CONSISTENCY
DEAL_II_CHECK_F77FLAGS_CONSISTENCY
+dnl Last check: check whether the compiler and host system allows for
+dnl vectorization
+DEAL_II_DETECT_VECTORIZATION_LEVEL
+DEAL_II_CHECK_VECTOR_ARITHMETICS
+
dnl -------------------------------------------------------------
dnl Third party programs
dnl -------------------------------------------------------------
diff --git a/deal.II/doc/doxygen/tutorial/navbar.html b/deal.II/doc/doxygen/tutorial/navbar.html
index daf4ff0109..7608b3ca95 100644
--- a/deal.II/doc/doxygen/tutorial/navbar.html
+++ b/deal.II/doc/doxygen/tutorial/navbar.html
@@ -79,17 +79,19 @@
35
36
- 38
+ 37
+ 38
39
- 40
- 41
+ 40
+ 41
43
- 44
- 45
+ 44
+ 45
46
+ 48
diff --git a/deal.II/doc/doxygen/tutorial/steps.cmapx b/deal.II/doc/doxygen/tutorial/steps.cmapx
index f000285fac..66bc3f06b4 100644
--- a/deal.II/doc/doxygen/tutorial/steps.cmapx
+++ b/deal.II/doc/doxygen/tutorial/steps.cmapx
@@ -1,45 +1,47 @@
diff --git a/deal.II/doc/doxygen/tutorial/steps.pl b/deal.II/doc/doxygen/tutorial/steps.pl
index b18b68ebb0..191cdb62e6 100644
--- a/deal.II/doc/doxygen/tutorial/steps.pl
+++ b/deal.II/doc/doxygen/tutorial/steps.pl
@@ -11,8 +11,8 @@ use strict;
my @steps = (1,2,3,4,5,6,7,8,9,
10,11,12,13,14,15,16,17,18,19,
20,21,22,23,24,25, 27,28,29,
- 30,31,32,33,34,35,36, 38,39,
- 40,41, 43,44,45,46);
+ 30,31,32,33,34,35,36,37,38,39,
+ 40,41, 43,44,45,46, 48);
# List of additional node attributes to highlight purpose and state of the example
diff --git a/deal.II/doc/doxygen/tutorial/steps.png b/deal.II/doc/doxygen/tutorial/steps.png
index 81836339f0..583cbaa611 100644
Binary files a/deal.II/doc/doxygen/tutorial/steps.png and b/deal.II/doc/doxygen/tutorial/steps.png differ
diff --git a/deal.II/doc/doxygen/tutorial/toc-list.html b/deal.II/doc/doxygen/tutorial/toc-list.html
index a3a8fc2b81..f00cd561a7 100644
--- a/deal.II/doc/doxygen/tutorial/toc-list.html
+++ b/deal.II/doc/doxygen/tutorial/toc-list.html
@@ -244,6 +244,12 @@
problem. The Schrödinger wave equation.
+
+ Step-37 |
+ Solving a Poisson problem with a multilevel preconditioner without
+ explicitly storing the matrix (a matrix-free method).
+ |
+
Step-38 |
Solving the Laplace-Beltrami equation on curved manifolds embedded
@@ -289,6 +295,13 @@
| Step-46 |
Coupling different kinds of equations in different parts of the domain.
|
+
+
+ Step-48 |
+ Explicit time stepping for the Sine–Gordon equation based on
+ a diagonal mass matrix. Efficient implementation of (nonlinear) finite
+ element operators.
+ |
diff --git a/deal.II/doc/doxygen/tutorial/toc-topics.html b/deal.II/doc/doxygen/tutorial/toc-topics.html
index f54f2a6cee..d0b0662b02 100644
--- a/deal.II/doc/doxygen/tutorial/toc-topics.html
+++ b/deal.II/doc/doxygen/tutorial/toc-topics.html
@@ -75,7 +75,8 @@
Step-9,
Step-28,
Step-32,
- Step-44
+ Step-44,
+ Step-48
Multithreading
|
@@ -265,6 +266,16 @@
+
+ |
+
+ Step-37,
+ Step-48
+ |
+ Matrix-free methods
+ |
+
+
|
@@ -294,6 +305,7 @@
Step-16,
Step-31,
Step-32,
+ Step-37,
Step-39,
Step-41,
Step-43
@@ -433,7 +445,8 @@
Step-23,
Step-24,
- Step-25
+ Step-25,
+ Step-48
|
The wave equation, in linear and nonlinear variants
|
@@ -552,7 +565,9 @@
|
Step-23,
Step-24,
- Step-25 |
+ Step-25,
+ Step-48
+
The wave equation, in linear and nonlinear variants
|
diff --git a/deal.II/examples/step-25/doc/results.dox b/deal.II/examples/step-25/doc/results.dox
index 4abd99cca9..e7e332e453 100644
--- a/deal.II/examples/step-25/doc/results.dox
+++ b/deal.II/examples/step-25/doc/results.dox
@@ -128,10 +128,16 @@ is defined as
contributions to the energy are positive, and so that decaying solutions have
finite energy on unbounded domains.)
-Beyond this, clearly, adaptivity (i.e. time-adaptive grids) would be of
-interest to problems like these. Their complexity leads us to leave this out
-of this program again, though the general comments in the introduction of @ref
-step_23 "step-23" remain true.
-
-
-
+Beyond this, there are two obvious areas for improvement:
+
+- Clearly, adaptivity (i.e. time-adaptive grids) would be of interest
+ to problems like these. Their complexity leads us to leave this out
+ of this program again, though the general comments in the
+ introduction of @ref step_23 "step-23" remain true.
+
+- Faster schemes to solve this problem. While computers today are
+ plenty fast enough to solve 2d and, frequently, even 3d stationary
+ problems within not too much time, time dependent problems present
+ an entirely different class of problems. We address this topic in
+ step-48 where we show how to solve this problem in parallel and
+ without assembling or inverting any matrix at all.
diff --git a/deal.II/examples/step-37/Makefile b/deal.II/examples/step-37/Makefile
index 0b41eb0b8d..15898a61c7 100644
--- a/deal.II/examples/step-37/Makefile
+++ b/deal.II/examples/step-37/Makefile
@@ -45,6 +45,21 @@ clean-up-files = *gmv *gnuplot *gpl *eps *pov *vtk *ucd *.d2
# settings
include $D/common/Make.global_options
+################################################################
+# This example program will only work if Trilinos is installed. If this
+# # is not the case, then simply redefine the main targets to do nothing
+ifneq ($(USE_CONTRIB_LAPACK),yes)
+default run clean:
+ @echo
+ @echo "==========================================================="
+ @echo "= This program can only be run with LAPACK enabled. ="
+ @echo "==========================================================="
+ @echo
+else
+#
+#################################################################
+
+
# Since the whole project consists of only one file, we need not
# consider difficult dependencies. We only have to declare the
@@ -141,4 +156,5 @@ Makefile.dep: $(target).cc Makefile \
# them:
include Makefile.dep
+endif # USE_CONTRIB_LAPACK
diff --git a/deal.II/examples/step-37/doc/intro.dox b/deal.II/examples/step-37/doc/intro.dox
index 9d488ece7f..54c3686564 100644
--- a/deal.II/examples/step-37/doc/intro.dox
+++ b/deal.II/examples/step-37/doc/intro.dox
@@ -4,62 +4,94 @@
This program was contributed by Katharina Kormann and Martin
Kronbichler.
-This program is currently under construction.
-
-The algorithm for the matrix-vector product is built upon the report "MPI
-parallelization of a cell-based matrix-vector product for finite elements. An
-application from quantum dynamics" by Katharina Kormann, Uppsala
-University, June 2009.
+The algorithm for the matrix-vector product is built upon the preprint
+"A generic interface for parallel cell-based finite element operator
+application" by Martin Kronbichler and Katharina Kormann, Uppsala
+University, October 2011, and the paper "Parallel finite element operator
+application: Graph partitioning and coloring" by Katharina Kormann and
+Martin Kronbichler, in: Proceedings of the 7th IEEE International
+Conference on e-Science, 2011.
Introduction
-This example shows how to implement a matrix-free method, that is, a method
-that does not explicitly store the matrix elements, for a
-second-order Poisson equation with variable coefficients on a fairly
-unstructured mesh representing a circle.
+This example shows how to implement a matrix-free method, that is, a
+method that does not explicitly store the matrix elements, for a
+second-order Poisson equation with variable coefficients on a
+hypercube. The elliptic equation will be solved with a multigrid
+method.
+
+The major motivation for matrix-free methods is the fact that today
+access to main memory (i.e., for objects that don't fit in the cache)
+has become the bottleneck in scientific computing: To perform a
+matrix-vector product, modern CPUs spend far more time waiting for
+data to arrive from memory than on actually doing the floating point
+multiplications and additions. Thus, if we could replace looking up
+matrix elements in memory with re-computing them — or rather, the
+operator represented by these entries — we may win in terms of
+overall run-time (even if this requires a significant number of
+additional floating point operations). That said, a trivial
+implementation will not realize these gains; one needs to look
+carefully at what it takes to make this happen. This tutorial program
+(and the papers referenced above) shows how one can implement such a
+scheme and demonstrates the speedup that can be obtained.
+
+
+The test case
+
+In this example, we consider the Poisson problem @f{eqnarray*} -
+\nabla \cdot a(\mathbf x) \nabla u &=& 1, \\ u &=& 0 \quad \text{on}\
+\partial \Omega @f} where $a(\mathbf x)$ is a variable coefficient.
+Below, we explain how to implement a matrix-vector product for this
+problem without explicitly forming the matrix. The construction can,
+of course, be done in a similar way for other equations as well.
+
+We choose as domain $\Omega=[0,1]^3$ and $a(\mathbf x)=\frac{1}{0.05 +
+2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the
+origin but the domain is not, we will end up with a non-symmetric
+solution.
+
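+As an illustration (a sketch only, assuming a Function subclass named
+Coefficient as in step-5; it is not literally the code of this program),
+such a coefficient could be written as:
+@code
+template <int dim>
+class Coefficient : public Function<dim>
+{
+  public:
+    virtual double value (const Point<dim>  &p,
+                          const unsigned int /*component*/ = 0) const
+    {
+      // a(x) = 1 / (0.05 + 2 |x|^2), evaluated at the point p
+      return 1. / (0.05 + 2. * p.square());
+    }
+};
+@endcode
+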
Matrix-vector product implementation
In order to find out how we can write a code that performs a matrix-vector
 product, but does not need to store the matrix elements, let us start by
-looking how some finite-element related matrix A is assembled:
+looking at how a finite element matrix A is assembled:
@f{eqnarray*}
-A = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T A_\mathrm{cell}
-P_\mathrm{cell,{loc-glob}}.
+A = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}}
+P_{\mathrm{cell,{loc-glob}}}^T A_{\mathrm{cell}} P_{\mathrm{cell,{loc-glob}}}.
@f}
In this formula, the matrix Pcell,loc-glob is a rectangular
matrix that defines the index mapping from local degrees of freedom in the
current cell to the global degrees of freedom. The information from which this
operator can be built is usually encoded in the local_dof_indices
-variable we have always used in the assembly of matrices.
+variable we have always used in the assembly of matrices. Moreover,
+Acell denotes the cell-operation associated with A.
If we are to perform a matrix-vector product, we can hence use that
@f{eqnarray*}
-y &=& A\cdot x = \left(\sum_{\text{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
-A_\mathrm{cell} P_\mathrm{cell,{loc-glob}}\right) \cdot x
+y &=& A\cdot u = \left(\sum_{\text{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
+A_\mathrm{cell} P_\mathrm{cell,{loc-glob}}\right) \cdot u
\\
&=& \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
-A_\mathrm{cell} x_\mathrm{cell}
+A_\mathrm{cell} u_\mathrm{cell}
\\
&=& \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
-y_\mathrm{cell},
+v_\mathrm{cell},
@f}
-where xcell are the values of x at the degrees of freedom
-of the respective cell, and xcell correspondingly for the result.
+where ucell are the values of u at the degrees of freedom
+of the respective cell, and
+vcell=Acellucell
+correspondingly for the result.
A naive attempt to implement the local action of the Laplacian would hence be
to use the following code:
@code
-MatrixFree::vmult (Vector &dst,
- const Vector &src) const
+MatrixFree::vmult (Vector &dst,
+ const Vector &src) const
{
dst = 0;
 QGauss<dim> quadrature_formula(fe.degree+1);
 FEValues<dim> fe_values (fe, quadrature_formula,
- update_gradients | update_JxW_values);
+ update_gradients | update_JxW_values|
+ update_quadrature_points);
const unsigned int dofs_per_cell = fe.dofs_per_cell;
const unsigned int n_q_points = quadrature_formula.size();
@@ -67,6 +99,8 @@ MatrixFree::vmult (Vector &dst,
 FullMatrix<double> cell_matrix (dofs_per_cell, dofs_per_cell);
 Vector<double> cell_src (dofs_per_cell),
 cell_dst (dofs_per_cell);
+ const Coefficient<dim> coefficient;
+ std::vector<double> coefficient_values(n_q_points);
 std::vector<unsigned int> local_dof_indices (dofs_per_cell);
@@ -77,13 +111,16 @@ MatrixFree::vmult (Vector &dst,
{
cell_matrix = 0;
fe_values.reinit (cell);
+ coefficient.value_list(fe_values.get_quadrature_points(),
+ coefficient_values);
- for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices);
@@ -101,8 +138,9 @@ MatrixFree::vmult (Vector &dst,
Here we neglected boundary conditions as well as any hanging nodes we may
have, though neither would be very difficult to include using the
ConstraintMatrix class. Note how we first generate the local matrix in the
-usual way. To form the actual product as expressed in the above formula, we
-read in the values of src
of the cell-related degrees of freedom
+usual way as a sum over all quadrature points for each local matrix entry.
+To form the actual product as expressed in the above formula, we
+extract the values of src
of the cell-related degrees of freedom
(the action of Pcell,loc-glob), multiply by the local matrix
(the action of Acell), and finally add the result to the
destination vector dst
(the action of
@@ -110,8 +148,8 @@ destination vector dst
(the action of
is not more difficult than that, in principle.
While this code is completely correct, it is very slow. For every cell, we
-generate a local matrix, which takes three nested loops with as many
-elements as there are degrees of freedom on the actual cell to compute. The
+generate a local matrix, which takes three nested loops with loop length equal
+to the number of local degrees of freedom to compute. The
multiplication itself is then done by two nested loops, which means that it
is much cheaper.
@@ -125,110 +163,85 @@ element of Bcell is given by
fe_values.shape_grad(i,q)[d]
. The matrix consists of
dim*n_q_points
rows and @p dofs_per_cell columns). The matrix
Dcell is diagonal and contains the values
-fe_values.JxW(q)
(or, rather, @p dim copies of it).
-
-Every numerical analyst learns in one of her first classes that for
-forming a product of the form
-@f{eqnarray*}
-A_\mathrm{cell}\cdot x_\mathrm{cell} = B_\mathrm{cell} D_\mathrm{cell}
- B_\mathrm{cell}^T \cdot x_\mathrm{cell},
-@f}
-one should never form the matrix-matrix products, but rather multiply with the
-vector from right to left so that only three successive matrix-vector products
-are formed. To put this into code, we can write:
-@code
-...
- for (; cell!=endc; ++cell)
- {
- fe_values.reinit (cell);
-
- cell->get_dof_indices (local_dof_indices);
-
- for (unsigned int i=0; id is a not really a loop, rather two or three
-operations). What happens is as follows: We first transform the vector of
-values on the local dofs to a vector of gradients on the quadrature
-points. In the second loop, we multiply these gradients by the integration
-weight. The third loop applies the second gradient (in transposed form), so
-that we get back to a vector of (Laplacian) values on the cell dofs.
-
-This improves the situation a lot and reduced the complexity of the product
-from something like $\mathcal {O}(\mathrm{dofs\_per\_cell}^3)$ to $\mathcal
-{O}(\mathrm{dofs\_per\_cell}^2)$. In fact, all the remainder is just to make
-a slightly more clever use of data in order to gain some extra speed. It does
-not change the code structure, though.
-
-The bottleneck in the above code is the operations done by the call
-fe_values.reinit(cell)
, which take about as much time as the
+fe_values.JxW(q) * coefficient_values[q]
(or, rather, @p
+dim copies of each of these values). This kind of representation of
+finite element matrices can often be found in the engineering literature.
+
+When the cell-based matrix is applied to a vector @f{eqnarray*}
+A_\mathrm{cell}\cdot u_\mathrm{cell} = B_\mathrm{cell}^T
+D_\mathrm{cell} B_\mathrm{cell} \cdot u_\mathrm{cell}, @f} one would
+then never form the matrix-matrix products, but rather multiply with
+the vector from right to left so that only three successive
+matrix-vector products are formed. This removes the three nested
+loops in the calculation of the local matrix. What happens is as
+follows: We first transform the vector of values on the local dofs to
+a vector of gradients on the quadrature points. In the second loop, we
+multiply these gradients by the integration weight. The third loop
+applies the second gradient (in transposed form), so that we get back
+to a vector of (Laplacian) values on the cell dofs. This reduces the
+complexity of the work on one cell from something like $\mathcal
+{O}(\mathrm{dofs\_per\_cell}^3)$ to $\mathcal
+{O}(\mathrm{dofs\_per\_cell}^2)$.
+
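+To connect this back to code, here is a minimal sketch of these three
+steps (the names B_cell and temp_vector are assumptions made only for this
+illustration: B_cell is a FullMatrix with dim*n_q_points rows and
+dofs_per_cell columns holding the gradients, and its rows are assumed to
+be ordered quadrature-point major):
+@code
+Vector<double> temp_vector (dim*n_q_points);
+
+// values on the cell dofs -> gradients on the quadrature points
+B_cell.vmult (temp_vector, cell_src);
+
+// multiply by the integration weight (and the coefficient)
+for (unsigned int q=0; q<n_q_points; ++q)
+  for (unsigned int d=0; d<dim; ++d)
+    temp_vector(q*dim+d) *= fe_values.JxW(q) * coefficient_values[q];
+
+// apply the transposed gradient matrix to get back to the cell dofs
+B_cell.Tvmult (cell_dst, temp_vector);
+@endcode
+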
+The bottleneck in the above code is the operations done by the call to
+FEValues::reinit for every cell
, which take about as much time as the
other steps together (at least if the mesh is unstructured; deal.II can
recognize that the gradients are often unchanged on structured meshes). That
is certainly not ideal and we would like to do better than this. What the
reinit function does is to calculate the gradient in real space by
transforming the gradient on the reference cell using the Jacobian of the
transformation from real to reference cell. This is done for each basis
-function on the cell, for each quadrature point. The Jacobian does not depend on
-the basis function, but it is different on different quadrature points in
-general. The trick is now to factor out the Jacobian transformation and first
-apply the operation that leads us to temp_vector
only with the
-gradient on the reference cell. That transforms the vector of values on the
-local dofs to a vector of gradients on the quadrature points. There, we first
-apply the Jacobian that we factored out from the gradient, then we apply the
-weights of the quadrature, and we apply with the transposed Jacobian for
-preparing the third loop which again uses the gradients on the unit cell.
+function on the cell, for each quadrature point. The Jacobian does not depend
+on the basis function, but it is different on different quadrature points in
+general. If you only build the matrix once as we've done in all
+previous tutorial programs, there is nothing one can do about the need
+to call FEValues::reinit on every cell since this transformation has
+to be done when we want to compute the local matrix elements.
+
+However, in a matrix-free implementation, we are not interested in
+applying the matrix only once. Rather, in iterative solvers, we need
+to expect that we have to apply the matrix many times, and so we can
+think about whether we may be able to cache something between
+different applications. On the other hand, we realize that we must not
+cache too much data since otherwise we get back to the situation where
+memory access becomes the dominating factor.
+
+The trick is now to factor out the Jacobian transformation and first
+apply the gradient on the reference cell only. That transforms the vector of
+values on the local dofs to a vector of gradients on the quadrature
+points. There, we first apply the Jacobian that we factored out from the
+gradient, then we apply the weights of the quadrature, and we apply the
+transposed Jacobian for preparing the third loop which again uses the
+gradients on the unit cell.
Let us again write this in terms of matrices. Let the matrix
Bcell denote the cell-related gradient matrix, with each row
-containing the values of the quadrature points. It is constructed by a
+containing the values on the quadrature points. It is constructed by a
matrix-matrix product as
@f{eqnarray*}
B_\mathrm{cell} = J_\mathrm{cell} B_\mathrm{ref\_cell},
@f}
where Bref_cell denotes the gradient on the reference cell
-and Jcell denotes the Jacobian
-transformation. Jcell is block-diagonal, and the blocks size
-is equal to the dimension of the problem. Each diagonal block is the Jacobian
-transformation that goes from the reference cell to the real cell.
+and Jcell denotes the Jacobian transformation from unit to
+real cell (in the language of transformations, the operation represented by
+Jcell represents a covariant
+transformation). Jcell is block-diagonal, and the block
+size is equal to the dimension of the problem. Each diagonal block is the
+Jacobian transformation that goes from the reference cell to the real cell.
Putting things together, we find that
@f{eqnarray*}
-A_\mathrm{cell} = B_\mathrm{cell}^T D B_\mathrm{cell}
- = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^T
- D_\mathrm{cell}
+A_\mathrm{cell} = B_\mathrm{cell}^T D B_\mathrm{cell}
+ = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^T
+ D_\mathrm{cell}
J_\mathrm{cell} B_\mathrm{ref\_cell},
@f}
so we calculate the product (starting the local product from the right)
@f{eqnarray*}
-y_\mathrm{cell} = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^T D J_\mathrm{cell}
-B_\mathrm{ref\_cell} x_\mathrm{cell}, \quad
-y = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
-y_\mathrm{cell}.
+v_\mathrm{cell} = B_\mathrm{ref\_cell}^T J_\mathrm{cell}^T D J_\mathrm{cell}
+B_\mathrm{ref\_cell} u_\mathrm{cell}, \quad
+v = \sum_{\mathrm{cell}=1}^{\mathrm{n\_cells}} P_\mathrm{cell,{loc-glob}}^T
+v_\mathrm{cell}.
@f}
@code
...
@@ -239,11 +252,14 @@ y_\mathrm{cell}.
fe_values_reference.reinit (reference_cell.begin());
 FEValues<dim> fe_values (fe, quadrature_formula,
- update_inverse_jacobians | update_JxW_values);
+ update_inverse_jacobians | update_JxW_values |
+ update_quadrature_points);
for (; cell!=endc; ++cell)
{
fe_values.reinit (cell);
+ coefficient.value_list(fe_values.get_quadrature_points(),
+ coefficient_values);
cell->get_dof_indices (local_dof_indices);
@@ -251,51 +267,50 @@ y_\mathrm{cell}.
cell_src(i) = src(local_dof_indices(i));
temp_vector = 0;
- for (unsigned int q_point=0; q_point temp;
for (unsigned int d=0; ddim as many
-columns than before (here we assume that the number of quadrature points is
-the same as the number of degrees of freedom per cell, which is usual for
-scalar problems). Then, we also need to keep in mind that we touch some
-degrees of freedom several times because they belong to several cells. This
-also increases computational costs. A realistic value compared to a sparse
-matrix is that we now have to perform about 10 times as many operations (a bit
-less in 2D, a bit more in 3D).
-
-The above is, in essence, what happens in the code below and if you have
-difficulties in understanding the implementation, you should try to first
-understand what happens in the code above. In the actual implementation
-there are a few more points done to be even more efficient, namely:
-
- - We pre-compute the inverse of the Jacobian of the transformation and
- store it in an extra array. This allows us to fuse the three
- operations JcellT Dcell
- Jcell (apply Jacobian, multiply by
- weights, apply transposed
- Jacobian) into one second-rank tensor that is also symmetric (so we
- only need to store half the tensor).
-
- We work on several cells at once when we apply the gradients of the
- unit cell (it is always the same matrix with the reference cell
- data). This allows us to replace the matrix-vector product by a
- matrix-matrix product (several vectors of cell-data form a matrix),
- which enables a faster implementation. Obviously, we need some adapted
- data structures for that, but it isn't too hard to provide that. What
- is nice is that dense matrix-matrix products are close to today's
- processors' peak performance if the matrices are neither too small nor
- too large — and these operations are the most expensive part in
- the implementation shown here.
-
+derivative data is then applied by the inverse, transposed Jacobians (deal.II
+calls the Jacobian matrix from unit to real cell inverse_jacobian, because the
+transformation direction in deal.II is from real to unit cell).
+
+Finally, we are using tensor product basis functions and now that we have
+separated out the gradient on the reference cell Bref_cell,
+we can exploit the tensor-product structure to further reduce the
+complexity. We illustrate this in two space dimensions, but the same technique
+can be used in higher dimensions. On the reference cell, the basis functions
+are of the tensor product form $\phi(x,y) = \varphi_i(x) \varphi_j(y)$. The
+part of the matrix Bref_cell that computes the first
+component has the form $B_\mathrm{sub\_cell}^x = B_\mathrm{grad,x} \otimes
+B_\mathrm{val,y}$, where Bgrad,x and
+Bval,y contain the evaluation of all the 1D basis functions
+on all the 1D quadrature points. Forming a matrix U with U(j,i)
+containing the coefficient belonging to basis function $\varphi_i(x)
+\varphi_j(y)$, we get $(B_\mathrm{grad,x} \otimes
+B_\mathrm{val,y})u_\mathrm{cell} = B_\mathrm{val,y} U B_\mathrm{grad,x}$. This
+reduces the complexity for computing this product from $p^4$ to $2 p^3$, where
+p-1 is the degree of the finite element (i.e., equivalently,
+p is the number of shape functions in each coordinate
+direction), or $p^{2d}$ to $d p^{d+1}$ in general.
+
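+Expressed as code, the right hand side of the last formula is nothing but
+two small matrix-matrix products. A minimal sketch in 2D (the variable
+names below are assumptions made for this illustration; B_val_y and
+B_grad_x hold the 1D shape values and gradients, and U the local
+coefficients, all as p x p FullMatrix objects):
+@code
+const unsigned int p = fe.degree + 1;   // number of 1D shape functions
+FullMatrix<double> B_val_y (p, p), B_grad_x (p, p), U (p, p);
+FullMatrix<double> temp (p, p), result (p, p);
+
+// ... fill B_val_y, B_grad_x with the 1D shape data and U with the
+// cell coefficients, ordered as U(j,i) for phi_i(x) phi_j(y) ...
+
+// (B_grad_x (x) B_val_y) u_cell = B_val_y * U * B_grad_x:
+// two p x p matrix-matrix products of cost O(p^3) each, instead of one
+// O(p^4) product with the full two-dimensional matrix
+B_val_y.mmult (temp, U);          // temp   = B_val_y * U
+temp.mmult    (result, B_grad_x); // result = B_val_y * U * B_grad_x
+@endcode
+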
The implementation of the matrix-free matrix-vector product shown in this
tutorial is slower than a matrix-vector product using a sparse matrix for
-linear and quadratic elements, but on par with third order elements and faster
-for even higher order elements. An additional gain with this implementation is
-that we do not have to build the sparse matrix itself, which can also be quite
-expensive depending on the underlying differential equation.
+linear elements, but faster for all higher order elements thanks to the
+reduced complexity due to the tensor product structure and due to less memory
+transfer during computations. The impact of reduced memory transfer is
+particularly beneficial when working on a multi-core processor where several
+processing units share access to memory. In that case, an algorithm which is
+computation bound will show almost perfect parallel speedup, whereas an
+algorithm that is bound by memory transfer might not achieve similar speedup
+(even when the work is perfectly parallel and one could expect perfect scaling
+like in sparse matrix-vector products). An additional gain with this
+implementation is that we do not have to build the sparse matrix itself, which
+can also be quite expensive depending on the underlying differential
+equation. Moreover, the above framework is simple to generalize to nonlinear
+operations, as we demonstrate in step-48.
Combination with multigrid
Above, we have gone to significant lengths to implement a matrix-vector
product that does not actually store the matrix elements. In many user codes,
-however, one wants more than just performing some uncertain number of
+however, one wants more than just performing some number of
matrix-vector products — one wants to do as little of these operations
as possible when solving linear equation systems. In theory, we could use the
CG method without preconditioning; however, that would not be very
efficient. Rather, one uses preconditioners for improving speed. On the other
hand, most of the more frequently used preconditioners such as SSOR, ILU or
-algebraic multigrid (AMG) can now no longer be used here because their
+algebraic multigrid (AMG) cannot be used here because their
implementation requires knowledge of the elements of the system matrix.
-One solution is to use multigrid methods as shown in
-step-16. They are known to be very fast, and they are suitable for our
-purpose since they can be designed based purely on matrix-vector products. All
-one needs to do is to find a smoother that works with matrix-vector products
-only (our choice requires knowledge of the diagonal entries of the matrix,
-though). One such candidate would be a damped Jacobi iteration, but that is
-often not sufficiently good in damping high-frequency errors.
-A Chebyshev preconditioner, eventually, is what we use here. It can be
-seen as an extension of the Jacobi method by using Chebyshev polynomials. With
-degree zero, the Jacobi method with optimal damping parameter is retrieved,
-whereas higher order corrections improve the smoothing properties if some
-parameters are suitably chosen. The effectiveness of Chebyshev smoothing in
-multigrid has been demonstrated, e.g., in the article M. Adams, M. Brezina,
-J. Hu, R. Tuminaro. Parallel multigrid smoothers: polynomial versus
-Gauss–Seidel, J. Comput. Phys. 188:593–610, 2003. This
-publication also identifies one more advantage of Chebyshev smoothers that we
-exploit here, namely that they are easy to parallelize, whereas
-SOR/Gauss–Seidel smoothing relies on substitutions, which can often only
-be parallelized by working on diagonal sub-blocks of the matrix, which
-decreases efficiency.
-
-The implementation into the multigrid framework is then straightforward. This
+One solution is to use multigrid methods as shown in step-16. They are known
+to be very fast, and they are suitable for our purpose since they can be
+designed based purely on matrix-vector products. All one needs to do is to
+find a smoother that works with matrix-vector products only (our choice
+requires knowledge of the diagonal entries of the matrix, though). One such
+candidate would be a damped Jacobi iteration, but that is often not
+sufficiently good in damping high-frequency errors. A Chebyshev
+preconditioner is what we use here in the end. It can be seen as an
+extension of the Jacobi method by using Chebyshev polynomials. With degree
+zero, the Jacobi method with optimal damping parameter is retrieved, whereas
+higher order corrections improve the smoothing properties if some parameters
+are suitably chosen. The effectiveness of Chebyshev smoothing in multigrid has
+been demonstrated, e.g., in the article
+M. Adams, M. Brezina, J. Hu, R. Tuminaro. Parallel multigrid smoothers:
+polynomial versus Gauss–Seidel, J. Comput. Phys. 188:593–610,
+2003 . This publication also identifies one more advantage of
+Chebyshev smoothers that we exploit here, namely that they are easy to
+parallelize, whereas SOR/Gauss–Seidel smoothing relies on substitutions,
+for which a naive parallelization works on diagonal sub-blocks of the matrix,
+thereby decreasing efficiency (for more detail see e.g. Y. Saad,
+Iterative Methods for Sparse Linear Systems, SIAM, 2nd edition, 2003, chapters
+11 & 12).
+
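+To give an idea of how such a smoother is set up in deal.II, here is a
+sketch using the PreconditionChebyshev class (the matrix type
+LevelMatrixType is a placeholder for the matrix-free operator class of
+this program, and the concrete parameter values are chosen for
+illustration only):
+@code
+typedef PreconditionChebyshev<LevelMatrixType,Vector<double> > Smoother;
+
+Smoother::AdditionalData smoother_data;
+smoother_data.smoothing_range = 10.;  // part of the spectrum to be smoothed,
+                                      // relative to the largest eigenvalue
+smoother_data.degree          = 3;    // degree of the Chebyshev polynomial
+
+Smoother smoother;
+smoother.initialize (level_matrix, smoother_data);
+
+// smoother.vmult(dst,src) now performs one smoothing step, using only
+// matrix-vector products with level_matrix and its diagonal entries.
+@endcode
+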
+The implementation into the multigrid framework is then straightforward. The multigrid implementation in this
program is based on an earlier version of step-16 that demonstrated multigrid
-on uniformly refined grids. However, the present matrix-free techniques would
-obviously also apply to the adaptive meshes the current step-16 uses.
+on uniformly refined grids.
-The test case
-
-In order to demonstrate the capabilities of the method, we work on a rather
-general Poisson problem, based on a more or less unstructured mesh (where
-the Jacobians are different from cell to cell), higher order mappings to a
-curved boundary, and a non-constant coefficient in the equation. If we
-worked on a constant-coefficient case with structured mesh, we could
-decrease the operation count by a factor of 4 in 2D and 6 in 3D by building
-a local matrix (which is then the same for all cells), and doing the
-products as in the first developing step of the above code pieces.
diff --git a/deal.II/examples/step-37/doc/results.dox b/deal.II/examples/step-37/doc/results.dox
index 6e00f0b1a1..8c269d575b 100644
--- a/deal.II/examples/step-37/doc/results.dox
+++ b/deal.II/examples/step-37/doc/results.dox
@@ -3,309 +3,236 @@
Program output
Since this example solves the same problem as step-5 (except for
-a different coefficient), we refer to the graphical output there. Here, we
-evaluate some aspects of the multigrid solver.
+a different coefficient), there is little to say about the
+solution. We show a picture anyway, illustrating the size of the
+solution through both isocontours and volume rendering:
+@image html "step-37.solution.png"
+
+Of more interest is to evaluate some aspects of the multigrid solver.
When we run this program in 2D for quadratic ($Q_2$) elements, we get the
following output:
@code
Cycle 0
-Number of degrees of freedom: 337
-System matrix memory consumption: 0.02573 MiB.
-Multigrid objects memory consumption: 0.05083 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 81
+System matrix memory consumption: 0.008982 MB.
+Multigrid objects memory consumption: 0.02617 MB.
+Total setup time (wall) 0.001811s
+Time solve (5 iterations) (CPU/wall) 0s/0.0002651s
Cycle 1
-Number of degrees of freedom: 1313
-System matrix memory consumption: 0.09257 MiB.
-Multigrid objects memory consumption: 0.1794 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 289
+System matrix memory consumption: 0.01817 MB.
+Multigrid objects memory consumption: 0.05779 MB.
+Total setup time (wall) 0.001223s
+Time solve (5 iterations) (CPU/wall) 0s/0.000926s
Cycle 2
-Number of degrees of freedom: 5185
-System matrix memory consumption: 0.3553 MiB.
-Multigrid objects memory consumption: 0.6779 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 1089
+System matrix memory consumption: 0.05286 MB.
+Multigrid objects memory consumption: 0.1581 MB.
+Total setup time (wall) 0.003045s
+Time solve (6 iterations) (CPU/wall) 0.012s/0.003393s
Cycle 3
-Number of degrees of freedom: 20609
-System matrix memory consumption: 1.397 MiB.
-Multigrid objects memory consumption: 2.645 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 4225
+System matrix memory consumption: 0.1957 MB.
+Multigrid objects memory consumption: 0.5228 MB.
+Total setup time (wall) 0.008561s
+Time solve (6 iterations) (CPU/wall) 0.02s/0.01133s
Cycle 4
-Number of degrees of freedom: 82177
-System matrix memory consumption: 5.546 MiB.
-Multigrid objects memory consumption: 10.46 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 16641
+System matrix memory consumption: 0.7343 MB.
+Multigrid objects memory consumption: 1.925 MB.
+Total setup time (wall) 0.02938s
+Time solve (6 iterations) (CPU/wall) 0.068s/0.03312s
Cycle 5
-Number of degrees of freedom: 328193
-System matrix memory consumption: 22.11 MiB.
-Multigrid objects memory consumption: 41.65 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 66049
+System matrix memory consumption: 2.856 MB.
+Multigrid objects memory consumption: 7.435 MB.
+Total setup time (wall) 0.1128s
+Time solve (6 iterations) (CPU/wall) 0.228s/0.09577s
+
+Cycle 6
+Number of degrees of freedom: 263169
+System matrix memory consumption: 11.28 MB.
+Multigrid objects memory consumption: 29.3 MB.
+Total setup time (wall) 0.4553s
+Time solve (6 iterations) (CPU/wall) 1.272s/0.3955s
@endcode
As in step-16, we see that the number of CG iterations remains constant with
increasing number of degrees of freedom. We can also see that the various
objects we have to store for the multigrid method on the individual levels of
-our mesh together make up about twice as much as the matrix on the finest
-level.
+our mesh together make up more than twice as much as the matrix on the finest
+level. For the present example, about half the memory consumption of the
+multigrid objects is due to the level transfer matrices, and the other half is
+consumed by the matrix-free objects (and there, mainly the indices and the
+variable coefficient).
-Not much changes if we run the
-program in three spatial dimensions, with the exception that the multilevel
-objects now take up comparatively less space (because in 3d, each level has
-only one eighth the number of cells of the next finer one, whereas in 2d this
-factor if one quarter):
+Not much changes if we run the program in three spatial dimensions, with the
+exception that the multilevel objects now take up some more memory (because
+the level transfer matrices are denser) and the computing times are somewhat
+larger:
@code
Cycle 0
-Number of degrees of freedom: 517
-System matrix memory consumption: 0.1001 MiB.
-Multigrid objects memory consumption: 0.1463 MiB.
-Convergence in 9 CG iterations.
+Number of degrees of freedom: 125
+System matrix memory consumption: 0.01093 MB.
+Multigrid objects memory consumption: 0.03094 MB.
+Total setup time (wall) 0.002481s
+Time solve (5 iterations) (CPU/wall) 0s/0.000334s
Cycle 1
-Number of degrees of freedom: 3817
-System matrix memory consumption: 0.6613 MiB.
-Multigrid objects memory consumption: 0.8896 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 729
+System matrix memory consumption: 0.04105 MB.
+Multigrid objects memory consumption: 0.1274 MB.
+Total setup time (wall) 0.004471s
+Time solve (5 iterations) (CPU/wall) 0.004s/0.001979s
Cycle 2
-Number of degrees of freedom: 29521
-System matrix memory consumption: 5.1 MiB.
-Multigrid objects memory consumption: 6.653 MiB.
-Convergence in 10 CG iterations.
+Number of degrees of freedom: 4913
+System matrix memory consumption: 0.2821 MB.
+Multigrid objects memory consumption: 0.8048 MB.
+Total setup time (wall) 0.01651s
+Time solve (4 iterations) (CPU/wall) 0.036s/0.01295s
Cycle 3
-Number of degrees of freedom: 232609
-System matrix memory consumption: 40.4 MiB.
-Multigrid objects memory consumption: 52.24 MiB.
-Convergence in 11 CG iterations.
+Number of degrees of freedom: 35937
+System matrix memory consumption: 1.948 MB.
+Multigrid objects memory consumption: 5.734 MB.
+Total setup time (wall) 0.1072s
+Time solve (5 iterations) (CPU/wall) 0.16s/0.0709s
Cycle 4
-Number of degrees of freedom: 1847617
-System matrix memory consumption: 322 MiB.
-Multigrid objects memory consumption: 415.1 MiB.
-Convergence in 11 CG iterations.
+Number of degrees of freedom: 274625
+System matrix memory consumption: 14.49 MB.
+Multigrid objects memory consumption: 44.41 MB.
+Total setup time (wall) 0.8173s
+Time solve (5 iterations) (CPU/wall) 1.52s/0.5093s
+
+Cycle 5
+Number of degrees of freedom: 2146689
+System matrix memory consumption: 115.9 MB.
+Multigrid objects memory consumption: 342.6 MB.
+Total setup time (wall) 6.387s
+Time solve (5 iterations) (CPU/wall) 12.45s/3.767s
@endcode
<h3>Comparison with a sparse matrix</h3>
-In order to understand the capabilities of this class, we compare the memory
-consumption and execution (wallclock) time for assembly and 50 matrix-vector
-products (MV) on a 3D problem with one million unknowns the classical
-sparse matrix implementation (SpM) and the MatrixFree implementation shown
-here (M-F). Both matrices are based on @p double %numbers. The program is run
-on a 2.8 GHz Opteron processor with the ACML
-BLAS. We present results running on one core core and four cores,
-respectively. Moreover, we measure the time it takes to construct the
-individual matrices and filling them with data (@p setup and @p assemble
-functions). The sparse matrix is initialized using a
-CompressedSimpleSparsityPattern for calling the
-DoFTools::make_sparsity_pattern function, and then copied to a SparsityPattern
-object. The boundary nodes are eliminated using the ConstraintMatrix class, so
-that only elements that are actually nonzero are stored in the matrix.
+In order to understand the capabilities of the matrix-free implementation, we
+compare the performance of the 3d example above with a SparseMatrix
+implementation, measuring the computation times for both the initialization of
+the problem (distributing DoFs, setting up and assembling matrices, setting up
+the multigrid structures) and the actual solution, for the matrix-free variant
+and the variant based on sparse matrices. We base the preconditioner on float
+numbers and the actual matrix and vectors on double numbers, as shown
+above. Tests are run on an Intel Core i7-2620M notebook processor (two cores,
+with AVX support, i.e., four operations on doubles can be done with one CPU
+instruction, which is heavily used in FEEvaluation), with the program compiled
+in optimized mode. The example makes use of multithreading, so both cores are
+actually used.
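For orientation, a minimal sketch (type and constant names assumed, not quoted
from the program) of how the two number types enter is a pair of typedefs for
the system matrix and the multigrid level matrices:
@code
// assumed names: double precision for the system, single precision for the levels
typedef LaplaceOperator<dim,degree_finite_element,double> SystemMatrixType;
typedef LaplaceOperator<dim,degree_finite_element,float>  LevelMatrixType;
@endcode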
 <table align="center" border="1">
-  <tr> <th>&nbsp;</th> <th colspan="2">Memory consumption</th> <th colspan="2">Time assembly</th> <th colspan="2">Time 50 MV, 1 CPU</th> <th colspan="2">Time 50 MV, 4 CPUs</th> </tr>
-  <tr> <th>element order</th> <th>SpM</th> <th>M-F</th> <th>SpM</th> <th>M-F</th> <th>SpM</th> <th>M-F</th> <th>SpM</th> <th>M-F</th> </tr>
-  <tr> <td>1</td> <td>299 MiB</td> <td>394 MiB</td> <td>8.09 s</td> <td>3.43 s</td> <td>5.50 s</td> <td>22.4 s</td> <td>4.30 s</td> <td>11.0 s</td> </tr>
-  <tr> <td>2</td> <td>698 MiB</td> <td>177 MiB</td> <td>12.43 s</td> <td>1.32 s</td> <td>12.0 s</td> <td>18.6 s</td> <td>9.10 s</td> <td>6.31 s</td> </tr>
-  <tr> <td>3</td> <td>1295 MiB</td> <td>124 MiB</td> <td>41.1 s</td> <td>1.31 s</td> <td>21.2 s</td> <td>23.7 s</td> <td>16.0 s</td> <td>7.43 s</td> </tr>
-  <tr> <td>4</td> <td>2282 MiB</td> <td>107 MiB</td> <td>117 s</td> <td>1.97 s</td> <td>40.8 s</td> <td>36.3 s</td> <td>19.7 s</td> <td>10.9 s</td> </tr>
-  <tr> <td>5</td> <td>3597 MiB</td> <td>96.4 MiB</td> <td>510 s</td> <td>5.52 s</td> <td>75.7 s</td> <td>53.9 s</td> <td>29.3 s</td> <td>15.7 s</td> </tr>
-  <tr> <td>6</td> <td>5679 MiB</td> <td>96.3 MiB</td> <td>2389 s</td> <td>26.1 s</td> <td>135 s</td> <td>79.1 s</td> <td>45.8 s</td> <td>24.3 s</td> </tr>
+  <tr> <th>&nbsp;</th> <th colspan="2">Sparse matrix</th> <th colspan="2">Matrix-free implementation</th> </tr>
+  <tr> <th>n_dofs</th> <th>Setup + assemble</th> <th>Solve</th> <th>Setup + assemble</th> <th>Solve</th> </tr>
+  <tr> <td>125</td> <td>0.0048s</td> <td>0.00075s</td> <td>0.0025s</td> <td>0.00033s</td> </tr>
+  <tr> <td>729</td> <td>0.014s</td> <td>0.0022s</td> <td>0.0026s</td> <td>0.0018s</td> </tr>
+  <tr> <td>4,913</td> <td>0.10s</td> <td>0.012s</td> <td>0.017s</td> <td>0.013s</td> </tr>
+  <tr> <td>35,937</td> <td>0.80s</td> <td>0.14s</td> <td>0.11s</td> <td>0.071s</td> </tr>
+  <tr> <td>274,625</td> <td>5.93s</td> <td>1.05s</td> <td>0.82s</td> <td>0.51s</td> </tr>
+  <tr> <td>2,146,689</td> <td>46.7s</td> <td>8.44s</td> <td>6.39s</td> <td>3.77s</td> </tr>
 </table>
-There are a few interesting things with the %numbers in this table.
-
-Firstly, we see the disappointing fact that for linear elements the
-MatrixFree class does actually consume more memory than a SparseMatrix with
-its SparsityPattern, despite the efforts made in this program. As mentioned
-earlier, this is mostly because the Transformation data is stored for every
-quadrature point. For each quadrature point, the transformation consists of
-six doubles, and there are about eight times as many quadrature points as
-there are degrees of freedom. In first approximation, this means that the
-matrix consumes 384 (= @p sizeof(double) * 6 * 8) bytes for each degree of
-freedom. On the other hand, the sparse matrix has a bandwidth of 27 or less,
-so each dof gives rise to at most 324 (= 27 * 12) bytes. A more clever
-implementation would try to compress the Jacobian transformation data, by
-exploiting similarities between the mappings within the cells, as well as from
-one cell to the next. This could dramatically reduce the memory requirements,
-and hence, increase the speed for lower-order implementations.
-
-Secondly, we observe that the memory requirements for a SparseMatrix grow
-quickly as the order of the elements increases. This is because there are
-increasingly many entries in each row, which exist due to more degrees of
-freedom that couple to each other. The matrix-free implementation does not
-suffer from this drawback. Here, the memory consumption decreases instead,
-since there are less DoFs that are shared among elements, which decreases the
-relative amount of quadrature points. Regarding the execution speed, we see
-that the matrix-free variant gets more competitive with higher order, and it
-does scale better when run on multiple processors (3.5 speedup with four
-processors compared to the serial case, compared to 2-2.5 speedup for the
-SparseMatrix). The advantage in %parallel scaling was expected, because the
-matrix-free variant is less memory-bound for higher order implementations, so
-that the additional computing power from many cores can better be exploited.
-
-A third thing, which is unrelated to this tutorial program, is the fact that
-standard matrix assembly gets really slow for high order elements. The %numbers
-shown here are based on the usual routines that many other tutorial programs
-make use of. A closer analysis of this shows that the cell data does not fit
-into cache anymore. One could circumvent this problem by writing the assembly
-as a matrix-matrix product, and using (cache-aware) BLAS implementations.
-
-For completeness, here comes a similar table for a 2D problem with 5.7
-million unknowns. Since the excess in work for the matrix-free
-implementation is less compared to 3D, the implementation is more competitive
-for lower-order elements.
-
-<table align="center" border="1">
-  <tr> <th>&nbsp;</th> <th colspan="2">Memory consumption</th> <th colspan="2">Time assembly</th> <th colspan="2">Time 50 MV, 4 CPUs</th> </tr>
-  <tr> <th>element order</th> <th>SpM</th> <th>M-F</th> <th>SpM</th> <th>M-F</th> <th>SpM</th> <th>M-F</th> </tr>
-  <tr> <td>1</td> <td>659 MiB</td> <td>661 MiB</td> <td>18.8 s</td> <td>6.45 s</td> <td>11.0 s</td> <td>28.8 s</td> </tr>
-  <tr> <td>2</td> <td>1119 MiB</td> <td>391 MiB</td> <td>15.6 s</td> <td>2.46 s</td> <td>17.1 s</td> <td>16.2 s</td> </tr>
-  <tr> <td>3</td> <td>1711 MiB</td> <td>318 MiB</td> <td>17.4 s</td> <td>1.82 s</td> <td>23.1 s</td> <td>13.7 s</td> </tr>
-  <tr> <td>4</td> <td>2434 MiB</td> <td>285 MiB</td> <td>24.2 s</td> <td>1.34 s</td> <td>31.1 s</td> <td>14.6 s</td> </tr>
-  <tr> <td>5</td> <td>3289 MiB</td> <td>266 MiB</td> <td>35.9 s</td> <td>1.26 s</td> <td>29.6 s</td> <td>16.7 s</td> </tr>
-  <tr> <td>6</td> <td>4274 MiB</td> <td>254 MiB</td> <td>58.0 s</td> <td>1.12 s</td> <td>35.9 s</td> <td>19.4 s</td> </tr>
-</table>
+The table clearly shows that the matrix-free implementation is twice as fast
+for the solver, and more than six times as fast when it comes to
+initialization costs. As the problem size is made a factor of 8 larger, we
+note that the times usually go up by a factor of eight, too (as the solver
+iterations are constant at 5). There are two deviations. The first is in the
+sparse matrix between 5k and 36k degrees of freedom, where the time increases
+by a factor of 12. This is the threshold at which the processor cache can no
+longer hold all the data necessary for the matrix-vector products, so that
+all matrix elements must be fetched from main memory. The second deviation is
+the times for the matrix-free solve, which increase by less than a factor of
+8. This is because of more parallelism from more cells, exploited by the
+(involved) dynamic task scheduling approach in CellFEOperator. Note that about
+30% of the time in the matrix-free solver is spent on restriction and
+prolongation, which use sparse matrices. So the speedup could be even better
+if all parts were done efficiently.
+
+Of course, this picture does not necessarily translate to all cases, as there
+are problems where knowledge of matrix entries enables much better solvers (as
+happens when the coefficient varies more strongly than in the above
+example). Moreover, it also depends on the computer system. The present system
+has good memory performance, so sparse matrices perform comparably
+well. Nonetheless, the matrix-free implementation already gives a nice speedup
+for the Q2 elements used in this example. This becomes particularly apparent
+for time-dependent or nonlinear problems where sparse matrices would need to
+be reassembled over and over again, which becomes much easier with this
+class. And of course, thanks to the better complexity of the products, the
+method gains increasingly large advantages as the order of the elements
+increases (the matrix-free implementation has costs
+4d<sup>2</sup>p per degree of freedom, compared to
+2p<sup>d</sup> for the sparse matrix, so it will win anyway for order 4
+and higher in 3d).
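As a rough back-of-the-envelope check of these cost counts (editorial
arithmetic, not a claim made by the original text), equating the two estimates
gives the crossover order:
@f{eqnarray*}
4 d^2 p = 2 p^d \quad \Leftrightarrow \quad p^{d-1} = 2 d^2,
@f}
which for d=3 means p^2 = 18, i.e. a crossover near p = 4.2, consistent with
the statement that the matrix-free evaluation pays off from about order four
on in 3d.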
+
+<h3>Possibilities for extensions</h3>
+
+Above, we have shown figures for second-order finite elements. Our
+implementation gains more compared to sparse matrices if higher order elements
+are used. However, FE_Q elements with equidistant nodes are badly conditioned
+if the order increases. In this case, the smoother and the multigrid solver
+break down. Node clustering close to the element boundaries resolves this
+problem (and the multigrid solver converges in 5 or 6 iterations also for very
+high order). Elements with this property are the Gauss-Lobatto FE_Q
+elements, which are presented in step-48.
diff --git a/deal.II/examples/step-37/doc/step-37.solution.png b/deal.II/examples/step-37/doc/step-37.solution.png
new file mode 100644
index 0000000000..9a30a53ec0
Binary files /dev/null and b/deal.II/examples/step-37/doc/step-37.solution.png differ
diff --git a/deal.II/examples/step-37/doc/tooltip b/deal.II/examples/step-37/doc/tooltip
index d469776e90..65cc3a07fc 100644
--- a/deal.II/examples/step-37/doc/tooltip
+++ b/deal.II/examples/step-37/doc/tooltip
@@ -1 +1 @@
-Matrix-free methods. Multigrid.
+Matrix-free methods. Multigrid. Cell-based finite element operator.
diff --git a/deal.II/examples/step-37/step-37.cc b/deal.II/examples/step-37/step-37.cc
index eb8224b20b..8586e70297 100644
--- a/deal.II/examples/step-37/step-37.cc
+++ b/deal.II/examples/step-37/step-37.cc
@@ -1,21 +1,20 @@
-/* Authors: Katharina Kormann, Martin Kronbichler, Uppsala University, 2009 */
+/* Author: Katharina Kormann, Martin Kronbichler, Uppsala University, 2009-2011 */
/* $Id$ */
/* */
-/* Copyright (C) 2009, 2010, 2012 by the deal.II authors */
+/* Copyright (C) 2009, 2010, 2011, 2012 by the deal.II authors */
/* */
/* This file is subject to QPL and may not be distributed */
/* without copyright and license information. Please refer */
/* to the file deal.II/doc/license.html for the text and */
/* further information on this license. */
-
- // To start with the include files are more
- // or less the same as in step-16:
+ // First include the necessary files
+ // from the deal.II library.
#include
#include
#include
-#include
+#include
#include
#include
@@ -43,1571 +42,1482 @@
#include
#include
+ // This includes the data structures for the
+ // efficient implementation of matrix-free
+ // methods or more generic finite element
+ // operators with the class MatrixFree.
+#include <deal.II/matrix_free/matrix_free.h>
+#include <deal.II/matrix_free/fe_evaluation.h>
+
#include
#include
-using namespace dealii;
-
-
-
- // @sect3{Equation data}
-
- // We define a variable coefficient function
- // for the Poisson problem. It is similar to
- // the function in step-5 but we use the form
- // $a(\mathbf x)=\frac{1}{0.1 + \|\bf x\|^2}$
- // instead of a discontinuous one. It is
- // merely to demonstrate the possibilities of
- // this implementation, rather than making
- // much sense physically.
-template
-class Coefficient : public Function
-{
- public:
- Coefficient () : Function() {}
-
- virtual double value (const Point &p,
- const unsigned int component = 0) const;
-
- virtual void value_list (const std::vector > &points,
- std::vector &values,
- const unsigned int component = 0) const;
-};
-
-
-
-template
-double Coefficient::value (const Point &p,
- const unsigned int /*component*/) const
-{
- return 1./(0.1+p.square());
-}
-
-
-template
-void Coefficient::value_list (const std::vector > &points,
- std::vector &values,
- const unsigned int component) const
+namespace Step37
{
- Assert (values.size() == points.size(),
- ExcDimensionMismatch (values.size(), points.size()));
- Assert (component == 0,
- ExcIndexRange (component, 0, 1));
-
- const unsigned int n_points = points.size();
-
- for (unsigned int i=0; i
+ class Coefficient : public Function
+ {
+ public:
+ Coefficient () : Function() {}
+ virtual double value (const Point &p,
+ const unsigned int component = 0) const;
+ template
+ number value (const Point &p,
+ const unsigned int component = 0) const;
- // @sect3{Matrix-free implementation}
-
- // In this program, we want to make
- // use of the ability of deal.II to
- // runs things in %parallel if compute
- // resources are available. We will
- // follow the general framework laid
- // out in the @ref threads module and
- // use the WorkStream class to do
- // operations on the range of all
- // cells.
- //
- // To this end, we first have to have
- // a few declarations that we use for
- // defining the %parallel layout of
- // the vector multiplication function
- // with the WorkStream concept in the
- // Matrix-free class. These comprise
- // so-called scratch data that we use
- // for calculating cell-related
- // information, and copy data that is
- // eventually used in a separate
- // function for writing local data
- // into the global vector. The reason
- // for this split-up definition is
- // that many threads at a time can
- // execute the local multiplications
- // (and filling up the copy data),
- // but than that copy data needs to
- // be worked on by one process at a
- // time.
-namespace WorkStreamData
-{
- template
- struct ScratchData
- {
- ScratchData ();
- ScratchData (const ScratchData &scratch);
- FullMatrix solutions;
+ virtual void value_list (const std::vector > &points,
+ std::vector &values,
+ const unsigned int component = 0) const;
};
- template
- ScratchData::ScratchData ()
- :
- solutions ()
- {}
- template
- ScratchData::ScratchData (const ScratchData &)
- :
- solutions ()
- {}
+ // This is the new function mentioned
+ // above: Evaluate the coefficient for
+ // abstract type @p number: It might be
+ // just a usual double, but it can also be
+ // a somewhat more complicated type that we
+ // call VectorizedArray. This data type is
+ // essentially a short array of doubles
+ // whose length depends on the particular
+ // computer system in use. For example,
+ // systems based on x86-64 support the
+ // streaming SIMD extensions (SSE), where
+ // the processor's vector units can process
+ // two doubles (or four single-precision
+ // floats) by one CPU instruction. Newer
+ // processors with support for the
+ // so-called advanced vector extensions
+ // (AVX) with 256 bit operands can use four
+ // doubles and eight floats,
+ // respectively. Vectorization is a
+ // single-instruction/multiple-data (SIMD)
+ // concept, that is, one CPU instruction is
+ // used to process multiple data values at
+ // once. Often, finite element programs do
+ // not use vectorization explicitly as the
+ // benefits of this concept are only in
+ // arithmetic intensive operations. The
+ // bulk of typical finite element workloads
+ // are memory bandwidth limited (operations
+ // on sparse matrices and vectors) where
+ // the additional computational power is
+ // useless.
+ //
+ // Behind the scenes, optimized BLAS
+ // packages might heavily rely on
+ // vectorization, though. Also, optimizing
+ // compilers might automatically transform
+ // loops involving standard code into more
+ // efficient vectorized form. However, the
+ // data flow must be very regular in order
+ // for compilers to produce efficient
+ // code. For example, already the automatic
+ // vectorization of the prototype operation
+ // that benefits from vectorization,
+ // matrix-matrix products, fails on most
+ // compilers (as of writing this tutorial
+ // in early 2012, neither gcc-4.6 nor the
+ // Intel compiler v. 12 manage to produce
+ // useful vectorized code for the
+ // FullMatrix::mmult function, and not even
+ // in the simpler case where the
+ // matrix bounds are compile-time constants
+ // instead of run-time constants as in
+ // FullMatrix::mmult). The main reason for
+ // this is that the information to be
+ // processed at the innermost loop (that is
+ // where vectorization is applied) is not
+ // necessarily a multiple of the vector
+ // length, leaving parts of the resources
+ // unused. Moreover, the data that can
+ // potentially be processed together might
+ // not be laid out in a contiguous way in
+ // memory or not with the necessary
+ // alignment to address boundaries that are
+ // needed by the processor. Or the compiler
+ // might not be able to prove that.
+ //
+ // In the matrix-free implementation in
+ // deal.II, we have therefore chosen to
+ // apply vectorization at the level which
+ // is most appropriate for finite element
+ // computations: The cell-wise computations
+ // are typically exactly the same for all
+ // cells (except for reading from and
+ // writing to vectors), and hence SIMD can
+ // be used to process several cells at
+ // once. In all that follows, you can think
+ // of an AlignedVector to hold data from
+ // several cells. For example, we evaluate
+ // the coefficient shown here not on a
+ // simple point as usually done, but we
+ // hand it a
+ // Point >
+ // point, which is actually a collection of
+ // two points in the case of SSE2. Do not
+ // confuse the entries in
+ // VectorizedArray with the
+ // different coordinates of the
+ // point. Indeed, the data is laid out such
+ // that p[0] returns a
+ // VectorizedArray, which in turn
+ // contains the x-coordinate for the first
+ // point and the second point. You may
+ // access the coordinates individually
+ // using e.g. p[0][j], j=1,2,
+ // but it is recommended to define
+ // operations on a VectorizedArray as much
+ // as possible in order to make use of
+ // vectorized operations.
+ //
+ // In the function implementation, we
+ // assume that the number type overloads
+ // basic arithmetic operations, so we just
+ // write the code as usual. The standard
+ // functions @p value and value_list that
+ // are virtual functions contained in the
+ // base class are then computed from the
+ // templated function with double type, in
+ // order to avoid duplicating code.
+ template
template
- struct CopyData : public ScratchData
+ number Coefficient::value (const Point &p,
+ const unsigned int /*component*/) const
{
- CopyData ();
- CopyData (const CopyData &scratch);
- unsigned int first_cell;
- unsigned int n_dofs;
- };
+ return 1. / (0.05 + 2.*p.square());
+ }
- template
- CopyData::CopyData ()
- :
- ScratchData ()
- {}
-
- template
- CopyData::CopyData (const CopyData &)
- :
- ScratchData ()
- {}
-
-}
-
-
-
- // Next comes the implementation of the
- // matrix-free class. It provides some
- // standard information we expect for
- // matrices (like returning the dimensions
- // of the matrix), it implements
- // matrix-vector multiplications in several
- // forms, and it provides functions for
- // filling the matrix with data.
- //
- // We choose to make this class generic,
- // i.e., we do not implement the actual
- // differential operator (here: Laplace
- // operator) directly in this class. We
- // instead let the actual transformation
- // (which happens on the level of quadrature
- // points, see the discussion in the
- // introduction) be a template parameter that
- // is implemented by another class. We then
- // only have to store a list of these objects
- // for each quadrature point on each cell in
- // a big list – we choose a
- // Table<2,Transformation>
data
- // format) – and call a transform
- // command of the @p Transformation
- // class. This template magic makes it easy
- // to reuse this MatrixFree class for other
- // problems that are based on a symmetric
- // operation without the need for substantial
- // changes.
-template
-class MatrixFree : public Subscriptor
-{
- public:
- MatrixFree ();
-
- void reinit (const unsigned int n_dofs,
- const unsigned int n_cells,
- const FullMatrix &cell_matrix,
- const unsigned int n_points_per_cell);
- void clear();
-
- unsigned int m () const;
- unsigned int n () const;
- ConstraintMatrix & get_constraints ();
-
- void set_local_dof_indices (const unsigned int cell_no,
- const std::vector &local_dof_indices);
- void set_derivative_data (const unsigned int cell_no,
- const unsigned int quad_point,
- const Transformation &trans_in);
-
- template
- void vmult (Vector &dst,
- const Vector &src) const;
- template
- void Tvmult (Vector &dst,
- const Vector &src) const;
- template
- void vmult_add (Vector &dst,
- const Vector &src) const;
- template
- void Tvmult_add (Vector &dst,
- const Vector &src) const;
-
- number el (const unsigned int row,
- const unsigned int col) const;
- void calculate_diagonal () const;
-
- std::size_t memory_consumption () const;
-
- // The private member variables of the
- // @p MatrixFree class are a
- // small matrix that does the
- // transformation from solution values to
- // quadrature points, a list with the
- // mapping between local degrees of freedom
- // and global degrees of freedom for each
- // cell (stored as a two-dimensional array,
- // where each row corresponds to one
- // cell, and the columns within individual
- // cells are the local degrees of freedom),
- // the transformation variable for
- // implementing derivatives, a constraint
- // matrix for handling boundary conditions
- // as well as a few other variables that
- // store matrix properties.
- private:
- typedef std::vector >::const_iterator
- CellChunkIterator;
- template
- void local_vmult (CellChunkIterator cell_range,
- WorkStreamData::ScratchData &scratch,
- WorkStreamData::CopyData ©,
- const Vector &src) const;
-
- template
- void
- copy_local_to_global (const WorkStreamData::CopyData ©,
- Vector &dst) const;
-
- FullMatrix B_ref_cell;
- Table<2,unsigned int> indices_local_to_global;
- Table<2,Transformation> derivatives;
-
- ConstraintMatrix constraints;
-
- mutable Vector diagonal_values;
- mutable bool diagonal_is_calculated;
-
- struct MatrixSizes
- {
- unsigned int n_dofs, n_cells;
- unsigned int m, n;
- unsigned int n_points, n_comp;
- std::vector > chunks;
- } matrix_sizes;
-};
-
-
-
- // This is the constructor of the @p
- // MatrixFree class. All it does is to
- // subscribe to the general deal.II @p
- // Subscriptor scheme that makes sure that we
- // do not delete an object of this class as
- // long as it used somewhere else, e.g. in a
- // preconditioner.
-template
-MatrixFree::MatrixFree ()
- :
- Subscriptor()
-{}
-
-
-
- // The next functions return the
- // number of rows and columns of the
- // global matrix (i.e. the dimensions
- // of the operator this class
- // represents, the point of this
- // tutorial program was, after all,
- // that we don't actually store the
- // elements of the rows and columns
- // of this operator). Since the
- // matrix is square, the returned
- // numbers are the same.
-template
-unsigned int
-MatrixFree::m () const
-{
- return matrix_sizes.n_dofs;
-}
-
-
-
-template
-unsigned int
-MatrixFree::n () const
-{
- return matrix_sizes.n_dofs;
-}
-
-
-
- // One more function that just returns an
- // %internal variable. Note that the user
- // will need to change this variable, so it
- // returns a non-constant reference to the
- // ConstraintMatrix.
-template
-ConstraintMatrix &
-MatrixFree::get_constraints ()
-{
- return constraints;
-}
-
-
-
- // The following function takes a vector of
- // local dof indices on cell level and writes
- // the data into the
- // @p indices_local_to_global field
- // in order to have fast access to it. It
- // performs a few sanity checks like whether
- // the sizes in the matrix are set
- // correctly. One tiny thing: Whenever we
- // enter this function, we probably make some
- // modification to the matrix. This means
- // that the diagonal of the matrix, which we
- // might have computed to have fast access to
- // those elements, is invalidated. We set the
- // respective flag to @p false.
-template
-void MatrixFree::
-set_local_dof_indices (const unsigned int cell_no,
- const std::vector &local_dof_indices)
-{
- Assert (local_dof_indices.size() == matrix_sizes.m,
- ExcDimensionMismatch(local_dof_indices.size(),
- matrix_sizes.m));
- for (unsigned int i=0; i
-void MatrixFree::
-set_derivative_data (const unsigned int cell_no,
- const unsigned int quad_point,
- const Transformation &trans_in)
-{
- Assert (quad_point < matrix_sizes.n_points, ExcInternalError());
- derivatives(cell_no,quad_point) = trans_in;
- diagonal_is_calculated = false;
-}
-
-
-
- // Now finally to the central function of the
- // matrix-free class, implementing the
- // multiplication of the matrix with a
- // vector. This function does not actually
- // work on all cells of a mesh, but only the
- // subset of cells specified by the first
- // argument @p cell_range. Since this
- // function operates similarly irrespective
- // on which cell chunk we are sitting, we can
- // call it simultaneously on many processors,
- // but with different cell range data.
- //
- // The goal of this function is to provide
- // the multiplication of a vector with the
- // local contributions of a set of cells. As
- // mentioned in the introduction, if we were
- // to deal with a single cell, this would
- // amount to performing the product
- // @f{eqnarray*}
- // P^T_\mathrm{cell,local-global} A_\mathrm{cell}
- // P_\mathrm{cell,local-global} x
- // @f}
- // where
- // @f{eqnarray*}
- // A_\mathrm{cell} =
- // B_\mathrm{ref\_cell}^T J_\mathrm{cell}^T
- // D_\mathrm{cell}
- // J_\mathrm{cell} B_\mathrm{ref\_cell}
- // @f}
- // and Pcell,local-global
- // is the transformation from local to global
- // indices.
- //
- // To do this, we would have to do the
- // following steps:
- //
- // - Form $x_\mathrm{cell} =
- // P_\mathrm{cell,local-global} x$. This is
- // done by using the command
- // ConstraintMatrix::get_dof_values.
- //
- Form $x_1 = B_\mathrm{ref\_cell}
- // x_\mathrm{cell}$. The vector
- // x1 contains the
- // reference cell gradient to the local
- // cell vector.
- //
- Form $x_2 = J_\mathrm{cell}^T
- // D_\mathrm{cell} J_\mathrm{cell}
- // x_1$. This is a block-diagonal
- // operation, with the block size equal to
- // @p dim. The blocks just
- // correspond to the individual quadrature
- // points. The operation on each quadrature
- // point is implemented by the
- // Transformation class object that this
- // class is equipped with. Compared to the
- // introduction, the matrix
- // Dcell now contains the
- // @p JxW values and the
- // inhomogeneous coefficient.
- //
- Form $y_\mathrm{cell} =
- // B_\mathrm{ref\_cell}^T x_2$. This gives
- // the local result of the matrix-vector
- // product.
- //
- Form $y \leftarrow y +
- // P_\mathrm{cell,local-global}^T
- // y_\mathrm{cell}$. This adds the local
- // result to the global vector, which is
- // realized using the method
- // ConstraintMatrix::distribute_local_to_global.
- // Note that we do this in an extra
- // function called
- // @p copy_local_to_global
- // because that operation must not be done
- // in %parallel, in order to avoid two or
- // more processes trying to add to the same
- // positions in the result vector y.
- //
- // The steps 1 to 4 can be done in %parallel
- // by multiple processes.
-
- // Now, it turns out that the most expensive
- // part of the above is the multiplication
- // Bref_cell
- // xcell in the second step
- // and the transpose operation in step
- // 4. Note that the matrix
- // JT D J is
- // block-diagonal, and hence, its application
- // is cheaper. Since the matrix
- // Bref_cell is the same
- // for all cells, all that changes is the
- // vector xcell. Hence,
- // nothing prevents us from collecting
- // several cell vectors to a (rectangular)
- // matrix, and then perform a matrix-matrix
- // product. These matrices are both full, but
- // not very large, having of the order @p
- // dofs_per_cell rows and columns. This is an
- // operation that can be much better
- // optimized than matrix-vector products. The
- // functions @p FullMatrix::mmult and
- // @p FullMatrix::mTmult use the BLAS
- // dgemm function (as long as BLAS has been
- // detected in deal.II configuration), which
- // provides optimized kernels for doing this
- // product. In our case, a matrix-matrix
- // product is between three and five times
- // faster than doing the matrix-vector
- // product on one cell after the other. The
- // variables that hold the solution on the
- // respective cell's support points and the
- // quadrature points are thus full matrices,
- // which we set to the correct size as a
- // first action in this function. The number
- // of rows in the two matrices @p
- // scratch.solutions and @p copy.solutions is
- // given by the number of cells they work on,
- // and the number of columns is the number of
- // degrees of freedom per cell for the first
- // and the number of quadrature points times
- // the number of components per point for the
- // latter.
-template
-template
-void
-MatrixFree::
-local_vmult (CellChunkIterator cell_range,
- WorkStreamData::ScratchData &scratch,
- WorkStreamData::CopyData ©,
- const Vector &src) const
-{
- const unsigned int chunk_size = cell_range->second - cell_range->first;
-
- scratch.solutions.reinit (chunk_size, matrix_sizes.n, true);
- copy.solutions.reinit (chunk_size, matrix_sizes.m, true);
- copy.first_cell = cell_range->first;
- copy.n_dofs = chunk_size*matrix_sizes.m;
-
- constraints.get_dof_values(src, &indices_local_to_global(copy.first_cell,0),
- ©.solutions(0,0),
- ©.solutions(0,0)+copy.n_dofs);
-
- copy.solutions.mmult (scratch.solutions, B_ref_cell);
-
- for (unsigned int i=0, k = copy.first_cell; i
+ double Coefficient::value (const Point &p,
+ const unsigned int component) const
+ {
+ return value(p,component);
+ }
-template
-template
-void
-MatrixFree::
-copy_local_to_global (const WorkStreamData::CopyData ©,
- Vector &dst) const
-{
- constraints.distribute_local_to_global (©.solutions(0,0),
- ©.solutions(0,0)+copy.n_dofs,
- &indices_local_to_global(copy.first_cell,0),
- dst);
-}
+ template
+ void Coefficient::value_list (const std::vector > &points,
+ std::vector &values,
+ const unsigned int component) const
+ {
+ Assert (values.size() == points.size(),
+ ExcDimensionMismatch (values.size(), points.size()));
+ Assert (component == 0,
+ ExcIndexRange (component, 0, 1));
+
+ const unsigned int n_points = points.size();
+ for (unsigned int i=0; i(points[i],component);
+ }
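As a minimal illustration of the vectorization described in the comments above
(an editorial sketch; it assumes the broadcast assignment, operator[] lane
access, and scalar/array arithmetic operators provided by deal.II's
VectorizedArray class), evaluating the coefficient formula on all SIMD lanes
at once looks like this:
@code
const unsigned int n_lanes = VectorizedArray<double>::n_array_elements; // 2 with SSE2, 4 with AVX
VectorizedArray<double> x;
x = 1.5;                                       // broadcast the same value to every lane
const VectorizedArray<double> a = 1. / (0.05 + 2. * x * x); // one operation per lane pack
const double lane0 = a[0];                     // individual lanes can still be read
@endcode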
+
+
+
+ // @sect3{Matrix-free implementation}
+
+ // The following class, called
+ // LaplaceOperator,
+ // implements the differential
+ // operator. For all practical
+ // purposes, it is a matrix, i.e.,
+ // you can ask it for its size
+ // (member functions m(),
+ // n()) and you can apply it
+ // to a vector (the various
+ // variants of the
+ // vmult()
+ // function). The difference to a
+ // real matrix of course lies in
+ // the fact that this class doesn't
+ // actually store the
+ // elements of the matrix,
+ // but only knows how to compute
+ // the action of the operator when
+ // applied to a vector.
+
+ // In this program, we want to make use of
+ // the data cache for finite element operator
+ // application that is integrated in
+ // deal.II. The main class that collects all
+ // data is called MatrixFree. It contains
+ // mapping information (Jacobians) and index
+ // relations between local and global degrees
+ // of freedom. It also contains constraints
+ // like the ones from Dirichlet boundary
+ // conditions (or hanging nodes, if we had
+ // any). Moreover, it can issue a loop over
+ // all cells in %parallel, where it makes
+ // sure that only cells are worked on that do
+ // not share any degree of freedom (this
+ // makes the loop thread-safe when writing
+ // into destination vectors). This is a more
+ // advanced strategy compared to the
+ // WorkStream class described in the @ref
+ // threads module that serializes operations
+ // that might not be thread-safe. Of course,
+ // to not destroy thread-safety, we have to
+ // be careful when writing into class-global
+ // structures.
+ //
+ // First comes the implementation of the
+ // matrix-free class. It provides some
+ // standard information we expect for
+ // matrices (like returning the dimensions of
+ // the matrix), it implements matrix-vector
+ // multiplications in several forms
+ // (transposed and untransposed), and it
+ // provides functions for initializing the
+ // structure with data. The class has three
+ // template arguments, one for the dimension
+ // (as many deal.II classes carry), one for the
+ // degree of the finite element (which we
+ // need to enable efficient computations
+ // through the FEEvaluation class), and one
+ // for the underlying scalar type. We want to use
+ // double numbers
+ // (i.e., double precision, 64-bit
+ // floating point) for the final
+ // matrix, but floats (single
+ // precision, 32-bit floating point
+ // numbers) for the multigrid level
+ // matrices (as that is only a
+ // preconditioner, and floats can
+ // be worked with twice as fast).
+ //
+ // In this class, we store the actual MatrixFree
+ // object, the variable
+ // coefficient that is evaluated at all
+ // quadrature points (so that we don't have
+ // to recompute it during matrix-vector
+ // products), and a vector that contains the
+ // diagonal of the matrix that we need for
+ // the multigrid smoother. We choose to let
+ // the user provide the diagonal in this
+ // program, but we could also integrate a
+ // function in this class to evaluate the
+ // diagonal. Unfortunately, this forces us to
+ // define matrix entries at two places,
+ // once when we evaluate the product and once
+ // for the diagonal, but the work is still
+ // much less than when we compute sparse
+ // matrices.
+ //
+ // As a sidenote, if we implemented
+ // several different operations on
+ // the same grid and degrees of
+ // freedom (like a mass matrix and
+ // a Laplace matrix), we would have
+ // to have two classes like the
+ // current one for each of the
+ // operators (maybe with a common
+ // base class). However, in that
+ // case, we would not store a
+ // MatrixFree object in this
+ // class to avoid doing the
+ // expensive work of pre-computing
+ // everything MatrixFree stores
+ // twice. Rather, we would keep
+ // this object in the main class
+ // and simply store a reference.
+ //
+ // @note Observe how we store the values
+ // for the coefficient: We use a vector
+ // type
+ // AlignedVector<VectorizedArray<number> >
+ // structure. One would think that
+ // one can use
+ // std::vector<VectorizedArray<number> >
+ // as well, but there are some
+ // technicalities with vectorization: A
+ // certain alignment of the data with the
+ // memory address boundaries is required
+ // (essentially, a VectorizedArray of 16
+ // bytes length as in SSE needs to start at
+ // a memory address that is divisible by
+ // 16). The chosen class makes sure that
+ // this alignment is respected, whereas
+ // std::vector can in general not, which
+ // may lead to segmentation faults at
+ // strange places for some systems or
+ // suboptimal performance for other
+ // systems.
+ template
+ class LaplaceOperator : public Subscriptor
+ {
+ public:
+ LaplaceOperator ();
- // Now to the @p vmult function that is
- // called externally: In addition to what we
- // do in a @p vmult_add function, we set the
- // destination to zero first.
-template
-template
-void
-MatrixFree::vmult (Vector &dst,
- const Vector &src) const
-{
- dst = 0;
- vmult_add (dst, src);
-}
+ void clear();
+ void reinit (const MGDoFHandler &dof_handler,
+ const ConstraintMatrix &constraints,
+ const unsigned int level = numbers::invalid_unsigned_int);
+ unsigned int m () const;
+ unsigned int n () const;
- // Transposed matrix-vector products (needed
- // for the multigrid operations to be
- // well-defined): do the same. Since we
- // implement a symmetric operation, we can
- // refer to the @p vmult_add operation.
-template
-template
-void
-MatrixFree::Tvmult (Vector &dst,
- const Vector &src) const
-{
- dst = 0;
- Tvmult_add (dst,src);
-}
+ void vmult (Vector &dst,
+ const Vector &src) const;
+ void Tvmult (Vector &dst,
+ const Vector &src) const;
+ void vmult_add (Vector &dst,
+ const Vector &src) const;
+ void Tvmult_add (Vector &dst,
+ const Vector &src) const;
+ number el (const unsigned int row,
+ const unsigned int col) const;
+ void set_diagonal (const Vector &diagonal);
+ std::size_t memory_consumption () const;
-template
-template
-void
-MatrixFree::Tvmult_add (Vector &dst,
- const Vector &src) const
-{
- vmult_add (dst,src);
-}
+ private:
+ void local_apply (const MatrixFree &data,
+ Vector &dst,
+ const Vector &src,
+ const std::pair &cell_range) const;
+ void evaluate_coefficient(const Coefficient &function);
+ MatrixFree data;
+ AlignedVector > coefficient;
- // This is the @p vmult_add function that
- // multiplies the matrix with vector @p src
- // and adds the result to vector @p dst. We
- // include a few sanity checks to make sure
- // that the size of the vectors is the same
- // as the dimension of the matrix. We call a
- // %parallel function that applies the
- // multiplication on a chunk of cells at once
- // using the WorkStream module (cf. also the
- // @ref threads module). The subdivision into
- // chunks will be performed in the reinit
- // function and is stored in the field @p
- // matrix_sizes.chunks. What the rather
- // cryptic command to @p std_cxx1x::bind does
- // is to transform a function that has
- // several arguments (source vector, chunk
- // information) into a function which has
- // three arguments (in the first case) or one
- // argument (in the second), which is what
- // the WorkStream::run function expects. The
- // placeholders _1, std_cxx1x::_2, _3
in
- // the local vmult specify variable input
- // values, given by the chunk information,
- // scratch data and copy data that the
- // WorkStream::run function will provide,
- // whereas the other arguments to the @p
- // local_vmult function are bound: to @p this
- // and a constant reference to the @p src in
- // the first case, and @p this and a
- // reference to the output vector in the
- // second. Similarly, the placeholder
- // @p _1 argument in the
- // @p copy_local_to_global function
- // sets the first explicit argument of that
- // function, which is of class
- // @p CopyData. We need to
- // abstractly specify these arguments because
- // the tasks defined by different cell chunks
- // will be scheduled by the WorkStream class,
- // and we will reuse available scratch and
- // copy data.
-template
-template
-void
-MatrixFree::vmult_add (Vector &dst,
- const Vector &src) const
-{
- Assert (src.size() == n(), ExcDimensionMismatch(src.size(), n()));
- Assert (dst.size() == m(), ExcDimensionMismatch(dst.size(), m()));
-
- WorkStream::run (matrix_sizes.chunks.begin(), matrix_sizes.chunks.end(),
- std_cxx1x::bind(&MatrixFree::
- template local_vmult,
- this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3, boost::cref(src)),
- std_cxx1x::bind(&MatrixFree::
- template copy_local_to_global,
- this, std_cxx1x::_1, boost::ref(dst)),
- WorkStreamData::ScratchData(),
- WorkStreamData::CopyData(),
- 2*multithread_info.n_default_threads,1);
-
- // One thing to be cautious about:
- // The deal.II classes expect that
- // the matrix still contains a
- // diagonal entry for constrained
- // dofs (otherwise, the matrix
- // would be singular, which is not
- // what we want). Since the
- // distribute_local_to_global
- // command of the constraint matrix
- // which we used for adding the
- // local elements into the global
- // vector does not do anything with
- // constrained elements, we have to
- // circumvent that problem by
- // artificially setting the
- // diagonal to some non-zero value
- // and adding the source values. We
- // simply set it to one, which
- // corresponds to copying the
- // respective elements of the
- // source vector into the matching
- // entry of the destination vector.
- for (unsigned int i=0; i
-void MatrixFree::
-reinit (const unsigned int n_dofs_in,
- const unsigned int n_cells_in,
- const FullMatrix &B_ref_cell_in,
- const unsigned int n_points_per_cell)
-{
- B_ref_cell = B_ref_cell_in;
-
- derivatives.reinit (n_cells_in, n_points_per_cell);
- indices_local_to_global.reinit (n_cells_in, B_ref_cell.m());
-
- diagonal_is_calculated = false;
-
- matrix_sizes.n_dofs = n_dofs_in;
- matrix_sizes.n_cells = n_cells_in;
- matrix_sizes.m = B_ref_cell.m();
- matrix_sizes.n = B_ref_cell.n();
- matrix_sizes.n_points = n_points_per_cell;
- matrix_sizes.n_comp = B_ref_cell.n()/matrix_sizes.n_points;
- Assert(matrix_sizes.n_comp * n_points_per_cell == B_ref_cell.n(),
- ExcInternalError());
-
- // One thing to make the matrix-vector
- // product with this class efficient is to
- // decide how many cells should be combined
- // to one chunk, which will determine the
- // size of the full matrix that we work
- // on. If we choose too few cells, then the
- // gains from using the matrix-matrix
- // product will not be fully utilized
- // (dgemm tends to provide more efficiency
- // the larger the matrix dimensions get),
- // so we choose at least 60 cells for one
- // chunk (except when there are very few
- // cells, like on the coarse levels of the
- // multigrid scheme). If we choose too
- // many, we will degrade parallelization
- // (we need to have sufficiently
- // independent tasks). We need to also
- // think about the fact that most high
- // performance BLAS implementations
- // internally work with square
- // sub-matrices. Choosing as many cells in
- // a chunk as there are degrees of freedom
- // on each cell (coded in @p
- // matrix_sizes.m) respects the BLAS GEMM
- // design, whenever we exceed 60. Clearly,
- // the chunk size is an
- // architecture-dependent value and the
- // interested user can squeeze out some
- // extra performance by hand-tuning this
- // parameter. Once we have chosen the
- // number of cells we collect in one chunk,
- // we determine how many chunks we have on
- // the given cell range and recalculate the
- // actual chunk size in order to evenly
- // distribute the chunks.
- const unsigned int divisor = std::max(60U, matrix_sizes.m);
- const unsigned int n_chunks = std::max (matrix_sizes.n_cells/divisor + 1,
- 2*multithread_info.n_default_threads);
-
- const unsigned int chunk_size = (matrix_sizes.n_cells/n_chunks +
- (matrix_sizes.n_cells%n_chunks>0));
-
- std::pair chunk;
- for (unsigned int i=0; i matrix_sizes.n_cells)
- chunk.second = matrix_sizes.n_cells;
- else
- chunk.second = (i+1)*chunk_size;
-
- if (chunk.second > chunk.first)
- matrix_sizes.chunks.push_back(chunk);
- else
- break;
- }
-}
+ Vector diagonal_values;
+ bool diagonal_is_available;
+ };
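Since the class exposes m(), n(), vmult() and el(), it can be handed to
deal.II's iterative solvers just like an assembled matrix. A minimal sketch
(variable names assumed; the tutorial itself uses a multigrid preconditioner
rather than the identity shown here):
@code
LaplaceOperator<dim,degree_finite_element,double> system_matrix;
// ... system_matrix.reinit(...), assemble a right hand side ...
Vector<double> solution (system_matrix.m()), system_rhs (system_matrix.m());
SolverControl  solver_control (1000, 1e-12);
SolverCG<>     cg (solver_control);
cg.solve (system_matrix, solution, system_rhs, PreconditionIdentity());
@endcode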
- // Then we need a function if we want to
- // delete the content of the matrix,
- // e.g. when we are finished with one grid
- // level and continue to the next one. Just
- // set all the field sizes to 0.
-template
-void
-MatrixFree::clear ()
-{
- B_ref_cell.reinit(0,0);
- derivatives.reinit (0,0);
- indices_local_to_global.reinit(0,0);
+ // This is the constructor of the @p
+ // LaplaceOperator class. All it does is to
+ // subscribe to the general deal.II @p
+ // Subscriptor scheme that makes sure that we
+ // do not delete an object of this class as
+ // long as it used somewhere else, e.g. in a
+ // preconditioner.
+ template
+ LaplaceOperator::LaplaceOperator ()
+ :
+ Subscriptor()
+ {}
- constraints.clear();
- diagonal_values.reinit (0);
- diagonal_is_calculated = false;
-
- matrix_sizes.n_dofs = 0;
- matrix_sizes.n_cells = 0;
- matrix_sizes.chunks.clear();
-}
+ // The next functions return the
+ // number of rows and columns of
+ // the global matrix (i.e. the
+ // dimensions of the operator this
+ // class represents, the point of
+ // this tutorial program was, after
+ // all, that we don't actually
+ // store the elements of the rows
+ // and columns of this
+ // operator). Since the matrix is
+ // square, the returned numbers are
+ // the same. We get the number from
+ // the vector partitioner stored in
+ // the data field (a partitioner
+ // distributes elements of a vector
+ // onto a number of different
+ // machines if programs are run in
+ // %parallel; since this program is
+ // written to run on only a single
+ // machine, the partitioner will
+ // simply say that all elements of
+ // the vector -- or, in the current
+ // case, all rows and columns of a
+ // matrix -- are stored on the
+ // current machine).
+ template
+ unsigned int
+ LaplaceOperator::m () const
+ {
+ return data.get_vector_partitioner()->size();
+ }
- // The next function returns the entries of the
- // matrix. Since this class is intended not
- // to store the matrix entries, it would make
- // no sense to provide all those
- // elements. However, diagonal entries are
- // explicitly needed for the implementation
- // of the Chebyshev smoother that we intend
- // to use in the multigrid
- // preconditioner. This matrix is equipped
- // with a vector that stores the diagonal,
- // and we compute it when this function is
- // called for the first time.
-template
-number
-MatrixFree::el (const unsigned int row,
- const unsigned int col) const
-{
- Assert (row == col, ExcNotImplemented());
- if (diagonal_is_calculated == false)
- calculate_diagonal();
- return diagonal_values(row);
-}
+ template
+ unsigned int
+ LaplaceOperator::n () const
+ {
+ return data.get_vector_partitioner()->size();
+ }
- // Regarding the calculation of the diagonal,
- // remember that this is as simple (or
- // complicated) as assembling a right hand
- // side in deal.II. Well, it is a bit easier
- // to do this within this class since we have
- // all the derivative information
- // available. What we do is to go through all
- // the cells (now in serial, since this
- // function should not be called very often
- // anyway), then all the degrees of
- // freedom. At this place, we first copy the
- // first basis functions in all the
- // quadrature points to a temporary array,
- // apply the derivatives from the Jacobian
- // matrix, and finally multiply with the
- // second basis function. This is exactly the
- // value that would be written into the
- // diagonal of a sparse matrix. Note that we
- // need to condense hanging node constraints
- // and set the constrained diagonals to one.
-template
-void
-MatrixFree::calculate_diagonal() const
-{
- diagonal_values.reinit (matrix_sizes.n_dofs);
- std::vector calculation (matrix_sizes.n);
- for (unsigned int cell=0; cell
+ void
+ LaplaceOperator::clear ()
+ {
+ data.clear();
+ diagonal_is_available = false;
+ diagonal_values.reinit(0);
+ }
+
+
+ // @sect4{Initialization}
+
+ // Once we have created the multi-grid
+ // dof_handler and the constraints, we can
+ // call the reinit function for each level
+ // of the multi-grid routine (and the
+ // active cells). The main purpose of the
+ // reinit function is to setup the
+ // MatrixFree instance for the
+ // problem. Also, the coefficient is
+ // evaluated. For this, we need to activate
+ // the update flag in the AdditionalData
+ // field of MatrixFree that enables the
+ // storage of quadrature point coordinates
+ // in real space (by default, it only
+ // caches data for gradients (inverse
+ // transposed Jacobians) and JxW
+ // values). Note that if we call the reinit
+ // function without specifying the level
+ // (i.e., giving level =
+ // numbers::invalid_unsigned_int),
+ // we have told the class to loop over the
+ // active cells.
+ //
+ // We also set one option regarding
+ // task parallelism. We choose to
+ // use the @p partition_color
+ // strategy, which is based on
+ // subdivision of cells into
+ // partitions where cells in
+ // partition $k$ (or, more
+ // precisely, the degrees of
+ // freedom on these cells) only
+ // interact with cells in
+ // partitions $k-1$, $k$, and
+ // $k+1$. Within each partition,
+ // cells are colored in such a way
+ // that cells with the same color
+ // do not share degrees of freedom
+ // and can, therefore, be worked on
+ // at the same time without
+ // interference. This determines a
+ // task dependency graph that is
+ // scheduled by the Intel Threading
+ // Building Blocks library. Another
+ // option would be the strategy @p
+ // partition_partition, which
+ // performs better when the grid is
+ // more unstructured. We could also
+ // manually set the size of chunks
+ // that form one task in the
+ // scheduling process by setting @p
+ // tasks_block_size, but the
+ // default strategy to let the
+ // function decide works well
+ // already.
+ //
+ // To initialize the coefficient,
+ // we directly give it the
+ // Coefficient class defined above
+ // and then select the method
+ // coefficient_function.value
+ // with vectorized number (which
+ // the compiler can deduce from the
+ // point data type). The use of the
+ // FEEvaluation class (and its
+ // template arguments) will be
+ // explained below.
+ template
+ void
+ LaplaceOperator::reinit (const MGDoFHandler &dof_handler,
+ const ConstraintMatrix &constraints,
+ const unsigned int level)
+ {
+ typename MatrixFree::AdditionalData additional_data;
+ additional_data.tasks_parallel_scheme =
+ MatrixFree::AdditionalData::partition_color;
+ additional_data.level_mg_handler = level;
+ additional_data.mapping_update_flags = (update_gradients | update_JxW_values |
+ update_quadrature_points);
+ data.reinit (dof_handler, constraints, QGauss<1>(fe_degree+1),
+ additional_data);
+ evaluate_coefficient(Coefficient());
+ }
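As a usage sketch (object names assumed, not quoted from this file): the setup
routine calls this reinit function once without a level argument for the
active cells, and once per level for the (float-based) multigrid matrices:
@code
system_matrix.reinit (mg_dof_handler, constraints);   // active cells
const unsigned int nlevels = triangulation.n_levels();
mg_matrices.resize (0, nlevels-1);                     // MGLevelObject of level operators
for (unsigned int level=0; level<nlevels; ++level)
  mg_matrices[level].reinit (mg_dof_handler, mg_constraints[level], level);
@endcode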
+
+
+
+ template
+ void
+ LaplaceOperator::
+ evaluate_coefficient (const Coefficient &coefficient_function)
+ {
+ const unsigned int n_cells = data.get_size_info().n_macro_cells;
+ FEEvaluation phi (data);
+ coefficient.resize (n_cells * phi.n_q_points);
+ for (unsigned int cell=0; cell
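The loop body is cut off at this point in the extracted diff. For orientation,
the function in the released version of step-37 reads roughly as follows (a
reconstruction, not necessarily identical to this revision):
@code
template <int dim, int fe_degree, typename number>
void
LaplaceOperator<dim,fe_degree,number>::
evaluate_coefficient (const Coefficient<dim> &coefficient_function)
{
  const unsigned int n_cells = data.get_size_info().n_macro_cells;
  FEEvaluation<dim,fe_degree,fe_degree+1,1,number> phi (data);
  coefficient.resize (n_cells * phi.n_q_points);
  for (unsigned int cell=0; cell<n_cells; ++cell)
    {
      phi.reinit (cell);
      // evaluate the analytic coefficient at each (vectorized) quadrature point
      for (unsigned int q=0; q<phi.n_q_points; ++q)
        coefficient[cell*phi.n_q_points+q] =
          coefficient_function.value(phi.quadrature_point(q));
    }
}
@endcode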
-std::size_t MatrixFree::memory_consumption () const
-{
- std::size_t glob_size = derivatives.memory_consumption() +
- indices_local_to_global.memory_consumption() +
- constraints.memory_consumption() +
- B_ref_cell.memory_consumption() +
- diagonal_values.memory_consumption() +
- matrix_sizes.chunks.size()*2*sizeof(unsigned int) +
- sizeof(*this);
- return glob_size;
-}
-
-
-
- // @sect3{Laplace operator implementation}
-
- // This class implements the local action of
- // a Laplace operator on a quadrature
- // point. This is a very basic class
- // implementation, providing functions for
- // initialization with a Tensor of rank 2 and
- // implementing the @p transform operation
- // needed by the @p MatrixFree class. There
- // is one point worth noting: The
- // quadrature-point related action of the
- // Laplace operator is a tensor of rank
- // two. It is symmetric since it is the
- // product of the inverse Jacobian
- // transformation between unit and real cell
- // with its transpose (times quadrature
- // weights and a coefficient, which are
- // scalar), so we can just save the diagonal
- // and upper diagonal part. We could use the
- // SymmetricTensor<2,dim> class for doing
- // this, however, that class is only based on
- // @p double %numbers. Since we also want to
- // use @p float %numbers for the multigrid
- // preconditioner (in order to save memory
- // and computing time), we manually implement
- // this operator. Note that @p dim is a
- // template argument and hence known at
- // compile-time, so the compiler knows that
- // this symmetric rank-2 tensor has 3 entries
- // if used in 2D and 6 entries if used in 3D.
-template
-class LaplaceOperator
-{
- public:
- LaplaceOperator ();
-
- LaplaceOperator (const Tensor<2,dim> &tensor);
-
- void transform (number * result) const;
-
- LaplaceOperator&
- operator = (const Tensor<2,dim> &tensor);
-
- unsigned int memory_consumption () const;
-
- private:
- number transformation[dim*(dim+1)/2];
-};
-
-template
-LaplaceOperator::LaplaceOperator()
-{}
-
-
-template
-LaplaceOperator::LaplaceOperator(const Tensor<2,dim> &tensor)
-{
- *this = tensor;
-}
-
- // Now implement the transformation, which is
- // just a so-called contraction
- // operation between a tensor of rank two and a
- // tensor of rank one. Unfortunately, we
- // need to implement this by hand, since we
- // chose not to use the
- // SymmetricTensor<2,dim> class (note that
- // the resulting values are entries in a full
- // matrix that consists of doubles or
- // floats). It feels a bit unsafe to operate
- // on a pointer to the data, but that is the
- // only possibility if we do not want to copy
- // data back and forth, which is expensive
- // since this is the innermost position of
- // the loop in the @p vmult
- // operation of the MatrixFree class. We need
- // to pay attention to the fact that we only
- // saved half of the (symmetric) rank-two
- // tensor.
- //
- // At first sight, it seems inefficient that
- // we have an @p if clause at this position
- // in the code at the innermost loop, but
- // note once again that @p dim is known when
- // this piece of code is compiled, so the
- // compiler can optimize away the @p if
- // statement (and actually even inline these
- // few lines of code into the @p MatrixFree
- // class).
-template
-void LaplaceOperator::transform (number* result) const
-{
- if (dim == 2)
- {
- const number temp = result[0];
- result[0] = transformation[0] * temp + transformation[1] * result[1];
- result[1] = transformation[1] * temp + transformation[2] * result[1];
- }
- else if (dim == 3)
- {
- const number temp1 = result[0];
- const number temp2 = result[1];
- result[0] = transformation[0] * temp1 + transformation[1] * temp2 +
- transformation[2] * result[2];
- result[1] = transformation[1] * temp1 + transformation[3] * temp2 +
- transformation[4] * result[2];
- result[2] = transformation[2] * temp1 + transformation[4] * temp2 +
- transformation[5] * result[2];
- }
- else
- AssertThrow(false, ExcNotImplemented());
-}
+ }
+
+
+
+ // @sect4{Local evaluation of Laplace operator}
+
+ // Here comes the main function of this
+ // class, the evaluation of the
+ // matrix-vector product (or, in general, a
+ // finite element operator
+ // evaluation). This is done in a function
+ // that takes exactly four arguments, the
+ // MatrixFree object, the destination and
+ // source vectors, and a range of cells
+ // that are to be worked on. The method
+ // cell_loop in the MatrixFree
+ // class will internally call this function
+ // with some range of cells that is
+ // obtained by checking which cells are
+ // possible to work on simultaneously so
+ // that write operations do not cause any
+ // race condition. Note that the total
+ // range of cells as visible in this class
+ // is usually not equal to the number of
+ // (active) cells in the triangulation. In
+ // fact, "cell" may be the wrong term to
+ // begin with, since it is rather a
+ // collection of quadrature points from
+ // several cells, and the MatrixFree class
+ // groups the quadrature points of several
+ // cells into one block to enable a higher
+ // degree of vectorization. The number of
+ // such "cells" is stored in MatrixFree and
+ // can be queried through
+ // MatrixFree::get_size_info().n_macro_cells. Compared
+ // to the deal.II cell iterators, in this
+ // class all cells are laid out in a plain
+ // array with no direct knowledge of level
+ // or neighborship relations, which makes
+ // it possible to index the cells by
+ // unsigned integers.
+ //
+ // The implementation of the Laplace
+ // operator is quite simple: First, we need
+ // to create an object FEEvaluation that
+ // contains the computational kernels and
+ // has data fields to store temporary
+ // results (e.g. gradients evaluated on all
+ // quadrature points on a collection of a
+ // few cells). Note that temporary results
+ // do not use a lot of memory, and since we
+ // specify template arguments with the
+ // element order, the data is stored on the
+ // stack (without expensive memory
+ // allocation). Usually, one only needs to
+ // set two template arguments, the
+ // dimension as first argument and the
+ // degree of the finite element as
+ // the second argument (this is equal to
+ // the number of degrees of freedom per
+ // dimension minus one for FE_Q
+ // elements). However, here we also want to
+ // be able to use float numbers for the
+ // multigrid preconditioner, which is the
+ // last (fifth) template
+ // argument. Therefore, we cannot rely on
+ // the default template arguments and must
+ // also fill in the third and fourth
+ // arguments explicitly. The third argument
+ // specifies the number of quadrature
+ // points per direction and has a default
+ // value equal to the degree of the element
+ // plus one. The fourth argument sets
+ // the number of components (one can also
+ // evaluate vector-valued functions in
+ // systems of PDEs, but the default is a
+ // scalar element), and finally the last
+ // argument sets the number type.
+ //
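As a short sketch, and assuming the enclosing class carries the template parameters dim, fe_degree and number used throughout this program, the declaration described above reads:

  // dimension, element degree, quadrature points per direction,
  // number of components, number type
  FEEvaluation<dim, fe_degree, fe_degree+1, 1, number> phi (data);
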
+ // Next, we loop over the given cell range and
+ // then we continue with the actual
+ // implementation:
+ //
+ // - Tell the FEEvaluation object the
+ // (macro) cell we want to work on.
+ //
+ // - Read in the values of the
+ // source vectors (@p read_dof_values),
+ // including the resolution of
+ // constraints. This stores
+ // $u_\mathrm{cell}$ as described in the
+ // introduction.
+ //
+ // - Compute the unit-cell gradient
+ // (the evaluation of finite element
+ // functions). Since FEEvaluation can
+ // combine value computations with
+ // gradient computations, it uses a
+ // unified interface to all kinds of
+ // derivatives of order between zero and
+ // two. We only want gradients, no values
+ // and no second derivatives, so we set
+ // the function arguments to true in the
+ // gradient slot (second slot), and to
+ // false in the values slot (first slot)
+ // and Hessian slot (third slot). Note
+ // that the FEEvaluation class internally
+ // evaluates shape functions in an
+ // efficient way where one dimension is
+ // worked on at a time (using the tensor
+ // product form of shape functions and
+ // quadrature points as mentioned in the
+ // introduction). This gives complexity
+ // equal to $\mathcal O(d^2 (p+1)^{d+1})$
+ // for polynomial degree $p$ in $d$
+ // dimensions, compared to the naive
+ // approach with loops over all local
+ // degrees of freedom and quadrature
+ // points used in FEValues, which costs
+ // $\mathcal O(d (p+1)^{2d})$.
+ //
+ // - Next comes the application of the
+ // Jacobian transformation, the
+ // multiplication by the variable
+ // coefficient and the quadrature
+ // weight. FEEvaluation has an access
+ // function @p get_gradient that applies
+ // the Jacobian and returns the gradient
+ // in real space. Then, we just need to
+ // multiply by the (scalar) coefficient,
+ // and let the function @p
+ // submit_gradient apply the second
+ // Jacobian (for the test function) and
+ // the quadrature weight and Jacobian
+ // determinant (JxW). Note that the
+ // submitted gradient is stored in the
+ // same data field as where it is read
+ // from in @p get_gradient. Therefore,
+ // you need to make sure to not read from
+ // the same quadrature point again after
+ // having called @p submit_gradient on
+ // that particular quadrature point. In
+ // general, it is a good idea to copy the
+ // result of @p get_gradient when it is
+ // used more often than once.
+ //
+ // - Next follows the summation over
+ // quadrature points for all test
+ // functions that corresponds to the
+ // actual integration step. For the
+ // Laplace operator, we just multiply by
+ // the gradient, so we call the integrate
+ // function with the respective argument
+ // set. If you have an equation where you
+ // test by both the values of the test
+ // functions and the gradients, both
+ // function arguments need to be set to
+ // true. Calling the integrate function
+ // first for values and then, in a separate
+ // call, for gradients leads to wrong
+ // results, since the second call will
+ // internally overwrite the results from
+ // the first call. Note that there is no
+ // function argument for second derivatives
+ // in the integrate step.
+ //
+ // - Eventually, the local
+ // contributions in the vector
+ // $v_\mathrm{cell}$ as mentioned in the
+ // introduction need to be added into the
+ // result vector (and constraints are
+ // applied). This is done with a call to
+ // @p distribute_local_to_global, the
+ // same name as the corresponding
+ // function in the ConstraintMatrix (only
+ // that we now store the local vector in
+ // the FEEvaluation object, as are the
+ // indices between local and global
+ // degrees of freedom).
+ template <int dim, int fe_degree, typename number>
+ void
+ LaplaceOperator<dim,fe_degree,number>::
+ local_apply (const MatrixFree<dim,number> &data,
+ Vector<double> &dst,
+ const Vector<double> &src,
+ const std::pair<unsigned int,unsigned int> &cell_range) const
+ {
+ FEEvaluation<dim,fe_degree,fe_degree+1,1,number> phi (data);
+ AssertDimension (coefficient.size(),
+ data.get_size_info().n_macro_cells * phi.n_q_points);
- // The final function in this group
- // takes the content of a rank-2
- // tensor and writes it to the field
- // @p transformation of
- // this class. We save the upper part
- // of the symmetric tensor row-wise:
- // we first take the (0,0)-entry,
- // then the (0,1)-entry, and so
- // on. We only implement this for
- // dimensions two and three, which
- // for the moment should do just
- // fine:
-template <int dim, typename number>
-LaplaceOperator<dim,number>&
-LaplaceOperator<dim,number>::operator=(const Tensor<2,dim> &tensor)
-{
- if (dim == 2)
- {
- transformation[0] = tensor[0][0];
- transformation[1] = tensor[0][1];
- transformation[2] = tensor[1][1];
- Assert (std::fabs(tensor[1][0]-tensor[0][1])<1e-15,
- ExcInternalError());
- }
- else if (dim == 3)
- {
- transformation[0] = tensor[0][0];
- transformation[1] = tensor[0][1];
- transformation[2] = tensor[0][2];
- transformation[3] = tensor[1][1];
- transformation[4] = tensor[1][2];
- transformation[5] = tensor[2][2];
- Assert (std::fabs(tensor[1][0]-tensor[0][1])<1e-15,
- ExcInternalError());
- Assert (std::fabs(tensor[2][0]-tensor[0][2])<1e-15,
- ExcInternalError());
- Assert (std::fabs(tensor[2][1]-tensor[1][2])<1e-15,
- ExcInternalError());
- }
- else
- AssertThrow(false, ExcNotImplemented());
- return *this;
-}
+ for (unsigned int cell=cell_range.first; cell<cell_range.second; ++cell)
+ {
+ phi.reinit (cell);
+ phi.read_dof_values (src);
+ phi.evaluate (false, true, false);
+ for (unsigned int q=0; q<phi.n_q_points; ++q)
+ phi.submit_gradient (coefficient[cell*phi.n_q_points+q] *
+ phi.get_gradient(q), q);
+ phi.integrate (false, true);
+ phi.distribute_local_to_global (dst);
+ }
+ }
+
+
+ template <int dim, int fe_degree, typename number>
+ void
+ LaplaceOperator<dim,fe_degree,number>::vmult (Vector<double> &dst,
+ const Vector<double> &src) const
+ {
+ dst = 0;
+ vmult_add (dst, src);
+ }
-template <int dim, typename number>
-unsigned int
-LaplaceOperator<dim,number>::memory_consumption () const
-{
- return sizeof(*this);
-}
+ template <int dim, int fe_degree, typename number>
+ void
+ LaplaceOperator<dim,fe_degree,number>::Tvmult (Vector<double> &dst,
+ const Vector<double> &src) const
+ {
+ dst = 0;
+ vmult_add (dst,src);
+ }
- // @sect3{LaplaceProblem class}
- // This class is based on the same
- // class in step-16. However, we
- // replaced the SparseMatrix
- // class by our matrix-free
- // implementation, which means that
- // we can also skip the sparsity
- // patterns.
-template <int dim>
-class LaplaceProblem
-{
- public:
- LaplaceProblem (const unsigned int degree);
- void run ();
-
- private:
- void setup_system ();
- void assemble_system ();
- void assemble_multigrid ();
- void solve ();
- void output_results (const unsigned int cycle) const;
-
- Triangulation<dim> triangulation;
- FE_Q<dim> fe;
- MGDoFHandler<dim> mg_dof_handler;
-
- MatrixFree<double,LaplaceOperator<dim,double> > system_matrix;
- typedef MatrixFree<float,LaplaceOperator<dim,float> > MatrixFreeType;
- MGLevelObject<MatrixFreeType> mg_matrices;
- FullMatrix<float> coarse_matrix;
-
- Vector<double> solution;
- Vector<double> system_rhs;
-};
-
-
-
-template <int dim>
-LaplaceProblem<dim>::LaplaceProblem (const unsigned int degree)
- :
- fe (degree),
- mg_dof_handler (triangulation)
-{}
-
-
-
- // @sect4{LaplaceProblem::setup_system}
-
- // This is the function of step-16 with
- // relevant changes due to the MatrixFree
- // class. What we need to do is to somehow
- // create a local gradient matrix that does
- // not contain any cell-related data
- // (gradient on the reference cell). The
- // way to get to this matrix is to create
- // an FEValues object with gradient
- // information on a cell that corresponds
- // to the reference cell, which is a cube
- // with side length 1. So we create a
- // pseudo triangulation, initialize the
- // FEValues to the only cell of that
- // triangulation, and read off the
- // gradients (which we put in a
- // FullMatrix). That full matrix is then
- // passed to the reinit function of the
- // MatrixFree class used as a system matrix
- // and, further down, as multigrid matrices
- // on the individual levels. We need to
- // implement Dirichlet boundary conditions
- // here, which is done with the
- // ConstraintMatrix function as shown,
- // e.g., in step-22.
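A minimal sketch of the reference-cell setup described in the previous paragraph (the variable names and the row layout of the gradient matrix are assumptions for illustration, not the program's actual code):

  Triangulation<dim> reference_cell;
  GridGenerator::hyper_cube (reference_cell, 0., 1.);
  QGauss<dim> quadrature (fe.degree+1);
  FEValues<dim> fe_values (fe, quadrature, update_gradients);
  fe_values.reinit (reference_cell.begin());

  // one row per (quadrature point, derivative direction), one column per
  // local degree of freedom on the reference cell
  FullMatrix<double> ref_gradients (dim*quadrature.size(), fe.dofs_per_cell);
  for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
    for (unsigned int q=0; q<quadrature.size(); ++q)
      for (unsigned int d=0; d<dim; ++d)
        ref_gradients(q*dim+d, i) = fe_values.shape_grad(i, q)[d];
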
-template <int dim>