From c5353a6d4becad5382069eccce048b3fb90be36d Mon Sep 17 00:00:00 2001
From: heltai
Date: Wed, 30 Oct 2013 16:23:44 +0000
Subject: [PATCH] Merged from trunk.
git-svn-id: https://svn.dealii.org/branches/branch_manifold_id@31487 0785d39b-7218-0410-832d-ea1e28bc413d
---
deal.II/cmake/config/CMakeLists.txt | 121 +-
deal.II/cmake/config/Config.cmake.in | 26 +-
deal.II/cmake/config/Make.global_options.in | 12 +-
.../cmake/configure/configure_2_petsc.cmake | 1 +
...mfpack.cmake => configure_2_umfpack.cmake} | 0
deal.II/cmake/configure/configure_hdf5.cmake | 23 +-
deal.II/cmake/macros/macro_add_test.cmake | 16 +-
.../macro_deal_ii_invoke_autopilot.cmake | 2 +-
.../macros/macro_expand_instantiations.cmake | 7 +-
deal.II/cmake/modules/FindARPACK.cmake | 5 -
deal.II/cmake/modules/FindHDF5.cmake | 12 +-
deal.II/cmake/modules/FindMUMPS.cmake | 1 -
deal.II/cmake/modules/FindSCALAPACK.cmake | 6 -
deal.II/cmake/modules/FindUMFPACK.cmake | 9 -
deal.II/cmake/scripts/CMakeLists.txt | 8 -
deal.II/cmake/scripts/run_test.cmake | 32 +-
deal.II/cmake/setup_cached_variables.cmake | 26 +-
deal.II/cmake/setup_custom_targets.cmake | 1 +
deal.II/cmake/setup_deal_ii.cmake | 33 +-
deal.II/cmake/setup_finalize.cmake | 5 +-
deal.II/cmake/setup_testsuite.cmake | 29 +-
.../contrib/mesh_conversion/CMakeLists.txt | 2 +-
deal.II/contrib/parameter_gui/CMakeLists.txt | 2 +-
deal.II/doc/authors.html | 88 +-
deal.II/doc/developers/porting.html | 23 +-
deal.II/doc/developers/testsuite.html | 17 +-
deal.II/doc/news/changes.h | 16 +-
deal.II/doc/publications/index.html | 5 +-
deal.II/examples/step-13/doc/intro.dox | 31 +-
deal.II/examples/step-13/doc/results.dox | 62 +-
deal.II/examples/step-13/step-13.cc | 437 +++----
deal.II/examples/step-14/doc/intro.dox | 4 +-
deal.II/examples/step-14/step-14.cc | 303 +++--
deal.II/examples/step-32/doc/intro.dox | 5 +-
deal.II/examples/step-32/step-32.cc | 39 +-
deal.II/examples/step-37/step-37.cc | 2 -
deal.II/examples/step-42/doc/intro.dox | 17 +-
deal.II/examples/step-42/step-42.cc | 2 +-
deal.II/examples/step-51/doc/builds-on | 2 +-
deal.II/examples/step-51/doc/intro.dox | 39 +-
deal.II/examples/step-51/doc/results.dox | 102 +-
deal.II/examples/step-9/doc/intro.dox | 7 +-
deal.II/examples/step-9/step-9.cc | 1018 ++++++++---------
deal.II/include/deal.II/base/exceptions.h | 10 +-
.../include/deal.II/base/thread_management.h | 10 +-
deal.II/include/deal.II/base/work_stream.h | 328 ++----
deal.II/include/deal.II/grid/grid_tools.h | 1 -
deal.II/include/deal.II/grid/tria_objects.h | 61 +-
.../include/deal.II/lac/chunk_sparse_matrix.h | 2 +
.../deal.II/lac/chunk_sparsity_pattern.h | 4 +-
.../deal.II/lac/generic_linear_algebra.h | 3 +-
deal.II/include/deal.II/lac/precondition.h | 14 +-
.../include/deal.II/lac/vector.templates.h | 8 +-
.../numerics/derivative_approximation.h | 343 ++----
.../include/deal.II/numerics/vector_tools.h | 20 +-
deal.II/source/CMakeLists.txt | 2 +-
deal.II/source/base/exceptions.cc | 75 +-
deal.II/source/base/subscriptor.cc | 6 +-
deal.II/source/grid/tria_objects.cc | 22 +-
deal.II/source/numerics/data_out.cc | 47 +-
deal.II/source/numerics/data_out_faces.cc | 2 +-
deal.II/source/numerics/data_out_rotation.cc | 2 +-
.../numerics/derivative_approximation.cc | 6 +-
deal.II/tests/CMakeLists.txt | 35 +-
deal.II/tests/all-headers/CMakeLists.txt | 18 +-
deal.II/tests/build_tests/CMakeLists.txt | 19 +-
deal.II/tests/mesh_converter/CMakeLists.txt | 2 +-
67 files changed, 1795 insertions(+), 1843 deletions(-)
rename deal.II/cmake/configure/{configure_1_umfpack.cmake => configure_2_umfpack.cmake} (100%)
diff --git a/deal.II/cmake/config/CMakeLists.txt b/deal.II/cmake/config/CMakeLists.txt
index a84bc40978..0e46258b42 100644
--- a/deal.II/cmake/config/CMakeLists.txt
+++ b/deal.II/cmake/config/CMakeLists.txt
@@ -21,8 +21,9 @@
# deal.IIConfig.cmake
# deal.IIVersionConfig.cmake
#
-# and copies it (a) to the build directory and (b) prepares it for later
-# installation.
+# We support two configurations in which deal.II can be used - directly
+# from the build directory or after installation. So we have to prepare
+# two distinct setups.
#
#
@@ -121,7 +122,9 @@ ENDFOREACH()
#
# For binary dir:
#
-SET(CONFIG_INCLUDE_DIRS_BINARY
+
+SET(CONFIG_BUILD_DIR TRUE)
+SET(CONFIG_INCLUDE_DIRS
\${DEAL_II_PATH}/include
\${DEAL_II_PATH}/include/deal.II
${CMAKE_SOURCE_DIR}/include/
@@ -129,10 +132,23 @@ SET(CONFIG_INCLUDE_DIRS_BINARY
${deal_ii_source_includes}
${DEAL_II_USER_INCLUDE_DIRS}
)
+CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Config.cmake.in
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ @ONLY
+ )
+CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/ConfigVersion.cmake.in
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
+ @ONLY
+ )
+
#
# For installation:
#
-SET(CONFIG_INCLUDE_DIRS_INSTALLATION
+
+SET(CONFIG_BUILD_DIR FALSE)
+SET(CONFIG_INCLUDE_DIRS
\${DEAL_II_PATH}/\${DEAL_II_INCLUDE_RELDIR}
\${DEAL_II_PATH}/\${DEAL_II_INCLUDE_RELDIR}/deal.II
\${DEAL_II_PATH}/\${DEAL_II_INCLUDE_RELDIR}/deal.II/bundled
@@ -140,40 +156,45 @@ SET(CONFIG_INCLUDE_DIRS_INSTALLATION
)
CONFIGURE_FILE(
${CMAKE_CURRENT_SOURCE_DIR}/Config.cmake.in
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ @ONLY
+ )
+CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/ConfigVersion.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
@ONLY
)
+INSTALL(FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
+ DESTINATION ${DEAL_II_PROJECT_CONFIG_RELDIR}
+ COMPONENT library
+ )
+
#
-# Append feature configuration:
+# Append feature configuration to both configuration files:
#
-SET(_file
+
+SET(_files
${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
)
-FILE(APPEND ${_file}
- "\n\n#\n# Feature configuration:\n#\n\n"
- )
+FOREACH(_file ${_files})
+ FILE(APPEND ${_file}
+ "\n\n#\n# Feature configuration:\n#\n\n"
+ )
+ENDFOREACH()
GET_CMAKE_PROPERTY(res VARIABLES)
FOREACH(var ${res})
IF(var MATCHES "DEAL_II_WITH")
- FILE(APPEND ${_file}
- "SET(${var} ${${var}})\n"
- )
+ FOREACH(_file ${_files})
+ FILE(APPEND ${_file}
+ "SET(${var} ${${var}})\n"
+ )
+ ENDFOREACH()
ENDIF()
ENDFOREACH()
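(For illustration: the feature section appended to a generated deal.IIConfig.cmake
by the loop above then reads roughly as follows. Which DEAL_II_WITH_* features show
up, and their values, depend on the configuration; the ones below are only assumed.)

    #
    # Feature configuration:
    #

    SET(DEAL_II_WITH_ARPACK OFF)
    SET(DEAL_II_WITH_HDF5 ON)
    SET(DEAL_II_WITH_MPI ON)
    SET(DEAL_II_WITH_PETSC ON)
    SET(DEAL_II_WITH_UMFPACK ON)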
-CONFIGURE_FILE(
- ${CMAKE_CURRENT_SOURCE_DIR}/ConfigVersion.cmake.in
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
- @ONLY
- )
-
-INSTALL(FILES
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
- DESTINATION ${DEAL_II_PROJECT_CONFIG_RELDIR}
- COMPONENT library
- )
-
########################################################################
# #
@@ -181,7 +202,6 @@ INSTALL(FILES
# #
########################################################################
-
IF(DEAL_II_COMPONENT_COMPAT_FILES)
#
# Transform some cmake lists into a string that the old Makefile
@@ -301,8 +321,8 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES)
#
# For binary dir:
#
- SET(MAKEFILE_D_BINARY ${CMAKE_BINARY_DIR})
- TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS_BINARY "${CMAKE_INCLUDE_FLAG_CXX}"
+ SET(MAKEFILE_D ${CMAKE_BINARY_DIR})
+ TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS "${CMAKE_INCLUDE_FLAG_CXX}"
$(D)/install
$(D)/install/deal.II
${CMAKE_SOURCE_DIR}/include/
@@ -310,11 +330,15 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES)
${deal_ii_source_includes}
${DEAL_II_USER_INCLUDE_DIRS}
)
+ CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Make.global_options.in
+ ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/Make.global_options
+ )
#
# For installation:
#
- SET(MAKEFILE_D_INSTALLATION ${CMAKE_INSTALL_PREFIX})
- TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS_INSTALLATION "${CMAKE_INCLUDE_FLAG_CXX}"
+ SET(MAKEFILE_D ${CMAKE_INSTALL_PREFIX})
+ TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS "${CMAKE_INCLUDE_FLAG_CXX}"
$(D)/${DEAL_II_INCLUDE_RELDIR}
$(D)/${DEAL_II_INCLUDE_RELDIR}/deal.II
$(D)/${DEAL_II_INCLUDE_RELDIR}/deal.II/bundled
@@ -322,43 +346,12 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES)
)
CONFIGURE_FILE(
${CMAKE_CURRENT_SOURCE_DIR}/Make.global_options.in
- ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/Make.global_options
+ ${CMAKE_CURRENT_BINARY_DIR}/Make.global_options
)
-
INSTALL(FILES
${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/template-arguments
- ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/Make.global_options
+ ${CMAKE_CURRENT_BINARY_DIR}/Make.global_options
DESTINATION ${DEAL_II_COMMON_RELDIR}
COMPONENT compat_files
)
ENDIF()
-
-#
-# Finally, add a target to create the "binary" file in
-# ${DEAL_II_PROJECT_CONFIG_RELDIR} and add it to the "all" target:
-#
-ADD_CUSTOM_TARGET(setup_build_dir ALL
- COMMAND ${CMAKE_COMMAND} -E touch
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/binary
- COMMAND ${CMAKE_COMMAND} -E touch
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
- COMMENT "Update build directory"
- )
-FOREACH(_build ${DEAL_II_BUILD_TYPES})
- ADD_DEPENDENCIES(setup_build_dir ${DEAL_II_BASE_NAME}${DEAL_II_${_build}_SUFFIX})
-ENDFOREACH()
-
-#
-# And a script to remove it upon installation from the install prefix:
-# This is necessary if somebody wants to install into the build directory
-# (yes this is a valid use case...).
-#
-INSTALL(CODE
- "
- FILE(REMOVE ${CMAKE_INSTALL_PREFIX}/${DEAL_II_PROJECT_CONFIG_RELDIR}/binary)
- EXECUTE_PROCESS(COMMAND ${CMAKE_COMMAND} -E touch
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
- )
- "
- COMPONENT library
- )
diff --git a/deal.II/cmake/config/Config.cmake.in b/deal.II/cmake/config/Config.cmake.in
index 8d5cd1a74d..153fb13f9e 100644
--- a/deal.II/cmake/config/Config.cmake.in
+++ b/deal.II/cmake/config/Config.cmake.in
@@ -66,20 +66,14 @@ WHILE(NOT "${_path}" STREQUAL "")
GET_FILENAME_COMPONENT(_path "${_path}" PATH)
ENDWHILE()
-#
-# Is this project reside in a build directory or at an installed location?
-#
-
-IF(EXISTS ${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/binary)
- SET(DEAL_II_BUILD_DIR TRUE)
-ENDIF()
-
#
# Print a message after inclusion of this file:
#
SET(DEAL_II_PROJECT_CONFIG_INCLUDED TRUE)
+SET(DEAL_II_BUILD_DIR @CONFIG_BUILD_DIR@)
+
IF(NOT ${DEAL_II_PACKAGE_NAME}_FIND_QUIETLY)
IF(DEAL_II_BUILD_DIR)
MESSAGE(STATUS
@@ -152,11 +146,9 @@ SET(DEAL_II_STATIC_EXECUTABLE "@DEAL_II_STATIC_EXECUTABLE@")
#
# Information about include directories and libraries
#
-IF(DEAL_II_BUILD_DIR)
- SET(DEAL_II_INCLUDE_DIRS "@CONFIG_INCLUDE_DIRS_BINARY@")
-ELSE()
- SET(DEAL_II_INCLUDE_DIRS "@CONFIG_INCLUDE_DIRS_INSTALLATION@")
-ENDIF()
+
+# Full list of include directories:
+SET(DEAL_II_INCLUDE_DIRS "@CONFIG_INCLUDE_DIRS@")
# Full list of libraries for the debug target:
SET(DEAL_II_LIBRARIES_DEBUG "@CONFIG_LIBRARIES_DEBUG@")
@@ -172,12 +164,8 @@ SET(DEAL_II_LIBRARIES "@CONFIG_LIBRARIES@")
# Information about library targets
#
-IF(DEAL_II_BUILD_DIR)
- SET(DEAL_II_EXECUTABLE_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake")
- SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}BuildTargets.cmake")
-ELSE()
- SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake")
-ENDIF()
+# The library targets file:
+SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake")
# The Debug target:
SET(DEAL_II_TARGET_DEBUG "@CONFIG_TARGET_DEBUG@")
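(For illustration, a minimal user project consuming either of the two generated
configurations could look like the sketch below. The project and target names are
placeholders, DEAL_II_DIR is the customary hint variable, and only variables defined
above - DEAL_II_INCLUDE_DIRS and DEAL_II_LIBRARIES - are used.)

    CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8)
    PROJECT(myapp CXX)

    # Works against a build directory as well as an installed copy, since
    # both now provide the same deal.IIConfig.cmake layout:
    FIND_PACKAGE(deal.II REQUIRED HINTS ${DEAL_II_DIR} $ENV{DEAL_II_DIR})

    INCLUDE_DIRECTORIES(${DEAL_II_INCLUDE_DIRS})
    ADD_EXECUTABLE(myapp myapp.cc)
    TARGET_LINK_LIBRARIES(myapp ${DEAL_II_LIBRARIES})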
diff --git a/deal.II/cmake/config/Make.global_options.in b/deal.II/cmake/config/Make.global_options.in
index 3ebd2f232a..832a9237b1 100644
--- a/deal.II/cmake/config/Make.global_options.in
+++ b/deal.II/cmake/config/Make.global_options.in
@@ -23,11 +23,7 @@
#
-ifeq ($(wildcard $(D)/@DEAL_II_PROJECT_CONFIG_RELDIR@/binary),)
- D = @MAKEFILE_D_INSTALLATION@
-else
- D = @MAKEFILE_D_BINARY@
-endif
+D = @MAKEFILE_D@
CXX = @CMAKE_CXX_COMPILER@
CXX-ID = @CMAKE_CXX_COMPILER_ID@
@@ -87,11 +83,7 @@ lib-deal2.g = @MAKEFILE_TARGETS_DEBUG@
# Include paths as command line flags
-ifeq ($(wildcard $(D)/@DEAL_II_PROJECT_CONFIG_RELDIR@/binary),)
- INCLUDE = @MAKEFILE_INCLUDE_DIRS_INSTALLATION@
-else
- INCLUDE = @MAKEFILE_INCLUDE_DIRS_BINARY@
-endif
+INCLUDE = @MAKEFILE_INCLUDE_DIRS@
# compiler flags for debug and optimized mode
diff --git a/deal.II/cmake/configure/configure_2_petsc.cmake b/deal.II/cmake/configure/configure_2_petsc.cmake
index 23465a2eb0..c1c71a4a83 100644
--- a/deal.II/cmake/configure/configure_2_petsc.cmake
+++ b/deal.II/cmake/configure/configure_2_petsc.cmake
@@ -98,6 +98,7 @@ MACRO(FEATURE_PETSC_FIND_EXTERNAL var)
UNSET(PETSC_INCLUDE_DIR_ARCH CACHE)
UNSET(PETSC_INCLUDE_DIR_COMMON CACHE)
UNSET(PETSC_LIBRARY CACHE)
+ UNSET(PETSC_PETSCVARIABLES CACHE)
SET(PETSC_DIR "" CACHE PATH
"An optional hint to a PETSc directory"
)
diff --git a/deal.II/cmake/configure/configure_1_umfpack.cmake b/deal.II/cmake/configure/configure_2_umfpack.cmake
similarity index 100%
rename from deal.II/cmake/configure/configure_1_umfpack.cmake
rename to deal.II/cmake/configure/configure_2_umfpack.cmake
diff --git a/deal.II/cmake/configure/configure_hdf5.cmake b/deal.II/cmake/configure/configure_hdf5.cmake
index 9816054301..a5a40a255a 100644
--- a/deal.II/cmake/configure/configure_hdf5.cmake
+++ b/deal.II/cmake/configure/configure_hdf5.cmake
@@ -22,11 +22,13 @@ MACRO(FEATURE_HDF5_FIND_EXTERNAL var)
FIND_PACKAGE(HDF5)
IF(HDF5_FOUND)
- IF( (HDF5_WITH_MPI AND DEAL_II_WITH_MPI)
- OR
- (NOT HDF5_WITH_MPI AND NOT DEAL_II_WITH_MPI))
+
+ IF( (HDF5_WITH_MPI AND DEAL_II_WITH_MPI) OR
+ (NOT HDF5_WITH_MPI AND NOT DEAL_II_WITH_MPI) )
SET(${var} TRUE)
+
ELSE()
+
MESSAGE(STATUS "Insufficient hdf5 installation found: "
"hdf5 has to be configured with the same MPI configuration as deal.II."
)
@@ -36,14 +38,23 @@ MACRO(FEATURE_HDF5_FIND_EXTERNAL var)
" DEAL_II_WITH_MPI = ${DEAL_II_WITH_MPI}\n"
" HDF5_WITH_MPI = ${HDF5_WITH_MPI}\n"
)
+ UNSET(HDF5_HL_LIBRARY CACHE)
+ UNSET(HDF5_INCLUDE_DIR CACHE)
+ UNSET(HDF5_LIBRARY CACHE)
+ UNSET(HDF5_PUBCONF CACHE)
ENDIF()
+ ENDIF()
+ENDMACRO()
+
+MACRO(FEATURE_HDF5_CONFIGURE_EXTERNAL)
INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIRS})
+
+ # The user has to know the location of the HDF5 headers as well:
LIST(APPEND DEAL_II_USER_INCLUDE_DIRS ${HDF5_INCLUDE_DIRS})
- DEAL_II_APPEND_LIBRARIES(${HDF5_LIBRARIES})
-
- ENDIF()
+ DEAL_II_APPEND_LIBRARIES(${HDF5_LIBRARIES})
ENDMACRO()
+
CONFIGURE_FEATURE(HDF5)
diff --git a/deal.II/cmake/macros/macro_add_test.cmake b/deal.II/cmake/macros/macro_add_test.cmake
index 0bda009a4c..fc95f00172 100644
--- a/deal.II/cmake/macros/macro_add_test.cmake
+++ b/deal.II/cmake/macros/macro_add_test.cmake
@@ -130,7 +130,20 @@ MACRO(DEAL_II_ADD_TEST _category _test_name _comparison_file)
IF(NOT TARGET ${_target})
# only add the target once
- ADD_EXECUTABLE(${_target} EXCLUDE_FROM_ALL ${_test_name}.cc)
+ #
+ # Add a "guard file" rule: The purpose of interrupt_guard.cc is to
+ # force a complete rerun of this test (BUILD, RUN and DIFF stage)
+ # if interrupt_guard.cc is removed by run_test.cmake due to an
+ # interruption.
+ #
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
+ COMMAND touch ${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
+ )
+
+ ADD_EXECUTABLE(${_target} EXCLUDE_FROM_ALL ${_test_name}.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
+ )
SET_TARGET_PROPERTIES(${_target} PROPERTIES
LINK_FLAGS "${DEAL_II_LINKER_FLAGS} ${DEAL_II_LINKER_FLAGS_${_build}}"
@@ -218,6 +231,7 @@ MACRO(DEAL_II_ADD_TEST _category _test_name _comparison_file)
-DEXPECT=${_expect}
-DADDITIONAL_OUTPUT=${ARGN}
-DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR}
+ -DGUARD_FILE=${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
-P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake
WORKING_DIRECTORY ${_test_directory}
)
diff --git a/deal.II/cmake/macros/macro_deal_ii_invoke_autopilot.cmake b/deal.II/cmake/macros/macro_deal_ii_invoke_autopilot.cmake
index a418855339..cce7276509 100644
--- a/deal.II/cmake/macros/macro_deal_ii_invoke_autopilot.cmake
+++ b/deal.II/cmake/macros/macro_deal_ii_invoke_autopilot.cmake
@@ -59,7 +59,7 @@ MACRO(DEAL_II_INVOKE_AUTOPILOT)
"EXECUTE_PROCESS(COMMAND ${TARGET_RUN}\n"
" RESULT_VARIABLE _return_value\n"
" )\n"
- "IF(NOT \"\${_return_value}\" STREQUAL "0")\n"
+ "IF(NOT \"\${_return_value}\" STREQUAL \"0\")\n"
" MESSAGE(SEND_ERROR \"\nProgram terminated with exit code: \${_return_value}\")\n"
"ENDIF()\n"
)
diff --git a/deal.II/cmake/macros/macro_expand_instantiations.cmake b/deal.II/cmake/macros/macro_expand_instantiations.cmake
index 684860d475..9574b0fab8 100644
--- a/deal.II/cmake/macros/macro_expand_instantiations.cmake
+++ b/deal.II/cmake/macros/macro_expand_instantiations.cmake
@@ -38,9 +38,14 @@ MACRO(EXPAND_INSTANTIATIONS _target _inst_in_files)
FOREACH (_inst_in_file ${_inst_in_files})
STRING(REGEX REPLACE "\\.in$" "" _inst_file "${_inst_in_file}" )
+ SET(_dependency)
+ IF(TARGET expand_instantiations)
+ SET(_dependency expand_instantiations)
+ ENDIF()
+
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_inst_file}
- DEPENDS expand_instantiations
+ DEPENDS ${_dependency}
${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/template-arguments
${CMAKE_CURRENT_SOURCE_DIR}/${_inst_in_file}
COMMAND expand_instantiations
diff --git a/deal.II/cmake/modules/FindARPACK.cmake b/deal.II/cmake/modules/FindARPACK.cmake
index f2bb51d628..7e8da72090 100644
--- a/deal.II/cmake/modules/FindARPACK.cmake
+++ b/deal.II/cmake/modules/FindARPACK.cmake
@@ -31,11 +31,6 @@ INCLUDE(FindPackageHandleStandardArgs)
SET_IF_EMPTY(ARPACK_DIR "$ENV{ARPACK_DIR}")
-#
-# ARPACK needs LAPACK and BLAS as dependencies:
-#
-FIND_PACKAGE(DEALII_LAPACK)
-
FIND_LIBRARY(ARPACK_LIBRARY
NAMES arpack
HINTS
diff --git a/deal.II/cmake/modules/FindHDF5.cmake b/deal.II/cmake/modules/FindHDF5.cmake
index bac83af2d6..fec6194dbe 100644
--- a/deal.II/cmake/modules/FindHDF5.cmake
+++ b/deal.II/cmake/modules/FindHDF5.cmake
@@ -49,18 +49,28 @@ FIND_LIBRARY(HDF5_HL_LIBRARY NAMES hdf5_hl
lib${LIB_SUFFIX} lib64 lib
)
+FIND_FILE(HDF5_PUBCONF H5pubconf.h
+ HINTS
+ ${HDF5_INCLUDE_DIR}
+ ${HDF5_DIR}
+ PATH_SUFFIXES
+ hdf5 hdf5/include include/hdf5 include
+ )
+
SET(_output ${HDF5_HL_LIBRARY} ${HDF5_LIBRARY})
FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDF5 DEFAULT_MSG
_output # Cosmetic: Gives nice output
HDF5_HL_LIBRARY
HDF5_LIBRARY
HDF5_INCLUDE_DIR
+ HDF5_PUBCONF
)
MARK_AS_ADVANCED(
HDF5_LIBRARY
HDF5_HL_LIBRARY
HDF5_INCLUDE_DIR
+ HDF5_PUBCONF
)
IF(HDF5_FOUND)
@@ -76,7 +86,7 @@ IF(HDF5_FOUND)
#
# Is hdf5 compiled with support for mpi?
#
- FILE(STRINGS "${HDF5_INCLUDE_DIR}/H5pubconf.h" HDF5_MPI_STRING
+ FILE(STRINGS ${HDF5_PUBCONF} HDF5_MPI_STRING
REGEX "#define.*H5_HAVE_PARALLEL 1")
IF("${HDF5_MPI_STRING}" STREQUAL "")
SET(HDF5_WITH_MPI FALSE)
diff --git a/deal.II/cmake/modules/FindMUMPS.cmake b/deal.II/cmake/modules/FindMUMPS.cmake
index 6099238908..d3ae910fe7 100644
--- a/deal.II/cmake/modules/FindMUMPS.cmake
+++ b/deal.II/cmake/modules/FindMUMPS.cmake
@@ -37,7 +37,6 @@ INCLUDE(FindPackageHandleStandardArgs)
# (We'll rely on the user of FindMUMPS, setting up mpi *cough*)
#
FIND_PACKAGE(SCALAPACK) # which will also include lapack and blas
-FIND_PACKAGE(METIS)
FIND_PATH(MUMPS_INCLUDE_DIR dmumps_c.h
diff --git a/deal.II/cmake/modules/FindSCALAPACK.cmake b/deal.II/cmake/modules/FindSCALAPACK.cmake
index 96a6a539b3..19af21abf8 100644
--- a/deal.II/cmake/modules/FindSCALAPACK.cmake
+++ b/deal.II/cmake/modules/FindSCALAPACK.cmake
@@ -37,12 +37,6 @@ FIND_LIBRARY(SCALAPACK_LIBRARY NAMES scalapack
lib${LIB_SUFFIX} lib64 lib
)
-#
-# SCALAPACK needs LAPACK and BLAS as dependency, search for them with the help
-# of the LAPACK find module:
-#
-FIND_PACKAGE(DEALII_LAPACK)
-
#
# Well, depending on the version of scalapack and the distribution it might
# be necessary to search for blacs, too. So we do this in a very
diff --git a/deal.II/cmake/modules/FindUMFPACK.cmake b/deal.II/cmake/modules/FindUMFPACK.cmake
index 8f5ed61440..71f9361e1a 100644
--- a/deal.II/cmake/modules/FindUMFPACK.cmake
+++ b/deal.II/cmake/modules/FindUMFPACK.cmake
@@ -34,15 +34,6 @@ FOREACH(_comp SUITESPARSE SUITESPARSE_CONFIG UMFPACK AMD CHOLMOD COLAMD)
SET_IF_EMPTY(${_comp}_DIR "$ENV{${_comp}_DIR}")
ENDFOREACH()
-
-#
-# UMFPACK depends on BLAS and LAPACK, so search for them:
-# TODO: There might be an external dependency for metis, ignore this for
-# now.
-#
-FIND_PACKAGE(DEALII_LAPACK)
-FIND_PACKAGE(METIS)
-
#
# Two macros to make life easier:
#
diff --git a/deal.II/cmake/scripts/CMakeLists.txt b/deal.II/cmake/scripts/CMakeLists.txt
index 6e000ece9b..6341917a7a 100644
--- a/deal.II/cmake/scripts/CMakeLists.txt
+++ b/deal.II/cmake/scripts/CMakeLists.txt
@@ -26,10 +26,6 @@ IF(NOT CMAKE_CROSSCOMPILING)
COMPONENT compat_files
)
ENDIF()
-
- EXPORT(TARGETS expand_instantiations
- FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
- )
ENDIF()
IF(DEAL_II_COMPONENT_COMPAT_FILES)
@@ -96,8 +92,4 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES)
DESTINATION ${DEAL_II_COMMON_RELDIR}/scripts
COMPONENT compat_files
)
- EXPORT(TARGETS make_dependencies report_features
- FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
- APPEND
- )
ENDIF()
diff --git a/deal.II/cmake/scripts/run_test.cmake b/deal.II/cmake/scripts/run_test.cmake
index fb2a42f5e0..cb815aae64 100644
--- a/deal.II/cmake/scripts/run_test.cmake
+++ b/deal.II/cmake/scripts/run_test.cmake
@@ -30,6 +30,26 @@
#
# ADDITIONAL_OUTPUT - A list of additional output lines that should be printed
#
+# GUARD_FILE - used to detect a forced interruption of this script: On
+# startup the backed up file ${GUARD_FILE}_bck is put back
+# in place as ${GUARD_FILE} and on exit ${GUARD_FILE} is
+# backed up as ${GUARD_FILE}_bck. If on startup a stale
+# ${GUARD_FILE} is found, it is deleted.
+#
+
+IF(NOT "${GUARD_FILE}" STREQUAL "" AND EXISTS ${GUARD_FILE})
+ #
+ # Guard file still exists, so this script must have been interrupted.
+ # Remove guard file to force a complete rerun:
+ #
+ EXECUTE_PROCESS(COMMAND rm -f ${GUARD_FILE})
+ELSEIF(NOT "${GUARD_FILE}" STREQUAL "" AND EXISTS ${GUARD_FILE}_bck)
+ #
+ # A backed up guard file exists. Put it back in place:
+ #
+ EXECUTE_PROCESS(COMMAND mv ${GUARD_FILE}_bck ${GUARD_FILE})
+ENDIF()
+
IF("${EXPECT}" STREQUAL "")
SET(EXPECT "PASSED")
@@ -93,13 +113,21 @@ ENDIF()
MESSAGE("=============================== OUTPUT END ===============================")
+#
+# Back up guard file:
+#
+
+IF(NOT "${GUARD_FILE}" STREQUAL "" AND EXISTS ${GUARD_FILE})
+ EXECUTE_PROCESS(COMMAND mv ${GUARD_FILE} ${GUARD_FILE}_bck)
+ENDIF()
+
#
# Bail out:
#
IF(NOT "${_stage}" STREQUAL "${EXPECT}")
- MESSAGE("Excpected stage ${EXPECT} - aborting")
+ MESSAGE("Expected stage ${EXPECT} - aborting")
MESSAGE(FATAL_ERROR "*** abort")
ELSEIF(NOT "${_stage}" STREQUAL "PASSED")
- MESSAGE("Excpected stage ${EXPECT} - test considered successful.")
+ MESSAGE("Expected stage ${EXPECT} - test considered successful.")
ENDIF()
diff --git a/deal.II/cmake/setup_cached_variables.cmake b/deal.II/cmake/setup_cached_variables.cmake
index 8d476fb437..d69d0df166 100644
--- a/deal.II/cmake/setup_cached_variables.cmake
+++ b/deal.II/cmake/setup_cached_variables.cmake
@@ -104,14 +104,6 @@ OPTION(DEAL_II_FORCE_AUTODETECTION
OFF
)
-IF("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
- SET(CMAKE_INSTALL_PREFIX
- "${CMAKE_BINARY_DIR}"
- CACHE STRING
- "Install path prefix, prepended onto install directories."
- )
-ENDIF()
-
########################################################################
# #
@@ -306,6 +298,24 @@ UNSET(ENV{LDFLAGS})
# #
########################################################################
+#
+# We do not support installation into the binary directory any more ("too
+# much pain, not enough profit"):
+#
+
+IF("${CMAKE_BINARY_DIR}" STREQUAL "${CMAKE_INSTALL_PREFIX}")
+ MESSAGE(FATAL_ERROR "
+Error: CMAKE_INSTALL_PREFIX is equal to CMAKE_BINARY_DIR.
+It is not possible to install into the build directory. Please set
+CMAKE_INSTALL_PREFIX to a designated install directory different from
+CMAKE_BINARY_DIR.
+(Please note that you can use deal.II directly out of a build directory
+if this is what you tried to do.)
+"
+ )
+ENDIF()
+
+
GET_CMAKE_PROPERTY(_res VARIABLES)
FOREACH(_var ${_res})
#
diff --git a/deal.II/cmake/setup_custom_targets.cmake b/deal.II/cmake/setup_custom_targets.cmake
index 65399c645b..02e242c08d 100644
--- a/deal.II/cmake/setup_custom_targets.cmake
+++ b/deal.II/cmake/setup_custom_targets.cmake
@@ -89,6 +89,7 @@ FILE(WRITE ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/print_info.cmake
# test - runs a minimal set of tests
#
# setup_tests - sets up the testsuite subprojects
+# regen_tests - reruns the configure stage in every testsuite subproject
# clean_tests - runs the 'clean' target in every testsuite subproject
# prune_tests - removes all testsuite subprojects
#
diff --git a/deal.II/cmake/setup_deal_ii.cmake b/deal.II/cmake/setup_deal_ii.cmake
index 58fbd0e16e..777c066931 100644
--- a/deal.II/cmake/setup_deal_ii.cmake
+++ b/deal.II/cmake/setup_deal_ii.cmake
@@ -107,17 +107,7 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES)
SET_IF_EMPTY(DEAL_II_DOCREADME_RELDIR "")
SET_IF_EMPTY(DEAL_II_EXAMPLES_RELDIR "examples")
SET_IF_EMPTY(DEAL_II_EXECUTABLE_RELDIR "bin")
- IF( "${CMAKE_INSTALL_PREFIX}" STREQUAL "${CMAKE_BINARY_DIR}" AND
- (NOT "${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}") )
- #
- # Ensure that in case of an out of source build BINARY_DIR/include !=
- # INSTALL_PREFIX/include is always true. Otherwise stale headers might
- # get included resulting in a failing build.
- #
- SET_IF_EMPTY(DEAL_II_INCLUDE_RELDIR "include/install")
- ELSE()
- SET_IF_EMPTY(DEAL_II_INCLUDE_RELDIR "include")
- ENDIF()
+ SET_IF_EMPTY(DEAL_II_INCLUDE_RELDIR "include")
SET_IF_EMPTY(DEAL_II_LIBRARY_RELDIR "lib")
SET_IF_EMPTY(DEAL_II_PROJECT_CONFIG_RELDIR "${DEAL_II_LIBRARY_RELDIR}/cmake/${DEAL_II_PROJECT_CONFIG_NAME}")
ELSE()
@@ -151,30 +141,17 @@ ENDIF()
# #
########################################################################
-#
-# Library search order:
-#
IF(DEAL_II_PREFER_STATIC_LIBS)
+ #
# Invert the search order for libraries when DEAL_II_PREFER_STATIC_LIBS
# is set. This will prefer static archives instead of shared libraries:
+ #
LIST(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
ENDIF()
-#
-# Cross compilation stuff:
-#
IF(CMAKE_CROSSCOMPILING)
+ #
# Disable platform introspection when cross compiling
+ #
SET(DEAL_II_ALLOW_PLATFORM_INTROSPECTION OFF CACHE BOOL "" FORCE)
-
- # Import native expand_instantiations for use in cross compilation:
- SET(DEAL_II_NATIVE "DEAL_II_NATIVE-NOTFOUND" CACHE FILEPATH
- "A pointer to a native deal.Ii build directory"
- )
- IF(DEAL_II_NATIVE MATCHES "-NOTFOUND")
- MESSAGE(FATAL_ERROR
- "Please set the CMake variable DEAL_II_NATIVE to a valid path that points to a native deal.II build directory"
- )
- ENDIF()
- INCLUDE(${DEAL_II_NATIVE}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake)
ENDIF()
diff --git a/deal.II/cmake/setup_finalize.cmake b/deal.II/cmake/setup_finalize.cmake
index 4c4fc377ff..8a084e5027 100644
--- a/deal.II/cmake/setup_finalize.cmake
+++ b/deal.II/cmake/setup_finalize.cmake
@@ -80,8 +80,9 @@ FILE(WRITE
#
# Cleanup deal.IITargets.cmake in the build directory:
#
-FILE(REMOVE
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}BuildTargets.cmake
+FILE(WRITE
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake
+ ""
)
diff --git a/deal.II/cmake/setup_testsuite.cmake b/deal.II/cmake/setup_testsuite.cmake
index cd334b751e..5aa5eb2718 100644
--- a/deal.II/cmake/setup_testsuite.cmake
+++ b/deal.II/cmake/setup_testsuite.cmake
@@ -45,13 +45,22 @@ ENDFOREACH()
#
# Pick up values from environment:
#
-SET_IF_EMPTY(DEAL_II_BINARY_DIR $ENV{DEAL_II_BINARY_DIR})
-SET_IF_EMPTY(DEAL_II_BINARY_DIR $ENV{DEAL_II_DIR})
-SET_IF_EMPTY(DEAL_II_SOURCE_DIR $ENV{DEAL_II_SOURCE_DIR})
-SET_IF_EMPTY(TEST_DIFF $ENV{TEST_DIFF})
-SET_IF_EMPTY(TEST_TIME_LIMIT $ENV{TEST_TIME_LIMIT})
-SET_IF_EMPTY(TEST_PICKUP_REGEX $ENV{TEST_PICKUP_REGEX})
-SET_IF_EMPTY(TEST_OVERRIDE_LOCATION $ENV{TEST_OVERRIDE_LOCATION})
+FOREACH(_var
+ DEAL_II_BINARY_DIR
+ DEAL_II_SOURCE_DIR
+ TEST_DIFF
+ TEST_TIME_LIMIT
+ TEST_PICKUP_REGEX
+ TEST_OVERRIDE_LOCATION
+ )
+ # Environment wins:
+ IF(DEFINED ENV{${_var}})
+ SET(${_var} $ENV{${_var}})
+ ENDIF()
+ IF(NOT "${_var}" STREQUAL "")
+ SET(${_var} "${${_var}}" CACHE STRING "")
+ ENDIF()
+ENDFOREACH()
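(As an aside - an assumed workflow, not part of this patch: with the loop above the
options can also be persisted in the subproject cache, for example through an initial
cache file passed to cmake via -C, while values exported in the environment still win
on the next invocation of setup_tests. The file name and values below are made up.)

    # testsuite_options.cmake - pass with: cmake -C testsuite_options.cmake <...>
    SET(DEAL_II_BINARY_DIR "/path/to/deal.II/build" CACHE STRING "")
    SET(TEST_TIME_LIMIT "1200" CACHE STRING "")
    SET(TEST_PICKUP_REGEX "integrators/" CACHE STRING "")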
#
# We need deal.II and Perl as external packages:
@@ -106,3 +115,9 @@ SET_IF_EMPTY(TEST_TIME_LIMIT 600)
# And finally, enable testing:
#
ENABLE_TESTING()
+
+#
+# A custom target that does absolutely nothing. It is used in the main
+# project to trigger a "make rebuild_cache" if necessary.
+#
+ADD_CUSTOM_TARGET(regenerate)
diff --git a/deal.II/contrib/mesh_conversion/CMakeLists.txt b/deal.II/contrib/mesh_conversion/CMakeLists.txt
index f17569af7d..387797f3e1 100644
--- a/deal.II/contrib/mesh_conversion/CMakeLists.txt
+++ b/deal.II/contrib/mesh_conversion/CMakeLists.txt
@@ -37,6 +37,6 @@ INSTALL(TARGETS mesh_converter_exe
)
EXPORT(TARGETS mesh_converter_exe
FILE
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake
APPEND
)
diff --git a/deal.II/contrib/parameter_gui/CMakeLists.txt b/deal.II/contrib/parameter_gui/CMakeLists.txt
index c06c0823dc..b0350b59e0 100644
--- a/deal.II/contrib/parameter_gui/CMakeLists.txt
+++ b/deal.II/contrib/parameter_gui/CMakeLists.txt
@@ -60,7 +60,7 @@ INSTALL(TARGETS parameter_gui_exe
COMPONENT parameter_gui
)
EXPORT(TARGETS parameter_gui_exe
- FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
+ FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake
APPEND
)
diff --git a/deal.II/doc/authors.html b/deal.II/doc/authors.html
index c9f6b6ac49..0cfeae838f 100644
--- a/deal.II/doc/authors.html
+++ b/deal.II/doc/authors.html
@@ -15,13 +15,20 @@
deal.II Authors
- The deal.II project was initially started by
- members of the Numerical Methods group at the Heidelberg University,
- Germany but has since become a global, open source project.
+ deal.II is a global project. It is
+ administered by a group of principal
+ developers. Technical decisions are made by the principal
+ developers and a group of developers consisting of
+ long-term contributors with a global overview of the library. A
+ large number of contributors have provided substantial
+ patches over the years.
- The current maintainers of the library are:
+ These three groups are listed below.
+
+
+ Principal developers
-
Guido Kanschat,
- Texas A&M University, TX, USA and Universität Heidelberg, Germany
+ Universität Heidelberg, Germany
+
+ Developers
+
+ -
+ Luca Heltai,
+ SISSA, Trieste, Italy
+
+
+ -
+ Martin Kronbichler,
+ Technische Universität München, Germany
+
+
+ -
+ Matthias Maier,
+ Universität Heidelberg, Germany
+
+
+ -
+ Bruno Turcksin,
+ Texas A&M University, TX, USA
+
+
+ -
+ Toby Young,
+ Polish Academy of Sciences, Poland
+
+
+
+
+
+
+
+ Contributors
+
Many people have contributed to deal.II over the years, some of them very
substantial parts of the library. Their work
is greatly appreciated: no open source project can survive without a
@@ -125,19 +171,6 @@
Eric Heien:
HDF5 output.
- Luca Heltai:
- Gmsh format mesh reader and writer;
- some of the meshes in the GridGenerator class;
- generalization of FilteredMatrix;
- integration of the function parser library;
- cubit journal file to export to ucd mesh format;
- FEFieldFunction and ParsedFunction classes;
- work on the codimension-one meshes, DoFHandler, and finite
- elements;
- singular integration;
- Step-34 tutorial program;
- random bug fixes and enhancements.
-
Bärbel Janssen:
Lots of work on multigrid for adaptive meshes; multigrid in the
MeshWorker framework; step-16. Various fixes.
@@ -163,12 +196,6 @@
Oleh Krehel:
Many documentation fixes.
- Martin Kronbichler:
- step-22, step-31, step-32, step-37, step-48, interfaces to
- Trilinos, significant parts of ConstraintMatrix, matrix-free
- computations, support for massively parallel computations, and
- many enhancements in random places.
-
Tobias Leicht:
Lots of work on internal data structures: anisotropic refinement
(including step-30), faces
@@ -186,10 +213,6 @@
Enhancements in the interface to PETSc. Support for reading GMSH
2.5 format.
- Matthias Maier:
- CMake build system for the library and the testsuite. Periodic
- boundary conditions. Enhancements throughout the library.
-
Cataldo Manigrasso:
Work on the codimension-one meshes, DoFHandler, and finite
elements.
@@ -276,10 +299,6 @@
Christophe Trophime:
Packaging and configuration issues.
- Bruno Turcksin:
- Extending deal.ii for 64-bit integer support. Converting the
- testsuite to CMake. Various other changes.
-
Kainan Wang:
Extending deal.ii for 64-bit integer support.
@@ -297,11 +316,6 @@
Christian Wülker:
GridOut::write_svg.
- Toby D. Young:
- Interfaces to SLEPc; many changes in the interfaces to PETSc;
- MUMPS interface;
- METIS interface.
-
Yuhan Zhou:
Input to the curved boundary example in step-49.
diff --git a/deal.II/doc/developers/porting.html b/deal.II/doc/developers/porting.html
index 89efdc8665..2408227120 100644
--- a/deal.II/doc/developers/porting.html
+++ b/deal.II/doc/developers/porting.html
@@ -137,23 +137,24 @@
You have to set up a native deal.II build directory first and run
make expand_instantiations
in it. The executable is
needed for the build system (and obviously the cross compiled version
- cannot be used). Assuming you have a working cross compilation
- toolchain, set up a toolchain file next. An example toolchain
- file can be found here. With that
- invoke cmake
e.g. with:
+ cannot be used). Locate the expand_instantiations
+ executable and export its location in the PATH
+ environment variable.
+
+
+
+ Assuming you have a working cross compilation toolchain, set up a
+ toolchain file next. With that
+ invoke cmake with something like:
with something like:
cmake -DCMAKE_TOOLCHAIN_FILE=<...>/Toolchain.sample
- -DDEAL_II_NATIVE=<...>/native/build/dir
-
-DDEAL_II_FORCE_BUNDLED_BOOST=ON
-DDEAL_II_ALLOW_AUTODETECTION=OFF
../deal.II
- where DEAL_II_NATIVE
points to the toolchain file and
- DEAL_II_NATIVE
to the native build directory (which
- expand_instantiations
will be picked from). The remaining
- configuration can be adjusted at will, see the documentation.
+ where CMAKE_TOOLCHAIN_FILE points to the toolchain file.
points to the toolchain file.
+ The remaining configuration can be adjusted at will, see the documentation.
diff --git a/deal.II/doc/developers/testsuite.html b/deal.II/doc/developers/testsuite.html
index 9eb8dd343b..d6307a695c 100644
--- a/deal.II/doc/developers/testsuite.html
+++ b/deal.II/doc/developers/testsuite.html
@@ -156,6 +156,8 @@
Setup can be fine-tuned using the following commands:
+ $ make regen_tests - reruns the configure stage in every testsuite subproject
+
$ make clean_tests - runs the 'clean' target in every testsuite subproject
$ make prune_tests - removes all testsuite subprojects
@@ -187,21 +189,6 @@
-
- Note: Specifying these options via environment variables is
- volatile, i.e. if make setup_tests
is invoked a second
- time without the variables set in environment, the option will be
- reset to the default value. If you want to set these options
- permanently, set them via cmake as CMake variable in the build
- directory:
-
-
- $ cmake -DTEST_PICKUP_REGEX="<regular expression>" .
-
- A variable set via cmake always overrides one
- set via environment.
-
-
Running the testsuite
diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h
index 42be735793..ac3ebd69e1 100644
--- a/deal.II/doc/news/changes.h
+++ b/deal.II/doc/news/changes.h
@@ -78,6 +78,12 @@ inconvenience this causes.
+ - Changed: step-9, step-13 and step-14 have been converted to use the
+ more modern WorkStream concept for assembling linear systems in parallel.
+
+ (Bruno Turcksin, Wolfgang Bangerth, 2013/10/26)
+
+
- New: The testsuite is now ported to
CMake and uses CTest as test driver.
@@ -119,7 +125,7 @@ inconvenience this causes.
(Eric Heien, 2013/09/27)
-
+
-
New: DataOutBase::DataOutFilter provides a way to remove duplicate vertices
and values from a solution vector when generating output. Currently it only
@@ -168,6 +174,12 @@ inconvenience this causes.
Specific improvements
+ -
+ Fixed: the DerivativeApproximation class was not working correctly when
+ used with parallel vectors.
+ (Timo Heister, 2013/10/28)
+
+
-
~Subscriptor and ~GrowingVectorMemory no longer throw an exception (the
former if disable_abort_on_exception was called) to be compatible with the
@@ -185,7 +197,7 @@ inconvenience this causes.
-
- New: parallel::distributed::BlockVector has now methods update_ghost_values,
+ New: parallel::distributed::BlockVector has now methods update_ghost_values,
compress, set_out_ghosts, and has_ghost_elements that do the respective
operation on each block of parallel::distributed::Vector.
diff --git a/deal.II/doc/publications/index.html b/deal.II/doc/publications/index.html
index e7aa5b43a4..119322f602 100644
--- a/deal.II/doc/publications/index.html
+++ b/deal.II/doc/publications/index.html
@@ -107,7 +107,7 @@
- W. Bangerth,
- T. Heister
and
W. Bangerth,
C. Burstedde,
- T. Heister,
+ T. Heister,
M. Kronbichler
Algorithms and Data Structures for Massively Parallel Generic
diff --git a/deal.II/examples/step-13/doc/intro.dox b/deal.II/examples/step-13/doc/intro.dox
index de67cb0faf..0d599788cf 100644
--- a/deal.II/examples/step-13/doc/intro.dox
+++ b/deal.II/examples/step-13/doc/intro.dox
@@ -10,21 +10,22 @@ with presenting methods of writing modular and extensible finite
element programs. The main reason for this is the size and complexity
of modern research software: applications implementing modern error
estimation concepts and adaptive solution methods tend to become
-rather large. For example, the three largest applications by the main
+rather large. For example, when this program was written in 2002, the
+three largest applications by the main
authors of deal.II were, at the time of writing of this example
program:
- a program for solving conservation hyperbolic equations by the
Discontinuous Galerkin Finite Element method: 33,775 lines of
- code;
+ code;
- a parameter estimation program: 28,980 lines of code;
- a wave equation solver: 21,020 lines of code.
-(The library proper - without example programs and
-test suite - has slightly more than 150,000 lines of code as of spring 2002.)
-In the opinion of the author of this example program, the sizes of these
-applications are at the edge of what one person, even an experienced
-programmer, can manage.
+
+(The library proper - without example programs and test suite - has slightly
+more than 150,000 lines of code as of spring 2002. It is of course several
+times larger now.) The sizes of these applications are at the edge of what
+one person, even an experienced programmer, can manage.
@@ -55,12 +56,13 @@ relatively independent of the other ones:
mappings.
Besides these, and a large number of smaller classes, there are of
-course the following ``tool'' modules:
+course the following "tool" modules:
- output in various graphical formats;
- linear algebra classes.
-
+These complexes can also be found as a flow chart on the front page of
+the deal.II manual website.
@@ -79,7 +81,7 @@ number of differences compared to previous example programs:
- The classes that implement the process of numerically solving the
equation are no more responsible for driving the process of
- ``solving-estimating error-refining-solving again'', but we delegate
+ "solving-estimating error-refining-solving again", but we delegate
this to external functions. This allows first to use it as a
building block in a larger context, where the solution of a
Laplace equation might only be one part (for example, in a
@@ -105,6 +107,11 @@ number of differences compared to previous example programs:
classes that compute the solution.
- Separate the description of the test case with which we will
present the program, from the rest of the program.
+
- Parallelize the assembly of linear systems using the WorkStream
+ facilities. This follows the extensive description that can be
+ found in the @ref threads "Parallel computing with multiple processors accessing shared memory"
+ documentation module. The implementation essentially follows what
+ has already been described in step-9.
@@ -117,7 +124,7 @@ design techniques used in the program to achieve the goal of
implementing the desired mathematical method. However, we must
stress that software design is in part also a subjective matter:
different persons have different programming backgrounds and have
-different opinions about the ``right'' style of programming; this
+different opinions about the "right" style of programming; this
program therefore expresses only what the author considers useful
practice, and is not necessarily a style that you have to adopt in
order to write successful numerical software if you feel uncomfortable
@@ -163,7 +170,7 @@ avoid the pitfalls of too closely coupled codes.
What the program actually does is not even the main point of this
program, the structure of the program is more important. However, in a
few words, a description would be: solve the Laplace equation for a
-given right hand side such that the solution is the function
+given right hand side such that the solution is the function
$u(x,t)=\exp(x+\sin(10y+5x^2))$. The goal of the
computation is to get the value of the solution at the point
$x_0=(0.5,0.5)$, and to compare the accuracy with
diff --git a/deal.II/examples/step-13/doc/results.dox b/deal.II/examples/step-13/doc/results.dox
index e8457619ed..228457ff59 100644
--- a/deal.II/examples/step-13/doc/results.dox
+++ b/deal.II/examples/step-13/doc/results.dox
@@ -16,41 +16,41 @@ foremost interest is the point value computation, for which we had
implemented the corresponding evaluation class. The results (i.e. the
output) of the program looks as follows:
@code
- Running tests with "global" refinement criterion:
- -------------------------------------------------
- Refinement cycle: 0 1 2 3 4 5 6
- DoFs u(x_0)
- 25 1.2868
- 81 1.6945
- 289 1.4658
- 1089 1.5679
- 4225 1.5882
- 16641 1.5932
- 66049 1.5945
-
- Running tests with "kelly" refinement criterion:
- ------------------------------------------------
- Refinement cycle: 0 1 2 3 4 5 6 7 8 9 10 11
- DoFs u(x_0)
- 25 1.2868
- 47 0.8775
- 89 1.5365
- 165 1.2974
- 316 1.6442
- 589 1.5221
- 1090 1.5724
- 2035 1.5622
- 3754 1.5916
- 7100 1.5876
- 13059 1.5942
- 24749 1.5933
+Running tests with "global" refinement criterion:
+-------------------------------------------------
+Refinement cycle: 0 1 2 3 4 5 6
+DoFs u(x_0)
+ 25 1.2868
+ 81 1.6945
+ 289 1.4658
+ 1089 1.5679
+ 4225 1.5882
+16641 1.5932
+66049 1.5945
+
+Running tests with "kelly" refinement criterion:
+------------------------------------------------
+Refinement cycle: 0 1 2 3 4 5 6 7 8 9 10 11
+DoFs u(x_0)
+ 25 1.2868
+ 47 0.8775
+ 89 1.5365
+ 165 1.2974
+ 316 1.6442
+ 589 1.5221
+ 1093 1.5724
+ 2042 1.5627
+ 3766 1.5916
+ 7124 1.5876
+13111 1.5942
+24838 1.5932
@endcode
What surprises here is that the exact value is 1.59491554..., and that
-it is obviously surprisingly complicated to compute the solution even to
+it is apparently surprisingly complicated to compute the solution even to
only one per cent accuracy, although the solution is smooth (in fact
-infinite often differentiable). This smoothness is shown in the
+infinitely often differentiable). This smoothness is shown in the
graphical output generated by the program, here coarse grid and the
first 9 refinement steps of the Kelly refinement indicator:
@@ -173,7 +173,7 @@ profit. Answer 2: this quick hack
{
cell->clear_coarsen_flag();
refinement_indicated |= cell->refine_flag_set();
- };
+ }
if (refinement_indicated)
for (cell=triangulation->begin_active();
cell!=triangulation->end(); ++cell)
diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc
index 17abbb2d79..cd1f9525b7 100644
--- a/deal.II/examples/step-13/step-13.cc
+++ b/deal.II/examples/step-13/step-13.cc
@@ -28,6 +28,7 @@
#include
#include
#include
+#include <deal.II/base/work_stream.h>
#include
#include
#include
@@ -56,34 +57,11 @@
#include
#include
-#ifdef DEAL_II_WITH_THREADS
-# include
-# include
-#endif
-
// The last step is as in all previous programs:
namespace Step13
{
using namespace dealii;
- namespace Assembler
- {
- // Dummy structure
- struct Scratch
- {
- Scratch() {}
- };
-
- struct CopyData
- {
- CopyData() {}
-
- unsigned int dofs_per_cell;
- FullMatrix cell_matrix;
- std::vector local_dof_indices;
- };
- }
-
// @sect3{Evaluation of the solution}
// As for the program itself, we first define classes that evaluate the
@@ -648,7 +626,7 @@ namespace Step13
// various subobjects, and there is a function that implements a
// conjugate gradient method as solver.
private:
- struct LinearSystem
+ struct LinearSystem
{
LinearSystem (const DoFHandler &dof_handler);
@@ -660,70 +638,41 @@ namespace Step13
Vector rhs;
};
-#ifdef DEAL_II_WITH_THREADS
- // Tasks in TBB must be derived from tbb::task and override tbb::task*
- // execute.
- // The purpose of HangingNodeTask is to apply execute DoFTools::make_hanging_node_constraints.
- struct HangingNodeTask : public tbb::task
+ // Finally, there is a set of functions which will be used to
+ // assemble the actual system matrix. The main function of this
+ // group, assemble_linear_system() computes the
computes the
+ // matrix in parallel on multicore systems, using the following
+ // two helper functions. The mechanism for doing so is the same
+ // as in the step-9 example program and follows the WorkStream
+ // concept outlined in @ref threads . The main function also
+ // calls the virtual function assembling the right hand side.
+ struct AssemblyScratchData
{
- HangingNodeTask (const DoFHandler &dof_handler,ConstraintMatrix &hanging_node_constraints) :
- dof_handler(&dof_handler),
- hanging_node_constraints(& hanging_node_constraints) {}
-
- tbb::task* execute()
- {
- DoFTools::make_hanging_node_constraints(*dof_handler,*hanging_node_constraints);
-
- return NULL;
- }
-
- const DoFHandler* dof_handler;
- ConstraintMatrix* hanging_node_constraints;
- };
-
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature);
+ AssemblyScratchData (const AssemblyScratchData &scratch_data);
+ FEValues<dim> fe_values;
+ };
- // The purpose of SparsityPatternTask is to create the sparsity pattern.
- struct SparsityPatternTask : public tbb::task
+ struct AssemblyCopyData
{
- SparsityPatternTask (const DoFHandler &dof_handler,SparsityPattern &sparsity_pattern) :
- dof_handler(&dof_handler),
- sparsity_pattern(&sparsity_pattern) {}
-
- tbb::task* execute()
- {
- sparsity_pattern->reinit (dof_handler->n_dofs(),
- dof_handler->n_dofs(),
- dof_handler->max_couplings_between_dofs());
- DoFTools::make_sparsity_pattern (*dof_handler, *sparsity_pattern);
-
- return NULL;
- }
-
- const DoFHandler* dof_handler;
- SparsityPattern* sparsity_pattern;
+ FullMatrix<double> cell_matrix;
+ std::vector<types::global_dof_index> local_dof_indices;
};
-#endif
-
- // Finally, there is a pair of functions which will be used to assemble
- // the actual system matrix. It calls the virtual function assembling
- // the right hand side, and installs a number threads each running the
- // second function which assembles part of the system matrix. The
- // mechanism for doing so is the same as in the step-9 example program.
void
assemble_linear_system (LinearSystem &linear_system);
void
- assemble_matrix (const typename DoFHandler::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
- Assembler::CopyData ©_data) const;
-
+ local_assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+ AssemblyCopyData &copy_data) const;
void
- copy_local_to_global(Assembler::CopyData const ©_data,
- LinearSystem &linear_system) const;
+ copy_local_to_global(const AssemblyCopyData &copy_data,
+ LinearSystem &linear_system) const;
};
@@ -798,62 +747,179 @@ namespace Step13
}
- // The following function assembles matrix and right hand side of the
- // linear system to be solved in each step. It goes along the same lines
- // as used in previous examples, so we explain it only briefly. Note that
- // we do a number of things in parallel, a process described in more
- // detail in the @ref threads module.
+ // The following function assembles matrix and right hand side of
+ // the linear system to be solved in each step. We will do things
+ // in parallel at a couple of levels. First, note that we need to
+ // assemble both the matrix and the right hand side. These are
+ // independent operations, and we should do this in parallel. To
+ // this end, we use the concept of "tasks" that is discussed in
+ // the @ref threads documentation module. In essence, what we want
+ // to say "here is something that needs to be worked on, go do it
+ // whenever a CPU core is available", then do something else, and
+ // when we need the result of the first operation wait for its
+ // completion. At the second level, we want to assemble the matrix
+ // using the exact same strategy we have already used in step-9,
+ // namely the WorkStream concept.
+ //
+ // While we could consider either assembling the right hand side
+ // or assembling the matrix as the thing to do in the background
+ // while doing the other, we will opt for the former approach
+ // simply because the call to Solver::assemble_rhs
is
+ // so much simpler to write than the call to WorkStream::run with
+ // its many arguments. In any case, the code then looks like this
+ // to assemble the entire linear system:
template
void
Solver::assemble_linear_system (LinearSystem &linear_system)
{
- // First define a convenience abbreviation for these lengthy iterator
- // names...
- typedef
- typename DoFHandler::active_cell_iterator
- active_cell_iterator;
-
- // ... and use it to split up the set of cells into a number of pieces
- // of equal size. The number of blocks is set to the default number of
- // threads to be used, which by default is set to the number of
- // processors found in your computer at startup of the program:
-
- // These ranges are then assigned to a number of threads which we create
- // next. Each will assemble the local cell matrices on the assigned
- // cells, and fill the matrix object with it. Since there is need for
- // synchronization when filling the same matrix from different threads,
- // we need a mutex here:
-
- Assembler::Scratch scratch;
- Assembler::CopyData copy_data;
- WorkStream::run(dof_handler.begin_active(),dof_handler.end(),
- std_cxx1x::bind(&Solver::assemble_matrix,this,std_cxx1x::_1,std_cxx1x::_2,std_cxx1x::_3),
- std_cxx1x::bind(&Solver::copy_local_to_global,this,std_cxx1x::_1,std_cxx1x::ref(linear_system)),
- scratch,copy_data);
-
- // While the new threads assemble the system matrix, we can already
- // compute the right hand side vector in the main thread, and condense
- // away the constraints due to hanging nodes:
- assemble_rhs (linear_system.rhs);
- linear_system.hanging_node_constraints.condense (linear_system.rhs);
+ Threads::Task<> rhs_task = Threads::new_task (&Solver::assemble_rhs,
+ *this,
+ linear_system.rhs);
+
+ WorkStream::run(dof_handler.begin_active(),
+ dof_handler.end(),
+ std_cxx1x::bind(&Solver::local_assemble_matrix,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::_2,
+ std_cxx1x::_3),
+ std_cxx1x::bind(&Solver::copy_local_to_global,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::ref(linear_system)),
+ AssemblyScratchData(*fe, *quadrature),
+ AssemblyCopyData());
+ linear_system.hanging_node_constraints.condense (linear_system.matrix);
- // And while we're already computing things in parallel, interpolating
- // boundary values is one more thing that can be done independently, so
- // we do it here:
+ // The syntax above using std_cxx1x::bind requires
+ // some explanation. There are multiple versions of
+ // some explanation. There are multiple version of
+ // WorkStream::run that expect different arguments. In step-9,
+ // we used one version that took a pair of iterators, a pair of
+ // pointers to member functions with very specific argument
+ // lists, a pointer or reference to the object on which these
+ // member functions have to work, and a scratch and copy data
+ // object. This is a bit restrictive since the member functions
+ // called this way have to have an argument list that exactly
+ // matches what WorkStream::run expects: the local assembly
+ // function needs to take an iterator, a scratch object and a
+ // copy object; and the copy-local-to-global function needs to
+ // take exactly a copy object. But, what if we want something
+ // that's slightly more general? For example, in the current
+ // program, the copy-local-to-global function needs to know
+ // which linear system object to write the local contributions
+ // into, i.e., it also has to take a LinearSystem
+ // argument. That won't work with the approach using member
+ // function pointers.
+ //
+ // Fortunately, C++ offers a way out. These are called function
+ // objects. In essence, what WorkStream::run wants to do is not
+ // call a member function. It wants to call some function that
+ // takes an iterator, a scratch object and a copy object in the
+ // first case, and a copy object in the second case. Whether
+ // these are member functions, global functions, or something
+ // else, is really not of much concern to
+ // WorkStream. Consequently, there is a second version of the
+ // function that just takes function objects -- objects that
+ // have an operator()
and that consequently can be
+ // called like functions, whatever they really represent. The
+ // typical way to generate such function objects is using
+ // std::bind
(or, if the compiler is too old, a
+ // replacement for it, which we generically call
+ // std_cxx1x::bind
) which takes a pointer to a
+ // (member) function and then binds individual arguments
+ // to fixed values. For example, you can create a function that
+ // takes an iterator, a scratch object and a copy object by
+ // taking the address of a member function and binding the
+ // (implicit) argument to the object on which it is to work to
+ // *this
. This is what we do in the first call
+ // above. In the second call, we need to create a function
+ // object that takes a copy object, and we do so by taking the
+ // address of a member function that takes an implicit pointer
+ // to *this
, a reference to a copy object, and a
+ // reference to a linear system, and binding the first and third
+ // of these, leaving something that has only one open argument
+ // that can then be filled by WorkStream::run().
+ //
+ // There remains the question of what the
+ // std_cxx1x::_1
, std_cxx1x::_2
, etc.,
+ // mean. (These arguments are called placeholders.) The
+ // idea of using std_cxx1x::bind
in the first of
+ // the two cases above is that it produces an object that can be
+ // called with three arguments. But how are the three arguments
+ // the function object is being called with going to be
+ // distributed to the four arguments
+ // local_assemble_matrix()
(including the implicit
+ // this
pointer)? As specified, the first argument
+ // given to the function object will become the first argument
+ // given to local_assemble_matrix()
, the second the
+ // second, etc. This is trivial here, but allows for interesting
+ // games in other circumstances. Consider, for example, having a
+ // function void f(double x, double y)
. Then,
+ // creating a variable p
of type
+ // std_cxx1x::function@
and
+ // initializing p=std_cxx1x::bind(&f, std_cxx1x::_2,
+ // std_cxx1x::_1)
then calling p(1,2)
will
+ // result in calling f(2,1)
.
+ //
+ // @note Once deal.II can rely on every compiler being able to
+ // fully understand the syntax of the C++11 standard, one can
+ // use C++'s version of lambda
+ // functions to achieve the same goal. In essence, a lambda
+ // function is a function without a name that is defined right
+ // at the one place where it is going to be used -- i.e., where
+ // we pass the third and fourth argument to WorkStream::run. The
+ // functions one would define in these locations would take 3
+ // and 1 arguments, respectively, and all they do is call
+ // Solver::local_assemble_matrix
and
+ // Solver::copy_local_to_global
with the required
+ // number of arguments, utilizing what the lambda function has
+ // gotten as arguments itself. We won't show the syntax this
+ // would require since it is no less confusing than the one used
+ // above.
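(Editorial aside: the note above deliberately omits the lambda syntax. Purely as an illustration, and assuming a C++11 compiler, the two bind expressions passed to WorkStream::run could be replaced by lambda functions roughly as sketched here, reusing the names from this program; this is a sketch, not the form the tutorial actually uses.)

    WorkStream::run (dof_handler.begin_active (),
                     dof_handler.end (),
                     // worker: takes an iterator, a scratch object, and a copy object
                     [this] (const typename DoFHandler<dim>::active_cell_iterator &cell,
                             AssemblyScratchData &scratch_data,
                             AssemblyCopyData    &copy_data)
                     {
                       this->local_assemble_matrix (cell, scratch_data, copy_data);
                     },
                     // copier: takes only a copy object
                     [this, &linear_system] (const AssemblyCopyData &copy_data)
                     {
                       this->copy_local_to_global (copy_data, linear_system);
                     },
                     AssemblyScratchData (*fe, *quadrature),
                     AssemblyCopyData ());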
+
+ // At this point, we have assembled the matrix and condensed
+ // it. The right hand side may or may not have been completely
+ // assembled, but we would like to condense the right hand side
+ // vector next. We can only do this if the assembly of this
+ // vector has finished, so we have to wait for the task to
+ // finish; in computer science, waiting for a task is typically
+ // called "joining" the task, explaining the name of the
+ // function we call below.
+ //
+ // Since that task may or may not have finished, and since we
+ // may have to wait for it to finish, we may as well try to pack
+ // other things that need to be done anyway into this
+ // gap. Consequently, we first interpolate boundary values
+ // before we wait for the right hand side. Of course, another
+ // possibility would have been to also interpolate the boundary
+ // values on a separate task since doing so is independent of
+ // the other things we have done in this function so far. Feel
+ // free to find the correct syntax to also create a task for
+ // this interpolation and start it at the top of this function,
+ // along with the assembly of the right hand side. (You will
+ // find that this is slightly more complicated since there are
+ // multiple versions of
+ // VectorTools::interpolate_boundary_values(), and so simply
+ // taking the address
+ // &VectorTools::interpolate_boundary_values
+ // produces a set of overloaded functions that can't be passed
+ // to Threads::new_task() right away -- you have to select which
+ // element of this overload set you want by casting the address
+ // expression to a function pointer type that is specific to the
+ // version of the function that you want to call on the task.)
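(Editorial aside: the general technique hinted at in the last sentence can be shown with a toy overloaded function g, made up for illustration and unrelated to the deal.II signatures. The address of an overloaded name only becomes usable once the context forces a particular type, either by assigning it to a function pointer of known type or by an explicit cast.)

    #include <iostream>

    void g (int)    { std::cout << "g(int)"    << std::endl; }
    void g (double) { std::cout << "g(double)" << std::endl; }

    int main ()
    {
      // '&g' alone names the whole overload set; giving it a typed
      // destination selects one element of that set:
      void (*g_int) (int) = &g;
      g_int (42);                                 // prints "g(int)"

      // The same selection can be written as a cast, which is what one
      // would do before handing the pointer to a function template such
      // as Threads::new_task():
      static_cast<void (*)(double)> (&g) (3.14);  // prints "g(double)"
    }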
std::map boundary_value_map;
VectorTools::interpolate_boundary_values (dof_handler,
0,
*boundary_values,
boundary_value_map);
+ rhs_task.join ();
+ linear_system.hanging_node_constraints.condense (linear_system.rhs);
- // If this is done, wait for the matrix assembling threads, and condense
- // the constraints in the matrix as well:
- linear_system.hanging_node_constraints.condense (linear_system.matrix);
-
- // Now that we have the linear system, we can also treat boundary
- // values, which need to be eliminated from both the matrix and the
- // right hand side:
+ // Now that we have the complete linear system, we can also
+ // treat boundary values, which need to be eliminated from both
+ // the matrix and the right hand side:
MatrixTools::apply_boundary_values (boundary_value_map,
linear_system.matrix,
solution,
@@ -862,34 +928,52 @@ namespace Step13
}
- // The second of this pair of functions takes a range of cell iterators,
- // and assembles the system matrix on this part of the domain. Since it's
- // actions have all been explained in previous programs, we do not comment
- // on it any more, except for one point below.
+ // The second half of this set of functions deals with the local
+ // assembly on each cell and copying local contributions into the
+ // global matrix object. This works in exactly the same way as
+ // described in step-9:
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature)
+ :
+ fe_values (fe,
+ quadrature,
+ update_gradients | update_JxW_values)
+ {}
+
+
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const AssemblyScratchData &scratch_data)
+ :
+ fe_values (scratch_data.fe_values.get_fe(),
+ scratch_data.fe_values.get_quadrature(),
+ update_gradients | update_JxW_values)
+ {}
+
+
template <int dim>
void
- Solver<dim>::assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
- Assembler::CopyData &copy_data) const
+ Solver<dim>::local_assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+ AssemblyCopyData &copy_data) const
{
- FEValues fe_values (*fe, *quadrature,
- update_gradients | update_JxW_values);
-
- copy_data.dofs_per_cell = fe->dofs_per_cell;
- const unsigned int n_q_points = quadrature->size();
+ const unsigned int dofs_per_cell = fe->dofs_per_cell;
+ const unsigned int n_q_points = quadrature->size();
- copy_data.cell_matrix = FullMatrix (copy_data.dofs_per_cell, copy_data.dofs_per_cell);
+ copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
- copy_data.local_dof_indices.resize(copy_data.dofs_per_cell);
+ copy_data.local_dof_indices.resize(dofs_per_cell);
- fe_values.reinit (cell);
+ scratch_data.fe_values.reinit (cell);
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ copy_data.cell_matrix(i,j) += (scratch_data.fe_values.shape_grad(i,q_point) *
+ scratch_data.fe_values.shape_grad(j,q_point) *
+ scratch_data.fe_values.JxW(q_point));
+
+ cell->get_dof_indices (copy_data.local_dof_indices);
}
@@ -898,41 +982,14 @@ namespace Step13
template <int dim>
void
- Solver<dim>::copy_local_to_global(Assembler::CopyData const &copy_data,
- LinearSystem &linear_system) const
+ Solver<dim>::copy_local_to_global(const AssemblyCopyData &copy_data,
+ LinearSystem &linear_system) const
{
- // In the step-9 program, we have shown that you have to use the
- // mutex to lock the matrix when copying the elements from the local
- // to the global matrix. This was necessary to avoid that two
- // threads access it at the same time, eventually overwriting their
- // respective work. Previously, we have used the
- // acquire and release functions of the
- // mutex to lock and unlock the mutex, respectively. While this is
- // valid, there is one possible catch: if between the locking
- // operation and the unlocking operation an exception is thrown, the
- // mutex remains in the locked state, and in some cases this might
- // lead to deadlocks. A similar situation arises, when one changes
- // the code to have a return statement somewhere in the middle of
- // the locked block, and forgets that before we call
- // return, we also have to unlock the mutex. All this
- // is no problem here, but we want to show the general
- // technique to cope with these problems nevertheless: have an
- // object that upon initialization (i.e. in its constructor) locks
- // the mutex, and on running the destructor unlocks it again. This
- // is called the scoped lock pattern (apparently
- // invented by Doug Schmidt originally), and it works because
- // destructors of local objects are also run when we exit the
- // function either through a return statement, or when
- // an exception is raised. Thus, it is guaranteed that the mutex
- // will always be unlocked when we exit this part of the program,
- // whether the operation completed successfully or not, whether the
- // exit path was something we implemented willfully or whether the
- // function was exited by an exception that we did not foresee.
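(Editorial aside: the scoped-lock pattern described in the comment being removed above can be sketched in a few standalone lines with the standard library; the mutex and function names here are made up for illustration only.)

    #include <mutex>

    std::mutex global_matrix_mutex;   // hypothetical mutex guarding a shared object

    void add_local_to_global ()
    {
      // The constructor of lock_guard acquires the mutex ...
      std::lock_guard<std::mutex> lock (global_matrix_mutex);

      // ... do the protected work here ...

      // ... and its destructor releases the mutex again, no matter whether
      // we leave through the end of the scope, a return statement, or an
      // exception propagating out of the protected code.
    }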
- for (unsigned int i=0; iset_ref_count(3);
-
- HangingNodeTask* hanging_node_task =
- new (empty_task->allocate_child()) HangingNodeTask(dof_handler,hanging_node_constraints);
- SparsityPatternTask* sparsity_pattern_task =
- new (empty_task->allocate_child()) SparsityPatternTask(dof_handler,sparsity_pattern);
-
- // Spawn the two tasks
- empty_task->spawn(*hanging_node_task);
- empty_task->spawn(*sparsity_pattern_task);
-
- // Wait for children to finish
- empty_task->wait_for_all();
- // empty_task must be destroy manually because it does not return.
- empty_task->destroy(*empty_task);
-#else
- DoFTools::make_hanging_node_constraints(dof_handler,hanging_node_constraints);
+ void (*mhnc_p) (const DoFHandler<dim> &,
+ ConstraintMatrix &)
+ = &DoFTools::make_hanging_node_constraints;
+
+ // Start a side task then continue on the main thread
+ Threads::Task<> side_task(std_cxx1x::bind(mhnc_p,std_cxx1x::cref(dof_handler),
+ std_cxx1x::ref(hanging_node_constraints)));
sparsity_pattern.reinit (dof_handler.n_dofs(),
dof_handler.n_dofs(),
dof_handler.max_couplings_between_dofs());
DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
-#endif
+
+ // Wait for the side task to be done before going further
+ side_task.join();
hanging_node_constraints.close ();
hanging_node_constraints.condense (sparsity_pattern);
diff --git a/deal.II/examples/step-14/doc/intro.dox b/deal.II/examples/step-14/doc/intro.dox
index 0f6004d52e..15a115b9af 100644
--- a/deal.II/examples/step-14/doc/intro.dox
+++ b/deal.II/examples/step-14/doc/intro.dox
@@ -3,10 +3,10 @@
The maths
-The Heidelberg group of Professor Rolf Rannacher, to which the three main
+The Heidelberg group of Professor Rolf Rannacher, to which the three initial
authors of the deal.II library belonged during their PhD time and partly also
afterwards, has been involved with adaptivity and error estimation for finite
-element discretizations since the mid-90ies. The main achievement is the
+element discretizations since the mid-1990s. The main achievement is the
development of error estimates for arbitrary functionals of the solution, and
of optimal mesh refinement for its computation.
diff --git a/deal.II/examples/step-14/step-14.cc b/deal.II/examples/step-14/step-14.cc
index ec777aa8ce..ee642a5564 100644
--- a/deal.II/examples/step-14/step-14.cc
+++ b/deal.II/examples/step-14/step-14.cc
@@ -23,6 +23,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -54,33 +55,11 @@
#include
#include
-#ifdef DEAL_II_WITH_THREADS
-# include
-# include
-#endif
-
// The last step is as in all previous programs:
namespace Step14
{
using namespace dealii;
- namespace Assembler
- {
- struct Scratch
- {
- Scratch() {}
- };
-
- struct CopyData
- {
- CopyData() {}
-
- unsigned int dofs_per_cell;
- FullMatrix cell_matrix;
- std::vector local_dof_indices;
- };
- }
-
// @sect3{Evaluating the solution}
// As mentioned in the introduction, significant parts of the program have
@@ -492,59 +471,39 @@ namespace Step14
Vector rhs;
};
-#ifdef DEAL_II_WITH_THREADS
- struct HangingNodeTask : public tbb::task
+ // The remainder of the class is essentially a copy of step-13
+ // as well, including the data structures and functions
+ // necessary to compute the linear system in parallel using the
+ // WorkStream framework:
+ struct AssemblyScratchData
{
- HangingNodeTask (const DoFHandler &dof_handler,ConstraintMatrix &hanging_node_constraints) :
- dof_handler(&dof_handler),
- hanging_node_constraints(& hanging_node_constraints) {}
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature);
+ AssemblyScratchData (const AssemblyScratchData &scratch_data);
- tbb::task* execute()
- {
- DoFTools::make_hanging_node_constraints(*dof_handler,*hanging_node_constraints);
-
- return NULL;
- }
-
- const DoFHandler* dof_handler;
- ConstraintMatrix* hanging_node_constraints;
- };
+ FEValues<dim> fe_values;
+ };
- struct SparsityPatternTask : public tbb::task
+ struct AssemblyCopyData
{
- SparsityPatternTask (const DoFHandler &dof_handler,SparsityPattern &sparsity_pattern) :
- dof_handler(&dof_handler),
- sparsity_pattern(&sparsity_pattern) {}
-
- tbb::task* execute()
- {
- sparsity_pattern->reinit (dof_handler->n_dofs(),
- dof_handler->n_dofs(),
- dof_handler->max_couplings_between_dofs());
- DoFTools::make_sparsity_pattern (*dof_handler, *sparsity_pattern);
-
- return NULL;
- }
-
- const DoFHandler* dof_handler;
- SparsityPattern* sparsity_pattern;
+ FullMatrix<double> cell_matrix;
+ std::vector<types::global_dof_index> local_dof_indices;
};
-#endif
void
assemble_linear_system (LinearSystem &linear_system);
void
- assemble_matrix (const typename DoFHandler::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
- Assembler::CopyData ©_data) const;
-
+ local_assemble_matrix (const typename DoFHandler::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+ AssemblyCopyData ©_data) const;
+
void
- copy_local_to_global(Assembler::CopyData const ©_data,
- LinearSystem &linear_system) const;
+ copy_local_to_global(const AssemblyCopyData ©_data,
+ LinearSystem &linear_system) const;
};
@@ -602,30 +561,30 @@ namespace Step14
}
+ // The following few functions and constructors are verbatim
+ // copies taken from step-13:
template <int dim>
void
Solver<dim>::assemble_linear_system (LinearSystem &linear_system)
{
- typedef
- typename DoFHandler::active_cell_iterator
- active_cell_iterator;
+ Threads::Task<> rhs_task = Threads::new_task (&Solver::assemble_rhs,
+ *this,
+ linear_system.rhs);
+
+ WorkStream::run(dof_handler.begin_active(),
+ dof_handler.end(),
+ std_cxx1x::bind(&Solver::local_assemble_matrix,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::_2,
+ std_cxx1x::_3),
+ std_cxx1x::bind(&Solver::copy_local_to_global,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::ref(linear_system)),
+ AssemblyScratchData(*fe, *quadrature),
+ AssemblyCopyData());
- const unsigned int n_threads = multithread_info.n_threads();
- std::vector >
- thread_ranges
- = Threads::split_range (dof_handler.begin_active (),
- dof_handler.end (),
- n_threads);
-
- Assembler::Scratch scratch;
- Assembler::CopyData copy_data;
- WorkStream::run(dof_handler.begin_active(),dof_handler.end(),
- std_cxx1x::bind(&Solver::assemble_matrix,this,std_cxx1x::_1,std_cxx1x::_2,std_cxx1x::_3),
- std_cxx1x::bind(&Solver::copy_local_to_global,this,std_cxx1x::_1,std_cxx1x::ref(linear_system)),
- scratch,copy_data);
-
-
- assemble_rhs (linear_system.rhs);
linear_system.hanging_node_constraints.condense (linear_system.rhs);
std::map boundary_value_map;
@@ -634,6 +593,8 @@ namespace Step14
*boundary_values,
boundary_value_map);
+ rhs_task.join ();
+
linear_system.hanging_node_constraints.condense (linear_system.matrix);
MatrixTools::apply_boundary_values (boundary_value_map,
@@ -643,30 +604,48 @@ namespace Step14
}
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature)
+ :
+ fe_values (fe,
+ quadrature,
+ update_gradients | update_JxW_values)
+ {}
+
+
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const AssemblyScratchData &scratch_data)
+ :
+ fe_values (scratch_data.fe_values.get_fe(),
+ scratch_data.fe_values.get_quadrature(),
+ update_gradients | update_JxW_values)
+ {}
+
+
template <int dim>
void
- Solver<dim>::assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
- Assembler::CopyData &copy_data) const
+ Solver<dim>::local_assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+ AssemblyCopyData &copy_data) const
{
- FEValues fe_values (*fe, *quadrature,
- update_gradients | update_JxW_values);
-
- copy_data.dofs_per_cell = fe->dofs_per_cell;
- const unsigned int n_q_points = quadrature->size();
+ const unsigned int dofs_per_cell = fe->dofs_per_cell;
+ const unsigned int n_q_points = quadrature->size();
- copy_data.cell_matrix = FullMatrix (copy_data.dofs_per_cell, copy_data.dofs_per_cell);
+ copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
- copy_data.local_dof_indices.resize(copy_data.dofs_per_cell);
+ copy_data.local_dof_indices.resize(dofs_per_cell);
- fe_values.reinit (cell);
+ scratch_data.fe_values.reinit (cell);
+ for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ copy_data.cell_matrix(i,j) += (scratch_data.fe_values.shape_grad(i,q_point) *
+ scratch_data.fe_values.shape_grad(j,q_point) *
+ scratch_data.fe_values.JxW(q_point));
+
+ cell->get_dof_indices (copy_data.local_dof_indices);
}
@@ -675,79 +654,73 @@ namespace Step14
template <int dim>
void
- Solver<dim>::copy_local_to_global(Assembler::CopyData const &copy_data,
- LinearSystem &linear_system) const
- {
- for (unsigned int i=0; i::copy_local_to_global(const AssemblyCopyData ©_data,
+ LinearSystem &linear_system) const
+ {
+ for (unsigned int i=0; iThreads::ThreadGroup class here, but rather use
- // the one created thread object directly to wait for this particular
- // thread's exit.
+ // Now for the functions that implement actions in the linear
+ // system class. First, the constructor initializes all data
+ // elements to their correct sizes, and sets up a number of
+ // additional data structures, such as constraints due to hanging
+ // nodes. Since setting up the hanging nodes and finding out about
+ // the nonzero elements of the matrix are independent of each other, we do that
+ // in parallel (if the library was configured to use concurrency,
+ // at least; otherwise, the actions are performed
+ // sequentially). Note that we start only one thread, and do the
+ // second action in the main thread. Since only one thread is
+ // generated, we don't use the Threads::ThreadGroup
+ // class here, but rather use the one created thread object
+ // directly to wait for this particular thread's exit. The
+ // approach is generally the same as the one we have used in
+ // Solver::assemble_linear_system() above.
//
- // Note that taking up the address of the
- // DoFTools::make_hanging_node_constraints
function is a
- // little tricky, since there are actually three of them, one for each
- // supported space dimension. Taking addresses of overloaded functions is
- // somewhat complicated in C++, since the address-of operator
- // &
in that case returns more like a set of values (the
- // addresses of all functions with that name), and selecting the right one
- // is then the next step. If the context dictates which one to take (for
- // example by assigning to a function pointer of known type), then the
- // compiler can do that by itself, but if this set of pointers shall be
- // given as the argument to a function that takes a template, the compiler
- // could choose all without having a preference for one. We therefore have
- // to make it clear to the compiler which one we would like to have; for
- // this, we could use a cast, but for more clarity, we assign it to a
- // temporary mhnc_p (short for pointer to
- // make_hanging_node_constraints) with the right type, and using
- // this pointer instead.
+ // Note that taking the address of the
+ // DoFTools::make_hanging_node_constraints function
+ // is a little tricky, since there are actually three functions of
+ // this name, one for each supported space dimension. Taking
+ // addresses of overloaded functions is somewhat complicated in
+ // C++, since the address-of operator & in that case
+ // returns a set of values (the addresses of all
+ // functions with that name), and selecting the right one is then
+ // the next step. If the context dictates which one to take (for
+ // example by assigning to a function pointer of known type), then
+ // the compiler can do that by itself, but if this set of pointers
+ // shall be given as the argument to a function that takes a
+ // template, the compiler could choose all without having a
+ // preference for one. We therefore have to make it clear to the
+ // compiler which one we would like to have; for this, we could
+ // use a cast, but for more clarity, we assign it to a temporary
+ // mhnc_p (short for pointer to
+ // make_hanging_node_constraints) with the right type, and
+ // using this pointer instead.
template <int dim>
Solver<dim>::LinearSystem::
LinearSystem (const DoFHandler<dim> &dof_handler)
{
hanging_node_constraints.clear ();
-#ifdef DEAL_II_WITH_THREADS
- tbb::task_scheduler_init init;
- // Create an empty task to be the parent of the two tasks that we need.
- tbb::empty_task* empty_task = new (tbb::task::allocate_root()) tbb::empty_task;
- // Set the reference count to 3 (number of children+1)
- empty_task->set_ref_count(3);
-
- HangingNodeTask* hanging_node_task =
- new (empty_task->allocate_child()) HangingNodeTask(dof_handler,hanging_node_constraints);
- SparsityPatternTask* sparsity_pattern_task =
- new (empty_task->allocate_child()) SparsityPatternTask(dof_handler,sparsity_pattern);
-
- empty_task->spawn(*hanging_node_task);
- empty_task->spawn(*sparsity_pattern_task);
-
- // Wait for children to finish
- empty_task->wait_for_all();
- empty_task->destroy(*empty_task);
-#else
- DoFTools::make_hanging_node_constraints(dof_handler,hanging_node_constraints);
+ void (*mhnc_p) (const DoFHandler<dim> &,
+ ConstraintMatrix &)
+ = &DoFTools::make_hanging_node_constraints;
+
+ Threads::Task<> side_task
+ = Threads::new_task (mhnc_p,
+ dof_handler,
+ hanging_node_constraints);
sparsity_pattern.reinit (dof_handler.n_dofs(),
- dof_handler.n_dofs(),
- dof_handler.max_couplings_between_dofs());
+ dof_handler.n_dofs(),
+ dof_handler.max_couplings_between_dofs());
DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
-#endif
+
+ side_task.join();
hanging_node_constraints.close ();
hanging_node_constraints.condense (sparsity_pattern);
@@ -2180,12 +2153,12 @@ namespace Step14
void
WeightedResidual::solve_problem ()
{
- Threads::ThreadGroup<> threads;
- threads += Threads::new_thread (&WeightedResidual::solve_primal_problem,
- *this);
- threads += Threads::new_thread (&WeightedResidual::solve_dual_problem,
- *this);
- threads.join_all ();
+ Threads::TaskGroup<> tasks;
+ tasks += Threads::new_task (&WeightedResidual::solve_primal_problem,
+ *this);
+ tasks += Threads::new_task (&WeightedResidual::solve_dual_problem,
+ *this);
+ tasks.join_all();
}
@@ -2436,16 +2409,16 @@ namespace Step14
// parts of all the cells, and once they are all started wait until they
// have all finished:
const unsigned int n_threads = multithread_info.n_threads();
- Threads::ThreadGroup<> threads;
+ Threads::TaskGroup<> tasks;
for (unsigned int i=0; i<n_threads; ++i)
- threads += Threads::new_thread (&WeightedResidual<dim>::estimate_some,
+ tasks += Threads::new_task<> (&WeightedResidual::estimate_some,
*this,
primal_solution,
dual_weights,
n_threads, i,
error_indicators,
face_integrals);
- threads.join_all();
+ tasks.join_all();
// Once the error contributions are computed, sum them up. For this,
// note that the cell terms are already set, and that only the edge
diff --git a/deal.II/examples/step-32/doc/intro.dox b/deal.II/examples/step-32/doc/intro.dox
index 25317c67cb..6a20022310 100644
--- a/deal.II/examples/step-32/doc/intro.dox
+++ b/deal.II/examples/step-32/doc/intro.dox
@@ -778,8 +778,9 @@ at the same time read an existing matrix element, add their contribution, and
write the sum back into memory without danger of producing a race condition.
-deal.II has a class that is made for exactly this workflow: WorkStream. Its
-use is extensively documented in the module on @ref threads (in the section
+deal.II has a class that is made for exactly this workflow: WorkStream, first
+discussed in step-9 and step-13. Its
+use is also extensively documented in the module on @ref threads (in the section
on @ref MTWorkStream "the WorkStream class") and we won't repeat here the
rationale and detailed instructions laid out there, though you will want to
read through this module to understand the distinction between scratch space
diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc
index 6819a0b1a6..27e895b355 100644
--- a/deal.II/examples/step-32/step-32.cc
+++ b/deal.II/examples/step-32/step-32.cc
@@ -2179,23 +2179,28 @@ namespace Step32
// i.e., it returns true exactly if the cell is owned by the current
// processor. The resulting iterator range is then exactly what we need.
//
- // With this obstacle out of the way, we call the WorkStream::run function
- // with this set of cells, scratch and copy objects, and with pointers to
- // two functions: the local assembly and copy-local-to-global
- // function. These functions need to have very specific signatures: three
- // arguments in the first and one argument in the latter case (see the
- // documentation of the WorkStream::run function for the meaning of these
- // arguments). Note how we use the construct std_cxx1x::bind
- // to create a function object that satisfies this requirement. It uses
- // placeholders _1, std_cxx1x::_2, _3
for the local assembly
- // function that specify cell, scratch data, and copy data, as well as the
- // placeholder _1
for the copy function that expects the data
- // to be written into the global matrix. On the other hand, the implicit
- // zeroth argument of member functions (namely the this
pointer
- // of the object on which that member function is to operate on) is
- // bound to the this
pointer of the current
- // function. The WorkStream::run function, as a consequence, does not need
- // to know anything about the object these functions work on.
+ // With this obstacle out of the way, we call the WorkStream::run
+ // function with this set of cells, scratch and copy objects, and
+ // with pointers to two functions: the local assembly and
+ // copy-local-to-global function. These functions need to have very
+ // specific signatures: three arguments in the first and one
+ // argument in the latter case (see the documentation of the
+ // WorkStream::run function for the meaning of these arguments).
+ // Note how we use the construct std_cxx1x::bind to
+ // create a function object that satisfies this requirement. It uses
+ // placeholders std_cxx1x::_1, std_cxx1x::_2,
+ // std_cxx1x::_3 for the local assembly function that specify
+ // cell, scratch data, and copy data, as well as the placeholder
+ // std_cxx1x::_1 for the copy function that expects the
+ // data to be written into the global matrix (for placeholder
+ // arguments, also see the discussion in step-13's
+ // assemble_linear_system() function). On the other
+ // hand, the implicit zeroth argument of member functions (namely
+ // the this pointer of the object on which that member
+ // function is to operate on) is bound to the
+ // this pointer of the current function. The
+ // WorkStream::run function, as a consequence, does not need to know
+ // anything about the object these functions work on.
//
// When the WorkStream is executed, it will create several local assembly
// routines of the first kind for several cells and let some available
diff --git a/deal.II/examples/step-37/step-37.cc b/deal.II/examples/step-37/step-37.cc
index 3fe0dc27c7..a4a3a9ca88 100644
--- a/deal.II/examples/step-37/step-37.cc
+++ b/deal.II/examples/step-37/step-37.cc
@@ -845,8 +845,6 @@ namespace Step37
const unsigned int n_q_points = quadrature_formula.size();
std::vector local_dof_indices (dofs_per_cell);
- const Coefficient coefficient;
- std::vector coefficient_values (n_q_points);
typename DoFHandler::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
diff --git a/deal.II/examples/step-42/doc/intro.dox b/deal.II/examples/step-42/doc/intro.dox
index a263359ead..5c9c2b9219 100644
--- a/deal.II/examples/step-42/doc/intro.dox
+++ b/deal.II/examples/step-42/doc/intro.dox
@@ -23,7 +23,7 @@ elasto-plastic material behavior with isotropic hardening in three dimensions.
In other words, it considers how a three-dimensional body deforms if one pushes
into it a rigid obstacle (the contact problem) where deformation is governed
by an elasto-plastic material law (a material that can only accommodate a certain
-maximal stress) that hardens as deformation accumulates. To show we we intend to
+maximal stress) that hardens as deformation accumulates. To show what we intend to
do before going into too many details, let us just show a picture of what the
solution will look like (the deformable body is a cube - only half of
which is actually shown -, the obstacle corresponds
@@ -197,7 +197,11 @@ the following equation (still an inequality, but linearized):
@f{align*}
\label{eq:linearization}
\left(I_{\Pi}\varepsilon(\tilde {\mathbf u}^{i}),
- \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right) \geq 0,
+ \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right) \geq
+ \left(\left(I_{\Pi}\varepsilon({\mathbf u}^{i-1}),
+ \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right) -
+ \left(P_{\Pi}(C\varepsilon({\mathbf u}^{i-1})),
+ \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right)\right),
\quad \forall \varphi\in V^+,
@f}
where the rank-4 tensor $I_\Pi=I_\Pi(\varepsilon^D(\mathbf u^{i-1}))$ given by
@@ -298,8 +302,11 @@ method for the contact. It works as follows:
(1-\alpha^i_l)U^{i-1}@f}
satisfies
@f{gather*}
- \vert F\left(U^{i}\right) \vert < \vert F\left(U^{i-1}\right) \vert.
+ \vert {\hat R}\left({\mathbf u}^{i}\right) \vert < \vert {\hat R}\left({\mathbf u}^{i-1}\right) \vert.
\f}
+ with ${\hat R}\left({\mathbf u}\right)=\left(P_{\Pi}(C\varepsilon(u)),\varepsilon(\varphi^{i}_p)\right)$ with
+ the exceptions of (i) elements $p\in\mathcal{A}_i$ where we set ${\hat R}\left({\mathbf u}\right)=0$,
+ and (ii) elements that correspond to hanging nodes, which we eliminate in the usual manner.
- Define the new active and inactive sets by
@f{gather*}\mathcal{A}_{i+1}:=\lbrace p\in\mathcal{S}:\Lambda^i_p +
@@ -320,8 +327,8 @@ method for the contact. It works as follows:
where $g_{h,p}$ is the gap denoting the distance of the obstacle
from the undisplaced configuration of the body.
-
- If $\mathcal{A}_{i+1} = \mathcal{A}_k$ and $\vert
- F\left(U^{i}\right) \vert < \delta$ then stop, else set $i=i+1$ and go to
+
- If $\mathcal{A}_{i+1} = \mathcal{A}_k$ and $\left\|
+ {\hat R}\left({\mathbf u}^{i}\right) \right\|_{\ell_2} < \delta$ then stop, else set $i=i+1$ and go to
step (1). This step ensures that we only stop iterations if both the correct
active set has been found and the plasticity has been iterated to sufficient
accuracy.
diff --git a/deal.II/examples/step-42/step-42.cc b/deal.II/examples/step-42/step-42.cc
index 5df351e27a..2a077f813b 100644
--- a/deal.II/examples/step-42/step-42.cc
+++ b/deal.II/examples/step-42/step-42.cc
@@ -1388,7 +1388,7 @@ namespace Step42
// matrix corresponds to the bilinear form
// $A_{ij}=(I_\Pi\varepsilon(\varphi_i),\varepsilon(\varphi_j))$ in the
// notation of the accompanying publication, whereas the right
- // hand side is $F_i=([I_\Pi-P_\Pi]\varepsilon(\varphi_i),\varepsilon(\mathbf u))$
+ // hand side is $F_i=([I_\Pi-P_\Pi C]\varepsilon(\varphi_i),\varepsilon(\mathbf u))$
// where $u$ is the current linearization points (typically the last solution).
// This might suggest that the right hand side will be zero if the material
// is completely elastic (where $I_\Pi=P_\Pi$) but this ignores the fact
diff --git a/deal.II/examples/step-51/doc/builds-on b/deal.II/examples/step-51/doc/builds-on
index 0946a61258..f1818ebf1b 100644
--- a/deal.II/examples/step-51/doc/builds-on
+++ b/deal.II/examples/step-51/doc/builds-on
@@ -1 +1 @@
-step-7 step-32 step-44
+step-7 step-9
diff --git a/deal.II/examples/step-51/doc/intro.dox b/deal.II/examples/step-51/doc/intro.dox
index 44992f71f4..6d476e0c78 100644
--- a/deal.II/examples/step-51/doc/intro.dox
+++ b/deal.II/examples/step-51/doc/intro.dox
@@ -17,7 +17,7 @@ is the large number of globally coupled degrees of freedom that one
must solve in an implicit system. This is because, unlike continuous finite
elements, in typical discontinuous elements there is one degree of freedom at
each vertex for each of the adjacent elements, rather than just one,
-and similarly for edges and faces. As an example of how fast the number of
+and similarly for edges and faces. As an example of how fast the number of
unknowns grows,
consider the
FE_DGP_Monomial
basis: each
scalar solution component is represented by polynomials of degree $p$
@@ -79,7 +79,7 @@ The coupling to other cells is introduced by the matrices
matrix A element by element (the local solution of the Dirichlet
problem) and subtract $CA^{-1}B$ from $D$. The steps in the Dirichlet-to-Neumann map concept hence correspond to
- - constructing the Schur complement matrix $D-C A^{-1} B$ and right hand side $G - C A^{-1} F$ locally on each cell
+
- constructing the Schur complement matrix $D-C A^{-1} B$ and right hand side $G - C A^{-1} F$ locally on each cell
and inserting the contribution into the global trace matrix in the usual way,
- solving the Schur complement system for $\Lambda$, and
- solving for U using the second equation, given $\Lambda$.
@@ -153,10 +153,11 @@ We eliminate the numerical trace $\hat{\mathbf{q}}$ by using traces of the form:
The variable $\hat {u}$ is introduced as an additional independent variable
and is the one for which we finally set up a globally coupled linear
system. As mentioned above, it is defined on the element faces and
-discontinuous from one face to another.
+discontinuous from one face to another wherever faces meet (at
+vertices in 2d, and at edges and vertices in 3d).
Values for $u$ and $\mathbf{q}$ appearing in the numerical trace function
are taken to be the cell's interior solution restricted
-to the boundary $\partial K$.
+to the boundary $\partial K$.
The local stabilization parameter $\tau$ has effects on stability and accuracy
of HDG solutions; see the literature for a further discussion. A stabilization
@@ -168,7 +169,7 @@ the stabilization parameter as
@f{eqnarray*}
\tau = \frac{\kappa}{\ell} + |\mathbf{c} \cdot \mathbf{n}|
@f}
-where we set the diffusion $\kappa=1$ and the diffusion length scale to
+where we set the diffusion $\kappa=1$ and the diffusion length scale to
$\ell = \frac{1}{5}$.
The trace/skeleton variables in HDG methods are single-valued on element
@@ -194,7 +195,7 @@ Find $(\mathbf{q}_h, u_h, \hat{u}_h) \in
- ( \nabla\cdot\mathbf{v}, u_h)_{\mathcal{T}}
+ \left<\mathbf{v}\cdot\mathbf{n}, \hat{u}_h\right>_{\partial\mathcal{T}}
&=& 0,
- \quad \forall \mathbf{v} \in \mathcal{V}_h^p,
+ \quad &&\forall \mathbf{v} \in \mathcal{V}_h^p,
\\
- (\nabla w, \mathbf{c} u_h)_{\mathcal{T}}
+ (w, \nabla \cdot \mathbf{q}_h)_{\mathcal{T}}
@@ -202,14 +203,14 @@ Find $(\mathbf{q}_h, u_h, \hat{u}_h) \in
+ \left_{\partial \mathcal{T}}
&=&
(w, f)_{\mathcal{T}},
- \quad \forall w \in \mathcal{W}_h^p,
+ \quad &&\forall w \in \mathcal{W}_h^p,
\\
\left< \mu, \hat{u}_h\mathbf{c} \cdot \mathbf{n}
+ \mathbf{q}_h\cdot \mathbf{n}
+ \tau (u_h - \hat{u}_h)\right>_{\partial \mathcal{T}}
&=&
\left<\mu, g_N\right>_{\partial\Omega_N},
- \quad \forall \mu \in \mathcal{M}_h^p.
+ \quad &&\forall \mu \in \mathcal{M}_h^p.
@f}
The unknowns $(\mathbf{q}_h, u_h)$ are referred to as local variables; they are
@@ -220,9 +221,10 @@ We use the notation $(\cdot, \cdot)_{\mathcal{T}} = \sum_K (\cdot, \cdot)_K$
to denote the sum of integrals over all cells and $\left<\cdot,
\cdot\right>_{\partial \mathcal{T}} = \sum_K \left<\cdot,
\cdot\right>_{\partial K}$ to denote integration over all faces of all cells,
-i.e., interior faces are visited twice. When combining the contribution from
-both elements sharing a face, the above equation yields terms familiar for DG
-with jumps of the solution over the cell boundaries.
+i.e., interior faces are visited twice, once from each side and with
+the corresponding normal vectors. When combining the contribution from
+both elements sharing a face, the above equation yields terms familiar
+from the DG method, with jumps of the solution over the cell boundaries.
In the equation above, the space $\mathcal {W}_h^{p}$ for the scalar variable
uh is defined as the space of functions that are tensor
@@ -244,7 +246,8 @@ In the weak form given above, we can note the following coupling patterns:
- The matrix $A$ consists of local-local coupling terms. These arise when the
local weighting functions $(\mathbf{v}, w)$ multiply the local solution terms
- $(\mathbf{q}_h, u_h)$.
+ $(\mathbf{q}_h, u_h)$. Because the elements are discontinuous, $A$
+ is block diagonal.
- The matrix $B$ represents the local-face coupling. These are the terms
with weighting functions $(\mathbf{v}, w)$ multiplying the skeleton variable
$\hat{u}_h$.
@@ -270,12 +273,14 @@ ingredients:
We now introduce a new variable $u_h^* \in \mathcal{V}_h^{p+1}$, which we find
-by the expression $|\kappa \nabla u_h^* + \mathbf{q}_h|^2$ over the cell
+by minimizing the expression $|\kappa \nabla u_h^* + \mathbf{q}_h|^2$ over the cell
K under the constraint $\left(1, u_h^*\right)_K &=& \left(1,
-u_h\right)_K$. This translates to the following system of equations:
+u_h\right)_K$. The constraint is necessary because the minimization
+functional does not determine the constant part of $u_h^*$. This
+translates to the following system of equations:
@f{eqnarray*}
\left(1, u_h^*\right)_K &=& \left(1, u_h\right)_K\\
-\left(\nabla w_h^*, \kappa \nabla u_h^*\right)_K &=&
+\left(\nabla w_h^*, \kappa \nabla u_h^*\right)_K &=&
-\left(\nabla w_h^*, \mathbf{q}_h\right)_K
\quad \text{for all } w_h^* \in \mathcal Q^{p+1}.
@f}
@@ -337,8 +342,8 @@ Note that the convection is divergence-free, $\nabla \cdot c = 0$.
Besides implementing the above equations, the implementation below provides the following features:
- - WorkStream to parallelize local solvers. Workstream is already used in
- step-32, step-44.
+
- WorkStream to parallelize local solvers. Workstream has been presented
+ in detail in step-9.
- Reconstruct the local DG solution from the trace.
- Post-processing the solution for superconvergence.
- DataOutFaces for direct output of the global skeleton solution.
diff --git a/deal.II/examples/step-51/doc/results.dox b/deal.II/examples/step-51/doc/results.dox
index f41ab66065..56a040d44f 100644
--- a/deal.II/examples/step-51/doc/results.dox
+++ b/deal.II/examples/step-51/doc/results.dox
@@ -17,7 +17,13 @@ solution is not continuous on the vertices where the faces meet, even though
its values are quite close along lines in the same coordinate direction. The
skeleton solution can be interpreted as a rubber spring between the two sides
that balances the jumps in the solution (or rather, the flux $\kappa \nabla u
-+ \mathbf{c} u$). As the mesh is refined, the jumps between the cells get
++ \mathbf{c} u$). From the picture at the top left, it is clear that
+the bulk solution frequently over- and undershoots and that the
+skeleton variable is indeed a better approximation to the exact
+solution; this explains why we can get a better solution using a
+postprocessing step.
+
+As the mesh is refined, the jumps between the cells get
small (we represent a smooth solution), and the skeleton solution approaches
the interior parts. For cycle 8, there is no visible difference in the two
variables. We also see how boundary conditions are implemented weakly and that
@@ -122,16 +128,16 @@ cells dofs val L2 grad L2 val L2-post
Q3 elements, global refinement:
cells dofs val L2 grad L2 val L2-post
- 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
- 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
- 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
- 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
- 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
- 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
- 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
- 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
- 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
- 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
+ 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
+ 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
+ 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
+ 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
+ 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
+ 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
+ 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
+ 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
+ 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
+ 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
@endcode
@@ -139,7 +145,7 @@ One can see the error reduction upon grid refinement, and for the cases where
global refinement was performed, also the convergence rates. The quadratic
convergence rates of Q1 elements in the $L_2$ norm for both the scalar
variable and the gradient variable is apparent, as is the cubic rate for the
-postprocessed scalar variable in the $L_2$ norm. Note that is is a distinctive
+postprocessed scalar variable in the $L_2$ norm. Note this distinctive
feature of an HDG solution. In typical continuous finite elements, the
gradient of the solution of order p converges at rate p only, as
opposed to p+1 for the actual solution. Even though superconvergence
@@ -153,41 +159,41 @@ The same convergence rates are observed in 3d.
@code
Q1 elements, adaptive refinement:
cells dofs val L2 grad L2 val L2-post
- 8 144 7.122e+00 1.941e+01 6.102e+00
- 29 500 3.309e+00 1.023e+01 2.145e+00
- 113 1792 2.204e+00 1.023e+01 1.912e+00
- 379 5732 6.085e-01 5.008e+00 2.233e-01
- 1317 19412 1.543e-01 1.464e+00 4.196e-02
- 4579 64768 5.058e-02 5.611e-01 9.521e-03
- 14596 199552 2.129e-02 3.122e-01 4.569e-03
- 46180 611400 1.033e-02 1.622e-01 1.684e-03
-144859 1864212 5.007e-03 8.371e-02 7.364e-04
-451060 5684508 2.518e-03 4.562e-02 3.070e-04
+ 8 144 7.122e+00 1.941e+01 6.102e+00
+ 29 500 3.309e+00 1.023e+01 2.145e+00
+ 113 1792 2.204e+00 1.023e+01 1.912e+00
+ 379 5732 6.085e-01 5.008e+00 2.233e-01
+ 1317 19412 1.543e-01 1.464e+00 4.196e-02
+ 4579 64768 5.058e-02 5.611e-01 9.521e-03
+ 14596 199552 2.129e-02 3.122e-01 4.569e-03
+ 46180 611400 1.033e-02 1.622e-01 1.684e-03
+144859 1864212 5.007e-03 8.371e-02 7.364e-04
+451060 5684508 2.518e-03 4.562e-02 3.070e-04
Q1 elements, global refinement:
cells dofs val L2 grad L2 val L2-post
- 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
- 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
- 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
- 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
- 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
- 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
- 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
- 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
- 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
-110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
+ 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
+ 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
+ 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
+ 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
+ 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
+ 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
+ 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
+ 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
+ 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
+110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
Q3 elements, global refinement:
cells dofs val L2 grad L2 val L2-post
- 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
- 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
- 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
- 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
- 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
- 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
- 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
- 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
- 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
+ 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
+ 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
+ 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
+ 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
+ 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
+ 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
+ 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
+ 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
+ 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
@endcode
@@ -268,7 +274,7 @@ somewhat higher order, usually around p=3. This is because of a
volume-to-surface effect for discontinuous solutions with too much of the
solution living on the surfaces and hence duplicating work when the elements
are linear. Put in other words, DG methods are often most efficient when used
-at relatively high order, despite their focus on discontinuous (and hence,
+at relatively high order, despite their focus on a discontinuous (and hence,
seemingly low accurate) representation of solutions.
Results for 3D
@@ -319,7 +325,7 @@ One final note on the efficiency comparison: We tried to use general-purpose
sparse matrix structures and similar solvers (optimal AMG preconditioners for
both without particular tuning of the AMG parameters on any of them) to give a
fair picture of the cost versus accuracy of two methods, on a toy example. It
-should be noted however that GMG for continuous finite elements is about a
+should be noted however that geometric multigrid (GMG) for continuous finite elements is about a
factor four to five faster for p=3 and p=6. The authors of this
tutorial have not seen similarly advanced solvers for the HDG linear
systems. Also, there are other implementation aspects for CG available such as
@@ -327,6 +333,7 @@ fast matrix-free approaches as shown in step-37 that make higher order
continuous elements more competitive. Again, it is not clear to the authors of
the tutorial whether similar improvements could be made for HDG.
+
Possibilities for improvements
As already mentioned in the introduction, one possibility is to implement
@@ -398,16 +405,17 @@ components:
As can be seen from the table, the solver and assembly calls dominate the
-runtime of the program. This also gives a clear indication of where an
-improvement makes most sense.
+runtime of the program. This also gives a clear indication of where
+improvements would make the most sense:
- Better linear solvers: We use a BiCGStab iterative solver without
preconditioner, where the number of iteration increases with increasing
problem size (the number of iterations for Q1 elements and global
- refinements start at 35 for the small sizes but increase up to 701 for the
+ refinements starts at 35 for the small sizes but increases up to 701 for the
largest size). To do better, one could for example use an algebraic
- multigrid preconditioner from Trilinos. For diffusion-dominated problems as
+ multigrid preconditioner from Trilinos. For diffusion-dominated
+ problems such as
the problem at hand with finer meshes, such a solver can be designed that
uses the matrix-vector products from the more efficient ChunkSparseMatrix on
the finest level, as long as we are not working in parallel with MPI. For
diff --git a/deal.II/examples/step-9/doc/intro.dox b/deal.II/examples/step-9/doc/intro.dox
index 3662474b1f..3f273b69c8 100644
--- a/deal.II/examples/step-9/doc/intro.dox
+++ b/deal.II/examples/step-9/doc/intro.dox
@@ -12,9 +12,10 @@ In this example, our aims are the following:
While the second aim is difficult to describe in general terms without
reference to the code, we will discuss the other two aims in the
following. The use of multiple threads will then be detailed at the
-relevant places within the program. Furthermore, there exists a report on this
-subject, which is also available online from the ``Documentation'' section of
-the deal.II homepage.
+relevant places within the program. We will, however, follow the
+general discussion of the WorkStream approach detailed in the
+@ref threads "Parallel computing with multiple processors accessing shared memory"
+documentation module.
Discretizing the advection equation
diff --git a/deal.II/examples/step-9/step-9.cc b/deal.II/examples/step-9/step-9.cc
index 73b56ce912..d0e92e6c75 100644
--- a/deal.II/examples/step-9/step-9.cc
+++ b/deal.II/examples/step-9/step-9.cc
@@ -48,8 +48,8 @@
// The following two files provide classes and information for multithreaded
// programs. In the first one, the classes and functions are declared which we
-// need to start new threads and to wait for threads to return (i.e. the
-// Thread class and the new_thread functions). The
+// need to do assembly in parallel (i.e. the
+// WorkStream namespace). The
// second file has a class MultithreadInfo (and a global object
// multithread_info of that type) which can be used to query the
// number of processors in your system, which is often useful when deciding
@@ -74,25 +74,6 @@ namespace Step9
{
using namespace dealii;
- namespace Assembler
- {
- struct Scratch
- {
- Scratch() {};
- };
-
- struct CopyData
- {
- CopyData() {};
-
- unsigned int dofs_per_cell;
- std::vector local_dof_indices;
- // We declare cell matrix and cell right hand side...
- FullMatrix cell_matrix;
- Vector cell_rhs;
- };
- }
-
// @sect3{AdvectionProblem class declaration}
// Following we declare the main class of this program. It is very much
@@ -108,28 +89,65 @@ namespace Step9
private:
void setup_system ();
- // The next function will be used to assemble the matrix. However, unlike
- // in the previous examples, the function will not do the work itself, but
- // rather it will split the range of active cells into several chunks and
- // then call the following function on each of these chunks. The rationale
- // is that matrix assembly can be parallelized quite well, as the
+
+ // The next set of functions will be used to assemble the
+ // matrix. However, unlike in the previous examples, the
+ // assemble_system() function will not do the work
+ // itself, but rather will delegate the actual assembly to helper
+ // functions local_assemble_system() and
+ // copy_local_to_global(). The rationale is that
+ // matrix assembly can be parallelized quite well, as the
// computation of the local contributions on each cell is entirely
- // independent of other cells, and we only have to synchronize when we add
- // the contribution of a cell to the global matrix. The second function,
- // doing the actual work, accepts two parameters which denote the first
- // cell on which it shall operate, and the one past the last.
+ // independent of other cells, and we only have to synchronize
+ // when we add the contribution of a cell to the global
+ // matrix.
//
// The strategy for parallelization we choose here is one of the
- // possibilities mentioned in detail in the @ref threads module in the
- // documentation. While it is a straightforward way to distribute the work
- // for assembling the system onto multiple processor cores. As mentioned
- // in the module, there are other, and possibly better suited, ways to
- // achieve the same goal.
+ // possibilities mentioned in detail in the @ref threads module in
+ // the documentation. Specifically, we will use the WorkStream
+ // approach discussed there. Since there is so much documentation
+ // in this module, we will not repeat the rationale for the design
+ // choices here (for example, if you read through the module
+ // mentioned above, you will understand what the purpose of the
+ // AssemblyScratchData and
+ // AssemblyCopyData structures is). Rather, we will
+ // only discuss the specific implementation.
+ //
+ // If you read the page mentioned above, you will find that in
+ // order to parallelize assembly, we need two data structures --
+ // one that corresponds to data that we need during local
+ // integration ("scratch data", i.e., things we only need as
+ // temporary storage), and one that carries information from the
+ // local integration to the function that then adds the local
+ // contributions to the corresponding elements of the global
+ // matrix. The former of these typically contains the FEValues and
+ // FEFaceValues objects, whereas the latter has the local matrix,
+ // local right hand side, and information about which degrees of
+ // freedom live on the cell for which we are assembling a local
+ // contribution. With this information, the following should be
+ // relatively self-explanatory:
+ struct AssemblyScratchData
+ {
+ AssemblyScratchData (const FiniteElement<dim> &fe);
+ AssemblyScratchData (const AssemblyScratchData &scratch_data);
+
+ FEValues<dim> fe_values;
+ FEFaceValues<dim> fe_face_values;
+ };
+
+ struct AssemblyCopyData
+ {
+ FullMatrix<double> cell_matrix;
+ Vector<double> cell_rhs;
+ std::vector<types::global_dof_index> local_dof_indices;
+ };
+
void assemble_system ();
- void build_local_system (typename DoFHandler<dim>::active_cell_iterator const &cell,
- Assembler::Scratch &scratch,Assembler::CopyData &copy_data);
- void copy_local_to_global (Assembler::CopyData const &copy_data);
-
+ void local_assemble_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch,
+ AssemblyCopyData &copy_data);
+ void copy_local_to_global (const AssemblyCopyData &copy_data);
+
// The following functions again are as in previous examples, as are the
// subsequent variables.
@@ -447,13 +465,33 @@ namespace Step9
DeclException0 (ExcInsufficientDirections);
private:
- typedef std::pair IndexInterval;
+ template <int dim>
+ struct EstimateScratchData
+ {
+ EstimateScratchData (const FiniteElement<dim> &fe,
+ const Vector<double> &solution);
+ EstimateScratchData (const EstimateScratchData &data);
+
+ FEValues<dim> fe_midpoint_value;
+ Vector<double> solution;
+ };
+
+ // There is nothing to copy but WorkStream requires a CopyData structure
+ template <int dim>
+ struct EstimateCopyData
+ {
+ EstimateCopyData () {}
+ };
template <int dim>
- static void estimate_interval (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
- const IndexInterval &index_interval,
- Vector<float> &error_per_cell);
+ static void estimate_cell (
+ const SynchronousIterators<std_cxx1x::tuple<typename DoFHandler<dim>::active_cell_iterator,
+ Vector<float>::iterator> > &cell,
+ EstimateScratchData<dim> &scratch_data,
+ const EstimateCopyData<dim> &copy_data);
+ // There is nothing to copy but WorkStream requires a copy function
+ template <int dim>
+ static void dummy_copy(const EstimateCopyData<dim> &copy_data) {}
};
@@ -510,147 +548,128 @@ namespace Step9
// In the following function, the matrix and right hand side are
// assembled. As stated in the documentation of the main class above, it
// does not do this itself, but rather delegates to the function following
- // next, by splitting up the range of cells into chunks of approximately the
- // same size and assembling on each of these chunks in parallel.
+ // next, utilizing the WorkStream concept discussed in @ref threads .
+ //
+ // If you have looked through the @ref threads module, you will have
+ // seen that assembling in parallel does not take an incredible
+ // amount of extra code as long as you diligently describe what the
+ // scratch and copy data objects are, and if you define suitable
+ // functions for the local assembly and the copy operation from local
+ // contributions to global objects. This done, the following will do
+ // all the heavy lifting to get these operations done on multiple
+ // threads on as many cores as you have in your system:
template <int dim>
void AdvectionProblem<dim>::assemble_system ()
{
- // First, we want to find out how many threads shall assemble the matrix
- // in parallel. A reasonable choice would be that each processor in your
- // system processes one chunk of cells; if we were to use this
- // information, we could use the value of the global variable
- // multithread_info.n_cpus, which is determined at start-up
- // time of your program automatically. (Note that if the library was not
- // configured for multithreading, then the number of CPUs is set to one.)
- // However, sometimes there might be reasons to use another value. For
- // example, you might want to use less processors than there are in your
- // system in order not to use too many computational resources. For
- // this reason, the number of threads can be set by
- // MultithreadInfo::set_thread_limit and the current value
- // is returned by n_threads(). This
- // is also queried by functions inside the library to determine
- // how many threads they shall create.
-
- // It is worth noting, however, that this setup determines the load
- // distribution onto processor in a static way: it does not take into
- // account that some other part of our program may also be running
- // something in parallel at the same time as we get here (this is not the
- // case in the current program, but may easily be the case in more complex
- // applications). A discussion of how to deal with this case can be found
- // in the @ref threads module.
- //
- // Next, we need an object which is capable of keeping track of the
- // threads we created, and allows us to wait until they all have finished
- // (to join them in the language of threads). The
- // Threads::ThreadGroup class does this, which is basically just a
- // container for objects of type Threads::Thread that represent a single
- // thread; Threads::Thread is what the Threads::new_thread function below
- // will return when we start a new thread.
- //
- // Note that both Threads::ThreadGroup and Threads::Thread have a template
- // argument that represents the return type of the function being called
- // on a separate thread. Since most of the functions that we will call on
- // different threads have return type void, the template
- // argument has a default value void, so that in that case it
- // can be omitted. (However, you still need to write the angle brackets,
- // even if they are empty.)
- //
- // If you did not configure for multithreading, then the
- // new_thread function that is supposed to start a new thread
- // in parallel only executes the function which should be run in parallel,
- // waits for it to return (i.e. the function is executed sequentially),
- // and puts the return value into the Thread
- // object. Likewise, the function join that is supposed to
- // wait for all spawned threads to return, returns immediately, as there
- // can't be any threads running.
-
- // Now we have to split the range of cells into chunks of approximately
- // the same size. Each thread will then assemble the local contributions
- // of the cells within its chunk and transfer these contributions to the
- // global matrix. As splitting a range of cells is a rather common task
- // when using multithreading, there is a function in the
- // Threads namespace that does exactly this. In fact, it does
- // this not only for a range of cell iterators, but for iterators in
- // general, so you could use it for std::vector::iterator or
- // usual pointers as well.
- //
- // The function returns a vector of pairs of iterators, where the first
- // denotes the first cell of each chunk, while the second denotes the one
- // past the last (this half-open interval is the usual convention in the
- // C++ standard library, so we keep to it). Note that we have to specify
- // the actual data type of the iterators in angle brackets to the
- // function. This is necessary, since it is a template function which
- // takes the data type of the iterators as template argument; in the
- // present case, however, the data types of the two first parameters
- // differ (begin_active
returns an
- // active_iterator
, while end
returns a
- // raw_iterator
), and in this case the C++ language requires
- // us to specify the template type explicitly. For brevity, we first
- // typedef this data type to an alias.
-
- typedef typename DoFHandler<dim>::active_cell_iterator active_cell_iterator;
-
- // Finally, for each of the chunks of iterators we have computed, start
- // one thread (or if not in multithread mode: execute assembly on these
- // chunks sequentially). This is done using the following sequence of
- // function calls:
-
- Assembler::Scratch scratch;
- Assembler::CopyData copy_data;
- WorkStream::run(dof_handler.begin_active(),dof_handler.end(),*this,
- &AdvectionProblem::build_local_system,&AdvectionProblem::copy_local_to_global,
- scratch,copy_data);
-
-
- // The reasons and internal workings of these functions can be found in
- // the report on the subject of multithreading, which is available online
- // as well. Suffice it to say that we create a new thread that calls the
- // assemble_system_interval function on the present object
- // (the this pointer), with the arguments following in the
- // second set of parentheses passed as parameters. The Threads::new_thread
- // function returns an object of type Threads::Thread, which we put into
- // the threads container. If a thread exits, the return value
- // of the function being called is put into a place such that the thread
- // objects can access it using their return_value function;
- // since the function we call doesn't have a return value, this does not
- // apply here. Note that you can copy around thread objects freely, and
- // that of course they will still represent the same thread.
-
- // When all the threads are running, the only thing we have to do is wait
- // for them to finish. This is necessary of course, as we can't proceed
- // with our tasks before the matrix and right hand side are
- // assembled. Waiting for all the threads to finish can be done using the
- // join_all function in the ThreadGroup
- // container, which just calls join on each of the thread
- // objects it stores.
- //
- // Again, if the library was not configured to use multithreading, then
- // no threads can run in parallel and the function returns immediately.
+ WorkStream::run(dof_handler.begin_active(),
+ dof_handler.end(),
+ *this,
+ &AdvectionProblem::local_assemble_system,
+ &AdvectionProblem::copy_local_to_global,
+ AssemblyScratchData(fe),
+ AssemblyCopyData());
// After the matrix has been assembled in parallel, we still have to
// eliminate hanging node constraints. This is something that can't be
// done on each of the threads separately, so we have to do it now.
- hanging_node_constraints.condense (system_matrix);
- hanging_node_constraints.condense (system_rhs);
// Note also, that unlike in previous examples, there are no boundary
// conditions to be applied to the system of equations. This, of course,
// is due to the fact that we have included them into the weak formulation
// of the problem.
+ hanging_node_constraints.condense (system_matrix);
+ hanging_node_constraints.condense (system_rhs);
}
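+ // For reference, the two member functions we just handed to
+ // WorkStream::run have to have the signatures we declared for them
+ // (the names are simply the ones used in this program):
+ // @code
+ //   void local_assemble_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ //                               AssemblyScratchData                                   &scratch_data,
+ //                               AssemblyCopyData                                      &copy_data);
+ //
+ //   void copy_local_to_global (const AssemblyCopyData &copy_data);
+ // @endcode
+ // WorkStream::run calls the first of these once for every cell in the
+ // given iterator range, possibly on several threads at once, and then
+ // calls the second one sequentially, in the order of the cells, with
+ // whatever the first one left in the copy data object.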
+ // As already mentioned above, we need to have scratch objects for
+ // the parallel computation of local contributions. These objects
+ // contain FEValues and FEFaceValues objects, and so we will need to
+ // have constructors and copy constructors that allow us to create
+ // them. In initializing them, note first that we use bilinear
+ // elements, so Gauss formulae with two points in each space
+ // direction are sufficient. For the cell terms we need the values
+ // and gradients of the shape functions, the quadrature points in
+ // order to determine the source density and the advection field at
+ // a given point, and the weights of the quadrature points times the
+ // determinant of the Jacobian at these points. In contrast, for the
+ // boundary integrals, we don't need the gradients, but rather the
+ // normal vectors to the cells. This determines which update flags
+ // we will have to pass to the constructors of the members of the
+ // class:
+ template <int dim>
+ AdvectionProblem<dim>::AssemblyScratchData::
+ AssemblyScratchData (const FiniteElement<dim> &fe)
+ :
+ fe_values (fe,
+ QGauss<dim>(2),
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values),
+ fe_face_values (fe,
+ QGauss<dim-1>(2),
+ update_values | update_quadrature_points |
+ update_JxW_values | update_normal_vectors)
+ {}
+
+
+
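+ // The second of the two constructors is a copy constructor. It is
+ // needed because WorkStream::run does not work on the scratch object
+ // we pass to it directly, but creates copies of it for the threads
+ // that participate in the assembly; since FEValues and FEFaceValues
+ // objects cannot simply be copied, we construct new ones from the
+ // finite element and quadrature formula of the object we copy from: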
+ template <int dim>
+ AdvectionProblem<dim>::AssemblyScratchData::
+ AssemblyScratchData (const AssemblyScratchData &scratch_data)
+ :
+ fe_values (scratch_data.fe_values.get_fe(),
+ scratch_data.fe_values.get_quadrature(),
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values),
+ fe_face_values (scratch_data.fe_face_values.get_fe(),
+ scratch_data.fe_face_values.get_quadrature(),
+ update_values | update_quadrature_points |
+ update_JxW_values | update_normal_vectors)
+ {}
+
+
+
+
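+ // Taken together, the scratch and copy objects used above are
+ // presumably declared along the following lines inside the class (the
+ // member names are exactly the ones used in this file; the concrete
+ // types of the copy data members are only inferred from how they are
+ // initialized below):
+ // @code
+ //   struct AssemblyScratchData
+ //   {
+ //     AssemblyScratchData (const FiniteElement<dim> &fe);
+ //     AssemblyScratchData (const AssemblyScratchData &scratch_data);
+ //
+ //     FEValues<dim>     fe_values;
+ //     FEFaceValues<dim> fe_face_values;
+ //   };
+ //
+ //   struct AssemblyCopyData
+ //   {
+ //     FullMatrix<double>                   cell_matrix;
+ //     Vector<double>                       cell_rhs;
+ //     std::vector<types::global_dof_index> local_dof_indices;
+ //   };
+ // @endcode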
// Now, this is the function that does the actual work. It is not very
// different from the assemble_system functions of previous
// example programs, so we will again only comment on the differences. The
// mathematical stuff follows closely what we have said in the introduction.
+ //
+ // There are a number of points worth mentioning here, though. The
+ // first one is that we have moved the FEValues and FEFaceValues
+ // objects into the ScratchData object. We have done so because the
+ // alternative would have been to simply create one every time we
+ // get into this function -- i.e., on every cell. It now turns out
+ // that the FEValues classes were written with the explicit goal of
+ // moving everything that remains the same from cell to cell into
+ // the construction of the object, and only do as little work as
+ // possible in FEValues::reinit() whenever we move to a new
+ // cell. What this means is that it would be very expensive to
+ // create a new object of this kind in this function as we would
+ // have to do it for every cell -- exactly the thing we wanted to
+ // avoid with the FEValues class. Instead, what we do is create it
+ // only once (or a small number of times) in the scratch objects and
+ // then re-use it as often as we can.
+ //
+ // This raises the question of whether there are other objects we
+ // create in this function whose creation is expensive compared to
+ // its use. Indeed, at the top of the function, we declare all sorts
+ // of objects. The AdvectionField,
+ // RightHandSide and BoundaryValues do not
+ // cost much to create, so there is no harm here. However,
+ // allocating memory in creating the rhs_values and
+ // similar variables below typically costs a significant amount of
+ // time, compared to just accessing the (temporary) values we store
+ // in them. Consequently, these would be candidates for moving into
+ // the AssemblyScratchData class. We will leave this as
+ // an exercise.
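+ // (As a rough sketch of that exercise: one could give
+ // AssemblyScratchData an additional member
+ // @code
+ //   std::vector<double> rhs_values;
+ // @endcode
+ // size it to the number of quadrature points once in the two
+ // constructors above, and re-use it on every cell below -- and do the
+ // same for the advection directions and the face data.)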
template <int dim>
void
AdvectionProblem<dim>::
- build_local_system (typename DoFHandler<dim>::active_cell_iterator const &cell,
- Assembler::Scratch &scratch,
- Assembler::CopyData &copy_data)
+ local_assemble_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+ AssemblyCopyData &copy_data)
{
// First of all, we will need some objects that describe boundary values,
// right hand side function and the advection field. As we will only
@@ -661,39 +680,18 @@ namespace Step9
const RightHandSide<dim> right_hand_side;
const BoundaryValues<dim> boundary_values;
- // Next we need quadrature formula for the cell terms, but also for the
- // integral over the inflow boundary, which will be a face integral. As we
- // use bilinear elements, Gauss formulae with two points in each space
- // direction are sufficient.
- QGauss<dim> quadrature_formula(2);
- QGauss<dim-1> face_quadrature_formula(2);
-
- // Finally, we need objects of type FEValues and
- // FEFaceValues. For the cell terms we need the values and
- // gradients of the shape functions, the quadrature points in order to
- // determine the source density and the advection field at a given point,
- // and the weights of the quadrature points times the determinant of the
- // Jacobian at these points. In contrast, for the boundary integrals, we
- // don't need the gradients, but rather the normal vectors to the cells.
- FEValues<dim> fe_values (fe, quadrature_formula,
- update_values | update_gradients |
- update_quadrature_points | update_JxW_values);
- FEFaceValues<dim> fe_face_values (fe, face_quadrature_formula,
- update_values | update_quadrature_points |
- update_JxW_values | update_normal_vectors);
-
// Then we define some abbreviations to avoid unnecessarily long lines:
- copy_data.dofs_per_cell = fe.dofs_per_cell;
- const unsigned int n_q_points = quadrature_formula.size();
- const unsigned int n_face_q_points = face_quadrature_formula.size();
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = scratch_data.fe_values.get_quadrature().size();
+ const unsigned int n_face_q_points = scratch_data.fe_face_values.get_quadrature().size();
// We declare cell matrix and cell right hand side...
- copy_data.cell_matrix = FullMatrix<double> (copy_data.dofs_per_cell, copy_data.dofs_per_cell);
- copy_data.cell_rhs = Vector<double> (copy_data.dofs_per_cell);
+ copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
+ copy_data.cell_rhs.reinit (dofs_per_cell);
// ... an array to hold the global indices of the degrees of freedom of
// the cell on which we are presently working...
- copy_data.local_dof_indices.resize(copy_data.dofs_per_cell);
+ copy_data.local_dof_indices.resize(dofs_per_cell);
// ... and array in which the values of right hand side, advection
// direction, and boundary values will be stored, for cell and face
@@ -705,13 +703,13 @@ namespace Step9
// ... then initialize the FEValues object...
- fe_values.reinit (cell);
+ scratch_data.fe_values.reinit (cell);
// ... obtain the values of right hand side and advection directions
// at the quadrature points...
- advection_field.value_list (fe_values.get_quadrature_points(),
+ advection_field.value_list (scratch_data.fe_values.get_quadrature_points(),
advection_directions);
- right_hand_side.value_list (fe_values.get_quadrature_points(),
+ right_hand_side.value_list (scratch_data.fe_values.get_quadrature_points(),
rhs_values);
// ... set the value of the streamline diffusion parameter as
@@ -721,26 +719,26 @@ namespace Step9
// ... and assemble the local contributions to the system matrix and
// right hand side as also discussed above:
for (unsigned int q_point=0; q_point
void
- AdvectionProblem<dim>::copy_local_to_global (Assembler::CopyData const &copy_data)
+ AdvectionProblem<dim>::copy_local_to_global (const AssemblyCopyData &copy_data)
{
+ for (unsigned int i=0; i<copy_data.local_dof_indices.size(); ++i)
+ {
+ for (unsigned int j=0; j<copy_data.local_dof_indices.size(); ++j)
+ system_matrix.add (copy_data.local_dof_indices[i],
+ copy_data.local_dof_indices[j],
+ copy_data.cell_matrix(i,j));
- // ... Mutex, which is short for
- // mutually exclusive: a thread that wants to write to
- // the global objects acquires this lock, but has to wait if it is
- // presently owned by another thread. If it has acquired the lock, it
- // can be sure that no other thread is presently writing to the
- // matrix, and can do so freely. When finished, we release the lock
- // again so as to allow other threads to acquire it and write to the
- // matrix.
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
- // ... the lock and release
- // functions are no-ops, i.e. they return without doing anything.
- //
- // 2. In order to work properly, it is essential that all threads try
- // to acquire the same lock. This, of course, can not be achieved if
- // the lock is a local variable, as then each thread would acquire its
- // own lock. Therefore, the lock variable is a member variable of the
- // class; since all threads execute member functions of the same
- // object, they have the same this pointer and therefore
- // also operate on the same lock.
+ system_rhs(copy_data.local_dof_indices[i]) += copy_data.cell_rhs(i);
+ }
}
-
+
@@ -946,7 +910,7 @@ namespace Step9
else
{
refine_grid ();
- };
+ }
std::cout << " Number of active cells: "
@@ -962,7 +926,7 @@ namespace Step9
assemble_system ();
solve ();
output_results (cycle);
- };
+ }
DataOut<dim> data_out;
data_out.attach_dof_handler (dof_handler);
@@ -977,6 +941,32 @@ namespace Step9
// @sect3{GradientEstimation class implementation}
+ // ScratchData used by estimate_cell
+ template <int dim>
+ GradientEstimation::EstimateScratchData<dim>
+ ::EstimateScratchData (const FiniteElement<dim> &fe,
+ const Vector<double> &solution)
+ :
+ fe_midpoint_value(fe,
+ QMidpoint<dim> (),
+ update_values | update_quadrature_points),
+ solution(solution)
+ {}
+
+
+
+ // Copy constructor for the ScratchData object used by estimate_cell
+ template <int dim>
+ GradientEstimation::EstimateScratchData<dim>
+ ::EstimateScratchData(const EstimateScratchData &scratch_data)
+ :
+ fe_midpoint_value(scratch_data.fe_midpoint_value.get_fe(),
+ scratch_data.fe_midpoint_value.get_quadrature(),
+ update_values | update_quadrature_points),
+ solution(scratch_data.solution)
+ {}
+
+
// Now for the implementation of the GradientEstimation
// class. The first function does not do much except delegating work to the
// other function:
@@ -997,15 +987,6 @@ namespace Step9
ExcInvalidVectorLength (error_per_cell.size(),
dof_handler.get_tria().n_active_cells()));
- // Next, we subdivide the range of cells into chunks of equal size. Just
- // as we have used the function Threads::split_range when
- // assembling above, there is a function that computes intervals of
- // roughly equal size from a larger interval. This is used here:
- const unsigned int n_threads = multithread_info.n_threads();
- std::vector<IndexInterval> index_intervals
- = Threads::split_interval (0, dof_handler.get_tria().n_active_cells(),
- n_threads);
-
// In the same way as before, we use a Threads::ThreadGroup
// object to collect the descriptor objects of different threads. Note
// that as the function called is not a member function, but rather a
@@ -1020,20 +1001,27 @@ namespace Step9
// or the other compiler, but have to take a temporary variable for that
- // purpose. Here, in this case, Compaq's cxx compiler choked
// on the code so we use this workaround with the function pointer:
- Threads::ThreadGroup<> threads;
- void (*estimate_interval_ptr) (const DoFHandler<dim> &,
- const Vector<double> &,
- const IndexInterval &,
- Vector<float> &)
- = &GradientEstimation::template estimate_interval<dim>;
- for (unsigned int i=0; i<n_threads; ++i)
+ void (*estimate_cell_ptr) (const SynchronousIterators<std_cxx1x::tuple<typename DoFHandler<dim>
+ ::active_cell_iterator,Vector<float>::iterator> > &cell,
+ EstimateScratchData<dim> &scratch_data,
+ const EstimateCopyData &copy_data)
+ = &GradientEstimation::template estimate_cell<dim>;
+
+ void (*dummy_copy) (const EstimateCopyData &copy_data)
+ = &GradientEstimation::template dummy_copy;
+
+ typedef std_cxx1x::tuple<typename DoFHandler<dim>::active_cell_iterator,Vector<float>::iterator>
+ Iterators;
+ SynchronousIterators<Iterators> begin_sync_it(Iterators(dof_handler.begin_active(),
+ error_per_cell.begin()));
+ SynchronousIterators<Iterators> end_sync_it(Iterators(dof_handler.end(),error_per_cell.end()));
+
+ WorkStream::run(begin_sync_it,end_sync_it,
+ estimate_cell_ptr,
+ dummy_copy,
+ EstimateScratchData<dim> (dof_handler.get_fe(),solution),
+ EstimateCopyData ());
+
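+ // The iterator range we just passed to WorkStream::run is a bit
+ // unusual: each element is a SynchronousIterators object that bundles
+ // a cell iterator with the iterator to the corresponding entry of
+ // error_per_cell, and advancing it advances both at once. Inside
+ // estimate_cell the two components can then be extracted with
+ // @code
+ //   std_cxx1x::get<0>(cell.iterators)   // the cell
+ //   std_cxx1x::get<1>(cell.iterators)   // the entry of error_per_cell
+ // @endcode
+ // so that each cell's error estimate can be written straight into the
+ // output vector without any synchronization between threads.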
// Note that if the value of the variable
// multithread_info.n_threads() was one, or if the
// library was not configured to use threads, then the sequence of
@@ -1060,51 +1048,15 @@ namespace Step9
// Now for the details:
template <int dim>
void
- GradientEstimation::estimate_interval (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution,
- const IndexInterval &index_interval,
- Vector<float> &error_per_cell)
- {
- // First we need a way to extract the values of the given finite element
- // function at the center of the cells. As usual with values of finite
- // element functions, we use an object of type FEValues, and
- // we use (or mis-use in this case) the midpoint quadrature rule to get at
- // the values at the center. Note that the FEValues object
- // only needs to compute the values at the centers, and the location of
- // the quadrature points in real space in order to get at the vectors
- // y.
- QMidpoint<dim> midpoint_rule;
- FEValues<dim> fe_midpoint_value (dof_handler.get_fe(),
- midpoint_rule,
- update_values | update_quadrature_points);
-
- // Then we need space for the tensor Y, which is the sum of
+ GradientEstimation::estimate_cell (const SynchronousIterators<std_cxx1x::tuple<typename DoFHandler<dim>::active_cell_iterator,Vector<float>::iterator> > &cell,
+ EstimateScratchData<dim> &scratch_data,
+ const EstimateCopyData &copy_data)
+ {
+ // We need space for the tensor Y, which is the sum of
// outer products of the y-vectors.
Tensor<2,dim> Y;
- // Then define iterators into the cells and into the output vector, which
- // are to be looped over by the present instance of this function. We get
- // start and end iterators over cells by setting them to the first active
- // cell and advancing them using the given start and end index. Note that
- // we can use the advance function of the standard C++
- // library, but that we have to cast the distance by which the iterator is
- // to be moved forward to a signed quantity in order to avoid warnings by
- // the compiler.
- typename DoFHandler<dim>::active_cell_iterator cell, endc;
-
- cell = dof_handler.begin_active();
- advance (cell, static_cast<signed int>(index_interval.first));
-
- endc = dof_handler.begin_active();
- advance (endc, static_cast<signed int>(index_interval.second));
-
- // Getting an iterator into the output array is simpler. We don't need an
- // end iterator, as we always move this iterator forward by one element
- // for each cell we are on, but stop the loop when we hit the end cell, so
- // we need not have an end element for this iterator.
- Vector<float>::iterator
- error_on_this_cell = error_per_cell.begin() + index_interval.first;
-
// Then we allocate a vector to hold iterators to all active neighbors of
// a cell. We reserve the maximal number of active neighbors in order to
@@ -1114,205 +1066,203 @@ namespace Step9
active_neighbors.reserve (GeometryInfo<dim>::faces_per_cell *
GeometryInfo<dim>::max_children_per_face);
- // Well then, after all these preliminaries, lets start the computations:
- for (; cell!=endc; ++cell, ++error_on_this_cell)
- {
- // First initialize the FEValues object, as well as the
- // Y tensor:
- fe_midpoint_value.reinit (cell);
- Y.clear ();
-
- // Then allocate the vector that will be the sum over the y-vectors
- // times the approximate directional derivative:
- Tensor<1,dim> projected_gradient;
-
-
- // Now before going on first compute a list of all active neighbors of
- // the present cell. We do so by first looping over all faces and see
- // whether the neighbor there is active, which would be the case if it
- // is on the same level as the present cell or one level coarser (note
- // that a neighbor can only be once coarser than the present cell, as
- // we only allow a maximal difference of one refinement over a face in
- // deal.II). Alternatively, the neighbor could be on the same level
- // and be further refined; then we have to find which of its children
- // are next to the present cell and select these (note that if a child
- // of of neighbor of an active cell that is next to this active cell,
- // needs necessarily be active itself, due to the one-refinement rule
- // cited above).
- //
- // Things are slightly different in one space dimension, as there the
- // one-refinement rule does not exist: neighboring active cells may
- // differ in as many refinement levels as they like. In this case, the
- // computation becomes a little more difficult, but we will explain
- // this below.
- //
- // Before starting the loop over all neighbors of the present cell, we
- // have to clear the array storing the iterators to the active
- // neighbors, of course.
- active_neighbors.clear ();
- for (unsigned int face_no=0; face_no<GeometryInfo<dim>::faces_per_cell; ++face_no)
- if (! cell->at_boundary(face_no))
+ typename DoFHandler<dim>::active_cell_iterator cell_it(std_cxx1x::get<0>(cell.iterators));
+
+ // First initialize the FEValues object, as well as the
+ // Y tensor:
+ scratch_data.fe_midpoint_value.reinit (cell_it);
+
+ // Then allocate the vector that will be the sum over the y-vectors
+ // times the approximate directional derivative:
+ Tensor<1,dim> projected_gradient;
+
+
+ // Now before going on first compute a list of all active neighbors of
+ // the present cell. We do so by first looping over all faces and see
+ // whether the neighbor there is active, which would be the case if it
+ // is on the same level as the present cell or one level coarser (note
+ // that a neighbor can only be once coarser than the present cell, as
+ // we only allow a maximal difference of one refinement over a face in
+ // deal.II). Alternatively, the neighbor could be on the same level
+ // and be further refined; then we have to find which of its children
+ // are next to the present cell and select these (note that if a child
+ // of a neighbor of an active cell is next to this active cell, it
+ // must necessarily be active itself, due to the one-refinement rule
+ // cited above).
+ //
+ // Things are slightly different in one space dimension, as there the
+ // one-refinement rule does not exist: neighboring active cells may
+ // differ in as many refinement levels as they like. In this case, the
+ // computation becomes a little more difficult, but we will explain
+ // this below.
+ //
+ // Before starting the loop over all neighbors of the present cell, we
+ // have to clear the array storing the iterators to the active
+ // neighbors, of course.
+ active_neighbors.clear ();
+ for (unsigned int face_no=0; face_no<GeometryInfo<dim>::faces_per_cell; ++face_no)
+ if (! std_cxx1x::get<0>(cell.iterators)->at_boundary(face_no))
+ {
+ // First define an abbreviation for the iterator to the face and
+ // the neighbor
+ const typename DoFHandler<dim>::face_iterator
+ face = std_cxx1x::get<0>(cell.iterators)->face(face_no);
+ const typename DoFHandler<dim>::cell_iterator
+ neighbor = std_cxx1x::get<0>(cell.iterators)->neighbor(face_no);
+
+ // Then check whether the neighbor is active. If it is, then it
+ // is on the same level or one level coarser (if we are not in
+ // 1D), and we are interested in it in any case.
+ if (neighbor->active())
+ active_neighbors.push_back (neighbor);
+ else
{
- // First define an abbreviation for the iterator to the face and
- // the neighbor
- const typename DoFHandler<dim>::face_iterator
- face = cell->face(face_no);
- const typename DoFHandler<dim>::cell_iterator
- neighbor = cell->neighbor(face_no);
-
- // Then check whether the neighbor is active. If it is, then it
- // is on the same level or one level coarser (if we are not in
- // 1D), and we are interested in it in any case.
- if (neighbor->active())
- active_neighbors.push_back (neighbor);
- else
+ // If the neighbor is not active, then check its children.
+ if (dim == 1)
{
- // If the neighbor is not active, then check its children.
- if (dim == 1)
- {
- // To find the child of the neighbor which bounds to the
- // present cell, successively go to its right child if
- // we are left of the present cell (n==0), or go to the
- // left child if we are on the right (n==1), until we
- // find an active cell.
- typename DoFHandler<dim>::cell_iterator
- neighbor_child = neighbor;
- while (neighbor_child->has_children())
- neighbor_child = neighbor_child->child (face_no==0 ? 1 : 0);
-
- // As this used some non-trivial geometrical intuition,
- // we might want to check whether we did it right,
- // i.e. check whether the neighbor of the cell we found
- // is indeed the cell we are presently working
- // on. Checks like this are often useful and have
- // frequently uncovered errors both in algorithms like
- // the line above (where it is simple to involuntarily
- // exchange n==1 for n==0 or
- // the like) and in the library (the assumptions
- // underlying the algorithm above could either be wrong,
- // wrongly documented, or are violated due to an error
- // in the library). One could in principle remove such
- // checks after the program works for some time, but it
- // might be a good things to leave it in anyway to check
- // for changes in the library or in the algorithm above.
- //
- // Note that if this check fails, then this is certainly
- // an error that is irrecoverable and probably qualifies
- // as an internal error. We therefore use a predefined
- // exception class to throw here.
- Assert (neighbor_child->neighbor(face_no==0 ? 1 : 0)==cell,
- ExcInternalError());
-
- // If the check succeeded, we push the active neighbor
- // we just found to the stack we keep:
- active_neighbors.push_back (neighbor_child);
- }
- else
- // If we are not in 1d, we collect all neighbor children
- // `behind' the subfaces of the current face
- for (unsigned int subface_no=0; subface_no<face->n_children(); ++subface_no)
- active_neighbors.push_back (
- cell->neighbor_child_on_subface(face_no, subface_no));
- };
- };
-
- // OK, now that we have all the neighbors, lets start the computation
- // on each of them. First we do some preliminaries: find out about the
- // center of the present cell and the solution at this point. The
- // latter is obtained as a vector of function values at the quadrature
- // points, of which there are only one, of course. Likewise, the
- // position of the center is the position of the first (and only)
- // quadrature point in real space.
- const Point<dim> this_center = fe_midpoint_value.quadrature_point(0);
-
- std::vector<double> this_midpoint_value(1);
- fe_midpoint_value.get_function_values (solution, this_midpoint_value);
-
-
- // Now loop over all active neighbors and collect the data we
- // need. Allocate a vector just like this_midpoint_value
- // which we will use to store the value of the solution in the
- // midpoint of the neighbor cell. We allocate it here already, since
- // that way we don't have to allocate memory repeatedly in each
- // iteration of this inner loop (memory allocation is a rather
- // expensive operation):
- std::vector<double> neighbor_midpoint_value(1);
- typename std::vector<typename DoFHandler<dim>::active_cell_iterator>::const_iterator
- neighbor_ptr = active_neighbors.begin();
- for (; neighbor_ptr!=active_neighbors.end(); ++neighbor_ptr)
- {
- // First define an abbreviation for the iterator to the active
- // neighbor cell:
- const typename DoFHandler<dim>::active_cell_iterator
- neighbor = *neighbor_ptr;
-
- // Then get the center of the neighbor cell and the value of the
- // finite element function thereon. Note that for this information
- // we have to reinitialize the FEValues object for
- // the neighbor cell.
- fe_midpoint_value.reinit (neighbor);
- const Point<dim> neighbor_center = fe_midpoint_value.quadrature_point(0);
-
- fe_midpoint_value.get_function_values (solution,
- neighbor_midpoint_value);
-
- // Compute the vector y connecting the centers of the
- // two cells. Note that as opposed to the introduction, we denote
- // by y the normalized difference vector, as this is
- // the quantity used everywhere in the computations.
- Point<dim> y = neighbor_center - this_center;
- const double distance = std::sqrt(y.square());
- y /= distance;
-
- // Then add up the contribution of this cell to the Y matrix...
- for (unsigned int i=0; i<dim; ++i)
- // ... we need to have passed over vectors y which
- // span the whole space, otherwise we would not have all components of
- // the gradient. This is indicated by the invertibility of the matrix.
- //
- // If the matrix should not be invertible, this means that the present
- // cell had an insufficient number of active neighbors. In contrast to
- // all previous cases, where we raised exceptions, this is, however,
- // not a programming error: it is a runtime error that can happen in
- // optimized mode even if it ran well in debug mode, so it is
- // reasonable to try to catch this error also in optimized mode. For
- // this case, there is the AssertThrow macro: it checks
- // the condition like the Assert macro, but not only in
- // debug mode; it then outputs an error message, but instead of
- // terminating the program as in the case of the Assert
- // macro, the exception is thrown using the throw command
- // of C++. This way, one has the possibility to catch this error and
- // take reasonable counter actions. One such measure would be to
- // refine the grid globally, as the case of insufficient directions
- // can not occur if every cell of the initial grid has been refined at
- // least once.
- AssertThrow (determinant(Y) != 0,
- ExcInsufficientDirections());
-
- // If, on the other hand the matrix is invertible, then invert it,
- // multiply the other quantity with it and compute the estimated error
- // using this quantity and the right powers of the mesh width:
- const Tensor<2,dim> Y_inverse = invert(Y);
-
- Point<dim> gradient;
- contract (gradient, Y_inverse, projected_gradient);
-
- *error_on_this_cell = (std::pow(cell->diameter(),
- 1+1.0*dim/2) *
- std::sqrt(gradient.square()));
- };
+ // To find the child of the neighbor which bounds to the
+ // present cell, successively go to its right child if
+ // we are left of the present cell (n==0), or go to the
+ // left child if we are on the right (n==1), until we
+ // find an active cell.
+ typename DoFHandler<dim>::cell_iterator
+ neighbor_child = neighbor;
+ while (neighbor_child->has_children())
+ neighbor_child = neighbor_child->child (face_no==0 ? 1 : 0);
+
+ // As this used some non-trivial geometrical intuition,
+ // we might want to check whether we did it right,
+ // i.e. check whether the neighbor of the cell we found
+ // is indeed the cell we are presently working
+ // on. Checks like this are often useful and have
+ // frequently uncovered errors both in algorithms like
+ // the line above (where it is simple to involuntarily
+ // exchange n==1 for n==0 or
+ // the like) and in the library (the assumptions
+ // underlying the algorithm above could either be wrong,
+ // wrongly documented, or are violated due to an error
+ // in the library). One could in principle remove such
+ // checks after the program works for some time, but it
+ // might be a good thing to leave it in anyway to check
+ // for changes in the library or in the algorithm above.
+ //
+ // Note that if this check fails, then this is certainly
+ // an error that is irrecoverable and probably qualifies
+ // as an internal error. We therefore use a predefined
+ // exception class to throw here.
+ Assert (neighbor_child->neighbor(face_no==0 ? 1 : 0)
+ ==std_cxx1x::get<0>(cell.iterators),ExcInternalError());
+
+ // If the check succeeded, we push the active neighbor
+ // we just found to the stack we keep:
+ active_neighbors.push_back (neighbor_child);
+ }
+ else
+ // If we are not in 1d, we collect all neighbor children
+ // `behind' the subfaces of the current face
+ for (unsigned int subface_no=0; subface_no<face->n_children(); ++subface_no)
+ active_neighbors.push_back (
+ std_cxx1x::get<0>(cell.iterators)->neighbor_child_on_subface(face_no,subface_no));
+ }
+ }
+
+ // OK, now that we have all the neighbors, lets start the computation
+ // on each of them. First we do some preliminaries: find out about the
+ // center of the present cell and the solution at this point. The
+ // latter is obtained as a vector of function values at the quadrature
+ // points, of which there are only one, of course. Likewise, the
+ // position of the center is the position of the first (and only)
+ // quadrature point in real space.
+ const Point<dim> this_center = scratch_data.fe_midpoint_value.quadrature_point(0);
+
+ std::vector<double> this_midpoint_value(1);
+ scratch_data.fe_midpoint_value.get_function_values (scratch_data.solution, this_midpoint_value);
+
+
+ // Now loop over all active neighbors and collect the data we
+ // need. Allocate a vector just like this_midpoint_value
+ // which we will use to store the value of the solution in the
+ // midpoint of the neighbor cell. We allocate it here already, since
+ // that way we don't have to allocate memory repeatedly in each
+ // iteration of this inner loop (memory allocation is a rather
+ // expensive operation):
+ std::vector<double> neighbor_midpoint_value(1);
+ typename std::vector