# deal.IIConfig.cmake
# deal.IIVersionConfig.cmake
#
-# and copies it (a) to the build directory and (b) prepares it for later
-# installation.
+# We support two ways in which deal.II can be used - directly from the
+# build directory or after installation. So we have to prepare two
+# distinct setups.
#
#
#
# For binary dir:
#
-SET(CONFIG_INCLUDE_DIRS_BINARY
+
+SET(CONFIG_BUILD_DIR TRUE)
+SET(CONFIG_INCLUDE_DIRS
\${DEAL_II_PATH}/include
\${DEAL_II_PATH}/include/deal.II
${CMAKE_SOURCE_DIR}/include/
${deal_ii_source_includes}
${DEAL_II_USER_INCLUDE_DIRS}
)
+CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Config.cmake.in
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ @ONLY
+ )
+CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/ConfigVersion.cmake.in
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
+ @ONLY
+ )
+
#
# For installation:
#
-SET(CONFIG_INCLUDE_DIRS_INSTALLATION
+
+SET(CONFIG_BUILD_DIR FALSE)
+SET(CONFIG_INCLUDE_DIRS
\${DEAL_II_PATH}/\${DEAL_II_INCLUDE_RELDIR}
\${DEAL_II_PATH}/\${DEAL_II_INCLUDE_RELDIR}/deal.II
\${DEAL_II_PATH}/\${DEAL_II_INCLUDE_RELDIR}/deal.II/bundled
)
CONFIGURE_FILE(
${CMAKE_CURRENT_SOURCE_DIR}/Config.cmake.in
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ @ONLY
+ )
+CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/ConfigVersion.cmake.in
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
@ONLY
)
+INSTALL(FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
+ DESTINATION ${DEAL_II_PROJECT_CONFIG_RELDIR}
+ COMPONENT library
+ )
+
#
-# Append feature configuration:
+# Append feature configuration to both configuration files:
#
-SET(_file
+
+SET(_files
${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
+ ${CMAKE_CURRENT_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
)
-FILE(APPEND ${_file}
- "\n\n#\n# Feature configuration:\n#\n\n"
- )
+FOREACH(_file ${_files})
+ FILE(APPEND ${_file}
+ "\n\n#\n# Feature configuration:\n#\n\n"
+ )
+ENDFOREACH()
GET_CMAKE_PROPERTY(res VARIABLES)
FOREACH(var ${res})
IF(var MATCHES "DEAL_II_WITH")
- FILE(APPEND ${_file}
- "SET(${var} ${${var}})\n"
- )
+ FOREACH(_file ${_files})
+ FILE(APPEND ${_file}
+ "SET(${var} ${${var}})\n"
+ )
+ ENDFOREACH()
ENDIF()
ENDFOREACH()
-CONFIGURE_FILE(
- ${CMAKE_CURRENT_SOURCE_DIR}/ConfigVersion.cmake.in
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
- @ONLY
- )
-
-INSTALL(FILES
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}ConfigVersion.cmake
- DESTINATION ${DEAL_II_PROJECT_CONFIG_RELDIR}
- COMPONENT library
- )
-
########################################################################
# #
# #
########################################################################
-
IF(DEAL_II_COMPONENT_COMPAT_FILES)
#
# Transform some cmake lists into a string that the old Makefile
#
# For binary dir:
#
- SET(MAKEFILE_D_BINARY ${CMAKE_BINARY_DIR})
- TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS_BINARY "${CMAKE_INCLUDE_FLAG_CXX}"
+ SET(MAKEFILE_D ${CMAKE_BINARY_DIR})
+ TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS "${CMAKE_INCLUDE_FLAG_CXX}"
$(D)/install
$(D)/install/deal.II
${CMAKE_SOURCE_DIR}/include/
${deal_ii_source_includes}
${DEAL_II_USER_INCLUDE_DIRS}
)
+ CONFIGURE_FILE(
+ ${CMAKE_CURRENT_SOURCE_DIR}/Make.global_options.in
+ ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/Make.global_options
+ )
#
# For installation:
#
- SET(MAKEFILE_D_INSTALLATION ${CMAKE_INSTALL_PREFIX})
- TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS_INSTALLATION "${CMAKE_INCLUDE_FLAG_CXX}"
+ SET(MAKEFILE_D ${CMAKE_INSTALL_PREFIX})
+ TO_STRING_AND_ADD_PREFIX(MAKEFILE_INCLUDE_DIRS "${CMAKE_INCLUDE_FLAG_CXX}"
$(D)/${DEAL_II_INCLUDE_RELDIR}
$(D)/${DEAL_II_INCLUDE_RELDIR}/deal.II
$(D)/${DEAL_II_INCLUDE_RELDIR}/deal.II/bundled
)
CONFIGURE_FILE(
${CMAKE_CURRENT_SOURCE_DIR}/Make.global_options.in
- ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/Make.global_options
+ ${CMAKE_CURRENT_BINARY_DIR}/Make.global_options
)
-
INSTALL(FILES
${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/template-arguments
- ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/Make.global_options
+ ${CMAKE_CURRENT_BINARY_DIR}/Make.global_options
DESTINATION ${DEAL_II_COMMON_RELDIR}
COMPONENT compat_files
)
ENDIF()
-
-#
-# Finally, add a target to create the "binary" file in
-# ${DEAL_II_PROJECT_CONFIG_RELDIR} and add it to the "all" target:
-#
-ADD_CUSTOM_TARGET(setup_build_dir ALL
- COMMAND ${CMAKE_COMMAND} -E touch
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/binary
- COMMAND ${CMAKE_COMMAND} -E touch
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
- COMMENT "Update build directory"
- )
-FOREACH(_build ${DEAL_II_BUILD_TYPES})
- ADD_DEPENDENCIES(setup_build_dir ${DEAL_II_BASE_NAME}${DEAL_II_${_build}_SUFFIX})
-ENDFOREACH()
-
-#
-# And a script to remove it upon installation from the install prefix:
-# This is necessary if somebody wants to install into the build directory
-# (yes this is a valid use case...).
-#
-INSTALL(CODE
- "
- FILE(REMOVE ${CMAKE_INSTALL_PREFIX}/${DEAL_II_PROJECT_CONFIG_RELDIR}/binary)
- EXECUTE_PROCESS(COMMAND ${CMAKE_COMMAND} -E touch
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake
- )
- "
- COMPONENT library
- )
GET_FILENAME_COMPONENT(_path "${_path}" PATH)
ENDWHILE()
-#
-# Is this project reside in a build directory or at an installed location?
-#
-
-IF(EXISTS ${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/binary)
- SET(DEAL_II_BUILD_DIR TRUE)
-ENDIF()
-
#
# Print a message after inclusion of this file:
#
SET(DEAL_II_PROJECT_CONFIG_INCLUDED TRUE)
+SET(DEAL_II_BUILD_DIR @CONFIG_BUILD_DIR@)
+
IF(NOT ${DEAL_II_PACKAGE_NAME}_FIND_QUIETLY)
IF(DEAL_II_BUILD_DIR)
MESSAGE(STATUS
#
# Information about include directories and libraries
#
-IF(DEAL_II_BUILD_DIR)
- SET(DEAL_II_INCLUDE_DIRS "@CONFIG_INCLUDE_DIRS_BINARY@")
-ELSE()
- SET(DEAL_II_INCLUDE_DIRS "@CONFIG_INCLUDE_DIRS_INSTALLATION@")
-ENDIF()
+
+# Full list of include directories:
+SET(DEAL_II_INCLUDE_DIRS "@CONFIG_INCLUDE_DIRS@")
# Full list of libraries for the debug target:
SET(DEAL_II_LIBRARIES_DEBUG "@CONFIG_LIBRARIES_DEBUG@")
# Information about library targets
#
-IF(DEAL_II_BUILD_DIR)
- SET(DEAL_II_EXECUTABLE_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake")
- SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}BuildTargets.cmake")
-ELSE()
- SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake")
-ENDIF()
+# The library targets file:
+SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake")
# The Debug target:
SET(DEAL_II_TARGET_DEBUG "@CONFIG_TARGET_DEBUG@")
#
-ifeq ($(wildcard $(D)/@DEAL_II_PROJECT_CONFIG_RELDIR@/binary),)
- D = @MAKEFILE_D_INSTALLATION@
-else
- D = @MAKEFILE_D_BINARY@
-endif
+D = @MAKEFILE_D@
CXX = @CMAKE_CXX_COMPILER@
CXX-ID = @CMAKE_CXX_COMPILER_ID@
# Include paths as command line flags
-ifeq ($(wildcard $(D)/@DEAL_II_PROJECT_CONFIG_RELDIR@/binary),)
- INCLUDE = @MAKEFILE_INCLUDE_DIRS_INSTALLATION@
-else
- INCLUDE = @MAKEFILE_INCLUDE_DIRS_BINARY@
-endif
+INCLUDE = @MAKEFILE_INCLUDE_DIRS@
# compiler flags for debug and optimized mode
UNSET(PETSC_INCLUDE_DIR_ARCH CACHE)
UNSET(PETSC_INCLUDE_DIR_COMMON CACHE)
UNSET(PETSC_LIBRARY CACHE)
+ UNSET(PETSC_PETSCVARIABLES CACHE)
SET(PETSC_DIR "" CACHE PATH
"An optional hint to a PETSc directory"
)
FIND_PACKAGE(HDF5)
IF(HDF5_FOUND)
- IF( (HDF5_WITH_MPI AND DEAL_II_WITH_MPI)
- OR
- (NOT HDF5_WITH_MPI AND NOT DEAL_II_WITH_MPI))
+
+ IF( (HDF5_WITH_MPI AND DEAL_II_WITH_MPI) OR
+ (NOT HDF5_WITH_MPI AND NOT DEAL_II_WITH_MPI) )
SET(${var} TRUE)
+
ELSE()
+
MESSAGE(STATUS "Insufficient hdf5 installation found: "
"hdf5 has to be configured with the same MPI configuration as deal.II."
)
" DEAL_II_WITH_MPI = ${DEAL_II_WITH_MPI}\n"
" HDF5_WITH_MPI = ${HDF5_WITH_MPI}\n"
)
+ UNSET(HDF5_HL_LIBRARY CACHE)
+ UNSET(HDF5_INCLUDE_DIR CACHE)
+ UNSET(HDF5_LIBRARY CACHE)
+ UNSET(HDF5_PUBCONF CACHE)
ENDIF()
+ ENDIF()
+ENDMACRO()
+
+MACRO(FEATURE_HDF5_CONFIGURE_EXTERNAL)
INCLUDE_DIRECTORIES(${HDF5_INCLUDE_DIRS})
+
+ # The user has to know the location of the HDF5 headers as well:
LIST(APPEND DEAL_II_USER_INCLUDE_DIRS ${HDF5_INCLUDE_DIRS})
- DEAL_II_APPEND_LIBRARIES(${HDF5_LIBRARIES})
-
- ENDIF()
+ DEAL_II_APPEND_LIBRARIES(${HDF5_LIBRARIES})
ENDMACRO()
+
CONFIGURE_FEATURE(HDF5)
IF(NOT TARGET ${_target})
# only add the target once
- ADD_EXECUTABLE(${_target} EXCLUDE_FROM_ALL ${_test_name}.cc)
+ #
+ # Add a "guard file" rule: The purpose of interrupt_guard.cc is to
+ # force a complete rerun of this test (BUILD, RUN and DIFF stage)
+ # if interrupt_guard.cc is removed by run_test.cmake due to an
+ # interruption.
+ #
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
+ COMMAND touch ${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
+ )
+
+ ADD_EXECUTABLE(${_target} EXCLUDE_FROM_ALL ${_test_name}.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
+ )
SET_TARGET_PROPERTIES(${_target} PROPERTIES
LINK_FLAGS "${DEAL_II_LINKER_FLAGS} ${DEAL_II_LINKER_FLAGS_${_build}}"
-DEXPECT=${_expect}
-DADDITIONAL_OUTPUT=${ARGN}
-DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR}
+ -DGUARD_FILE=${CMAKE_CURRENT_BINARY_DIR}/${_target}/interrupt_guard.cc
-P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake
WORKING_DIRECTORY ${_test_directory}
)
"EXECUTE_PROCESS(COMMAND ${TARGET_RUN}\n"
" RESULT_VARIABLE _return_value\n"
" )\n"
- "IF(NOT \"\${_return_value}\" STREQUAL "0")\n"
+ "IF(NOT \"\${_return_value}\" STREQUAL \"0\")\n"
" MESSAGE(SEND_ERROR \"\nProgram terminated with exit code: \${_return_value}\")\n"
"ENDIF()\n"
)
FOREACH (_inst_in_file ${_inst_in_files})
STRING(REGEX REPLACE "\\.in$" "" _inst_file "${_inst_in_file}" )
+ SET(_dependency)
+ IF(TARGET expand_instantiations)
+ SET(_dependency expand_instantiations)
+ ENDIF()
+
ADD_CUSTOM_COMMAND(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_inst_file}
- DEPENDS expand_instantiations
+ DEPENDS ${_dependency}
${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/template-arguments
${CMAKE_CURRENT_SOURCE_DIR}/${_inst_in_file}
COMMAND expand_instantiations
SET_IF_EMPTY(ARPACK_DIR "$ENV{ARPACK_DIR}")
-#
-# ARPACK needs LAPACK and BLAS as dependencies:
-#
-FIND_PACKAGE(DEALII_LAPACK)
-
FIND_LIBRARY(ARPACK_LIBRARY
NAMES arpack
HINTS
lib${LIB_SUFFIX} lib64 lib
)
+FIND_FILE(HDF5_PUBCONF H5pubconf.h
+ HINTS
+ ${HDF5_INCLUDE_DIR}
+ ${HDF5_DIR}
+ PATH_SUFFIXES
+ hdf5 hdf5/include include/hdf5 include
+ )
+
SET(_output ${HDF5_HL_LIBRARY} ${HDF5_LIBRARY})
FIND_PACKAGE_HANDLE_STANDARD_ARGS(HDF5 DEFAULT_MSG
_output # Cosmetic: Gives nice output
HDF5_HL_LIBRARY
HDF5_LIBRARY
HDF5_INCLUDE_DIR
+ HDF5_PUBCONF
)
MARK_AS_ADVANCED(
HDF5_LIBRARY
HDF5_HL_LIBRARY
HDF5_INCLUDE_DIR
+ HDF5_PUBCONF
)
IF(HDF5_FOUND)
#
# Is hdf5 compiled with support for mpi?
#
- FILE(STRINGS "${HDF5_INCLUDE_DIR}/H5pubconf.h" HDF5_MPI_STRING
+ FILE(STRINGS ${HDF5_PUBCONF} HDF5_MPI_STRING
REGEX "#define.*H5_HAVE_PARALLEL 1")
IF("${HDF5_MPI_STRING}" STREQUAL "")
SET(HDF5_WITH_MPI FALSE)
# (We'll rely on the user of FindMUMPS, setting up mpi *cough*)
#
FIND_PACKAGE(SCALAPACK) # which will also include lapack and blas
-FIND_PACKAGE(METIS)
FIND_PATH(MUMPS_INCLUDE_DIR dmumps_c.h
lib${LIB_SUFFIX} lib64 lib
)
-#
-# SCALAPACK needs LAPACK and BLAS as dependency, search for them with the help
-# of the LAPACK find module:
-#
-FIND_PACKAGE(DEALII_LAPACK)
-
#
# Well, depending on the version of scalapack and the distribution it might
# be necessary to search for blacs, too. So we do this in a very
SET_IF_EMPTY(${_comp}_DIR "$ENV{${_comp}_DIR}")
ENDFOREACH()
-
-#
-# UMFPACK depends on BLAS and LAPACK, so search for them:
-# TODO: There might be an external dependency for metis, ignore this for
-# now.
-#
-FIND_PACKAGE(DEALII_LAPACK)
-FIND_PACKAGE(METIS)
-
#
# Two macros to make life easier:
#
COMPONENT compat_files
)
ENDIF()
-
- EXPORT(TARGETS expand_instantiations
- FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
- )
ENDIF()
IF(DEAL_II_COMPONENT_COMPAT_FILES)
DESTINATION ${DEAL_II_COMMON_RELDIR}/scripts
COMPONENT compat_files
)
- EXPORT(TARGETS make_dependencies report_features
- FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
- APPEND
- )
ENDIF()
#
# ADDITIONAL_OUTPUT - A list of additional output lines that should be printed
#
+# GUARD_FILE - used to detect a forced interruption of this script: On
+# startup the backed up file ${GUARD_FILE}_bck is put back
+# in place as ${GUARD_FILE} and on exit ${GUARD_FILE} is
+# backed up as ${GUARD_FILE}_bck. If on startup a stale
+# ${GUARD_FILE} is found, it is deleted.
+#
+
+IF(NOT "${GUARD_FILE}" STREQUAL "" AND EXISTS ${GUARD_FILE})
+ #
+ # Guard file still exists, so this script must have been interrupted.
+ # Remove guard file to force a complete rerun:
+ #
+ EXECUTE_PROCESS(COMMAND rm -f ${GUARD_FILE})
+ELSEIF(NOT "${GUARD_FILE}" STREQUAL "" AND EXISTS ${GUARD_FILE}_bck)
+ #
+ # A backed up guard file exists. Put it back in place:
+ #
+ EXECUTE_PROCESS(COMMAND mv ${GUARD_FILE}_bck ${GUARD_FILE})
+ENDIF()
+
IF("${EXPECT}" STREQUAL "")
SET(EXPECT "PASSED")
MESSAGE("=============================== OUTPUT END ===============================")
+#
+# Back up guard file:
+#
+
+IF(NOT "${GUARD_FILE}" STREQUAL "" AND EXISTS ${GUARD_FILE})
+ EXECUTE_PROCESS(COMMAND mv ${GUARD_FILE} ${GUARD_FILE}_bck)
+ENDIF()
+
#
# Bail out:
#
IF(NOT "${_stage}" STREQUAL "${EXPECT}")
- MESSAGE("Excpected stage ${EXPECT} - aborting")
+ MESSAGE("Expected stage ${EXPECT} - aborting")
MESSAGE(FATAL_ERROR "*** abort")
ELSEIF(NOT "${_stage}" STREQUAL "PASSED")
- MESSAGE("Excpected stage ${EXPECT} - test considered successful.")
+ MESSAGE("Expected stage ${EXPECT} - test considered successful.")
ENDIF()
OFF
)
-IF("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}")
- SET(CMAKE_INSTALL_PREFIX
- "${CMAKE_BINARY_DIR}"
- CACHE STRING
- "Install path prefix, prepended onto install directories."
- )
-ENDIF()
-
########################################################################
# #
# #
########################################################################
+#
+# We do not support installation into the binary directory any more ("too
+# much pain, not enough profit"):
+#
+
+IF("${CMAKE_BINARY_DIR}" STREQUAL "${CMAKE_INSTALL_PREFIX}")
+ MESSAGE(FATAL_ERROR "
+Error: CMAKE_INSTALL_PREFIX is equal to CMAKE_BINARY_DIR.
+It is not possible to install into the build directory. Please set
+CMAKE_INSTALL_PREFIX to a designated install directory different from
+CMAKE_BINARY_DIR.
+(Please note that you can use deal.II directly out of a build directory
+if this is what you tried to do.)
+"
+ )
+ENDIF()
+
+
GET_CMAKE_PROPERTY(_res VARIABLES)
FOREACH(_var ${_res})
#
# test - runs a minimal set of tests
#
# setup_tests - sets up the testsuite subprojects
+# regen_tests - reruns the configure stage in every testsuite subproject
# clean_tests - runs the 'clean' target in every testsuite subproject
# prune_tests - removes all testsuite subprojects
#
SET_IF_EMPTY(DEAL_II_DOCREADME_RELDIR "")
SET_IF_EMPTY(DEAL_II_EXAMPLES_RELDIR "examples")
SET_IF_EMPTY(DEAL_II_EXECUTABLE_RELDIR "bin")
- IF( "${CMAKE_INSTALL_PREFIX}" STREQUAL "${CMAKE_BINARY_DIR}" AND
- (NOT "${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}") )
- #
- # Ensure that in case of an out of source build BINARY_DIR/include !=
- # INSTALL_PREFIX/include is always true. Otherwise stale headers might
- # get included resulting in a failing build.
- #
- SET_IF_EMPTY(DEAL_II_INCLUDE_RELDIR "include/install")
- ELSE()
- SET_IF_EMPTY(DEAL_II_INCLUDE_RELDIR "include")
- ENDIF()
+ SET_IF_EMPTY(DEAL_II_INCLUDE_RELDIR "include")
SET_IF_EMPTY(DEAL_II_LIBRARY_RELDIR "lib")
SET_IF_EMPTY(DEAL_II_PROJECT_CONFIG_RELDIR "${DEAL_II_LIBRARY_RELDIR}/cmake/${DEAL_II_PROJECT_CONFIG_NAME}")
ELSE()
# #
########################################################################
-#
-# Library search order:
-#
IF(DEAL_II_PREFER_STATIC_LIBS)
+ #
# Invert the search order for libraries when DEAL_II_PREFER_STATIC_LIBS
# is set. This will prefer static archives instead of shared libraries:
+ #
LIST(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
ENDIF()
-#
-# Cross compilation stuff:
-#
IF(CMAKE_CROSSCOMPILING)
+ #
# Disable platform introspection when cross compiling
+ #
SET(DEAL_II_ALLOW_PLATFORM_INTROSPECTION OFF CACHE BOOL "" FORCE)
-
- # Import native expand_instantiations for use in cross compilation:
- SET(DEAL_II_NATIVE "DEAL_II_NATIVE-NOTFOUND" CACHE FILEPATH
- "A pointer to a native deal.Ii build directory"
- )
- IF(DEAL_II_NATIVE MATCHES "-NOTFOUND")
- MESSAGE(FATAL_ERROR
- "Please set the CMake variable DEAL_II_NATIVE to a valid path that points to a native deal.II build directory"
- )
- ENDIF()
- INCLUDE(${DEAL_II_NATIVE}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake)
ENDIF()
#
# Cleanup deal.IITargets.cmake in the build directory:
#
-FILE(REMOVE
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}BuildTargets.cmake
+FILE(WRITE
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake
+ ""
)
#
# Pick up values from environment:
#
-SET_IF_EMPTY(DEAL_II_BINARY_DIR $ENV{DEAL_II_BINARY_DIR})
-SET_IF_EMPTY(DEAL_II_BINARY_DIR $ENV{DEAL_II_DIR})
-SET_IF_EMPTY(DEAL_II_SOURCE_DIR $ENV{DEAL_II_SOURCE_DIR})
-SET_IF_EMPTY(TEST_DIFF $ENV{TEST_DIFF})
-SET_IF_EMPTY(TEST_TIME_LIMIT $ENV{TEST_TIME_LIMIT})
-SET_IF_EMPTY(TEST_PICKUP_REGEX $ENV{TEST_PICKUP_REGEX})
-SET_IF_EMPTY(TEST_OVERRIDE_LOCATION $ENV{TEST_OVERRIDE_LOCATION})
+FOREACH(_var
+ DEAL_II_BINARY_DIR
+ DEAL_II_SOURCE_DIR
+ TEST_DIFF
+ TEST_TIME_LIMIT
+ TEST_PICKUP_REGEX
+ TEST_OVERRIDE_LOCATION
+ )
+ # Environment wins:
+ IF(DEFINED ENV{${_var}})
+ SET(${_var} $ENV{${_var}})
+ ENDIF()
+  IF(NOT "${${_var}}" STREQUAL "")
+ SET(${_var} "${${_var}}" CACHE STRING "")
+ ENDIF()
+ENDFOREACH()
#
# We need deal.II and Perl as external packages:
# And finally, enable testing:
#
ENABLE_TESTING()
+
+#
+# A custom target that does absolutely nothing. It is used in the main
+# project to trigger a "make rebuild_cache" if necessary.
+#
+ADD_CUSTOM_TARGET(regenerate)
)
EXPORT(TARGETS mesh_converter_exe
FILE
- ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
+ ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake
APPEND
)
COMPONENT parameter_gui
)
EXPORT(TARGETS parameter_gui_exe
- FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake
+ FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake
APPEND
)
<h1>deal.II Authors</h1>
<p>
- The <acronym>deal.II</acronym> project was initially started by
- members of the Numerical Methods group at the Heidelberg University,
- Germany but has since become a global, open source project.
+ <acronym>deal.II</acronym> is a global project. It is
+ administered by a group of <i>principal
+ developers</i>. Technical decisions are made by the principal
+ developers and a group of <i>developers</i> consisting of
+ long-term contributors with a global overview of the library. A
+ large number of <i>contributors</i> have provided substantial
+ patches over the years.
</p>
<p>
- The current maintainers of the library are:
+ These three groups are listed below.
</p>
+
+
+ <h2>Principal developers</h2>
<ul>
<li>
<a href="http://www.math.tamu.edu/~bangerth"
<li>
<a href="http://www.iwr.uni-heidelberg.de/groups/MathSim/"
target="_top">Guido Kanschat</a>,
- Texas A&M University, TX, USA and Universität Heidelberg, Germany
+ Universität Heidelberg, Germany
</li>
</ul>
+
+ <h2>Developers</h2>
+ <ul>
+ <li>
+ <a href="http://people.sissa.it/~heltai/"
+ target="_top">Luca Heltai</a>,
+ SISSA, Trieste, Italy
+ </li>
+
+ <li>
+ <a href="http://www.lnm.mw.tum.de/en/staff/martin-kronbichler/"
+ target="_top">Martin Kronbichler</a>,
+ Technische Universität München, Germany
+ </li>
+
+ <li>
+ <a href="http://ganymed.iwr.uni-heidelberg.de/~maier/"
+ target="_top">Matthias Maier</a>,
+ Universität Heidelberg, Germany
+ </li>
+
+ <li>
+ Bruno Turcksin,
+ Texas A&M University, TX, USA
+ </li>
+
+ <li>
+ <a href="http://bluebox.ippt.pan.pl/~tyoung/"
+ target="_top">Toby Young</a>,
+ Polish Academy of Sciences, Poland
+ </li>
+
+ </ul>
+
+
+
+
+ <h2>Contributors</h2>
+
<p>Many people have contributed to deal.II over the years, some of them very
substantial parts of the library. Their work
is greatly appreciated: no open source project can survive without a
<li><em>Eric Heien:</em>
HDF5 output.
- <li><em>Luca Heltai:</em>
- Gmsh format mesh reader and writer;
- some of the meshes in the GridGenerator class;
- generalization of FilteredMatrix;
- integration of the function parser library;
- cubit journal file to export to ucd mesh format;
- FEFieldFunction and ParsedFunction classes;
- work on the codimension-one meshes, DoFHandler, and finite
- elements;
- singular integration;
- Step-34 tutorial program;
- random bug fixes and enhancements.
-
<li><em>Bärbel Janssen:</em>
Lots of work on multigrid for adaptive meshes; multigrid in the
MeshWorker framework; step-16. Various fixes.
<li><em>Oleh Krehel:</em>
Many documentation fixes.
- <li><em>Martin Kronbichler:</em>
- step-22, step-31, step-32, step-37, step-48, interfaces to
- Trilinos, significant parts of ConstraintMatrix, matrix-free
- computations, support for massively parallel computations, and
- many enhancements in random places.
-
<li><em>Tobias Leicht:</em>
Lots of work on internal data structures: anisotropic refinement
(including step-30), faces
Enhancements in the interface to PETSc. Support for reading GMSH
2.5 format.
- <li><em>Matthias Maier:</em>
- CMake build system for the library and the testsuite. Periodic
- boundary conditions. Enhancements throughout the library.
-
<li><em>Cataldo Manigrasso:</em>
Work on the codimension-one meshes, DoFHandler, and finite
elements.
<li><em>Christophe Trophime:</em>
Packaging and configuration issues.
- <li><em>Bruno Turcksin:</em>
- Extending deal.ii for 64-bit integer support. Converting the
- testsuite to CMake. Various other changes.
-
<li><em>Kainan Wang:</em>
Extending deal.ii for 64-bit integer support.
<li><em>Christian Wülker:</em>
GridOut::write_svg.
- <li><em>Toby D. Young:</em>
- Interfaces to SLEPc; many changes in the interfaces to PETSc;
- MUMPS interface;
- METIS interface.
-
<li><em>Yuhan Zhou:</em>
Input to the curved boundary example in step-49.
</ul>
You have to set up a native deal.II build directory first and run
<code>make expand_instantiations</code> in it. The executable is
needed for the build system (and obviously the cross compiled version
- cannot be used). Assuming you have a working cross compilation
- toolchain, set up a <i>toolchain file</i> next. An example toolchain
- file can be found <a href="Toolchain.sample">here</a>. With that
- invoke <code>cmake</code> e.g. with:
+ cannot be used). Locate the <code>expand_instantiations</code>
+ executable and add its location to the <code>PATH</code>
+ environment variable.
+ </p>
+
+ <p>
+ Assuming you have a working cross compilation toolchain, set up a
+ <a href="Toolchain.sample"><i>toolchain file</i></a> next. With that,
+ invoke <code>cmake</code> with something like:
<pre>
cmake -DCMAKE_TOOLCHAIN_FILE=<...>/Toolchain.sample
- -DDEAL_II_NATIVE=<...>/native/build/dir
-
-DDEAL_II_FORCE_BUNDLED_BOOST=ON
-DDEAL_II_ALLOW_AUTODETECTION=OFF
../deal.II
</pre>
- where <code>DEAL_II_NATIVE</code> points to the toolchain file and
- <code>DEAL_II_NATIVE</code> to the native build directory (which
- <code>expand_instantiations</code> will be picked from). The remaining
- configuration can be adjusted at will, see <a
- href="cmake.html">the documentation</a>.
+ where <code>CMAKE_TOOLCHAIN_FILE</code> points to the toolchain file.
+ The remaining configuration can be adjusted at will, see <a
+ href="cmake.html">the documentation</a>.
</p>
<hr />
Setup can be fine-tuned using the following commands:
<pre>
+ $ make regen_tests - reruns the configure stage in every testsuite subproject
+
$ make clean_tests - runs the 'clean' target in every testsuite subproject
$ make prune_tests - removes all testsuite subprojects
</pre>
</p>
- <p>
- <b>Note:</b> Specifying these options via environment variables is
- volatile, i.e. if <code>make setup_tests</code> is invoked a second
- time without the variables set in environment, the option will be
- reset to the default value. If you want to set these options
- permanently, set them via cmake as CMake variable in the build
- directory:
- <pre>
-
- $ cmake -DTEST_PICKUP_REGEX="<regular expression>" .
- </pre>
- A variable set via cmake always <i>overrides</i> one
- set via environment.
- </p>
-
<a name="run"></a>
<h2>Running the testsuite</h2>
<ol>
+ <li> Changed: step-9, step-13 and step-14 have been converted to use the
+ more modern WorkStream concept for assembling linear systems in parallel.
+ <br>
+ (Bruno Turcksin, Wolfgang Bangerth, 2013/10/26)
+ </li>
+
<li> New: The testsuite is now ported to <a href="http://www.cmake.org/">
CMake</a> and uses CTest as test driver.
<br>
<br>
(Eric Heien, 2013/09/27)
</li>
-
+
<li>
New: DataOutBase::DataOutFilter provides a way to remove duplicate vertices
and values from a solution vector when generating output. Currently it only
<h3>Specific improvements</h3>
<ol>
+ <li>
+ Fixed: the DerivativeApproximation class was not working correctly when
+ used with parallel vectors.
+ <br>
+ (Timo Heister, 2013/10/28)
+ </li>
+
<li>
~Subscriptor and ~GrowingVectorMemory no longer throw an exception (the
former if disable_abort_on_exception was called) to be compatible with the
</li>
<li>
- New: parallel::distributed::BlockVector has now methods update_ghost_values,
+ New: parallel::distributed::BlockVector has now methods update_ghost_values,
compress, set_out_ghosts, and has_ghost_elements that do the respective
operation on each block of parallel::distributed::Vector.
<br>
</li>
<li> <a href="http://www.math.tamu.edu/~bangerth"
target="_top">W. Bangerth</a>,
- <a href="http://www.clemson.edu/~heister"
+ <a href="http://www.math.clemson.edu/~heister"
target="_top">T. Heister</a>
and
<a href="http://www.math.tamu.edu/~kanschat"
target="_top">W. Bangerth</a>,
<a href="http://users.ices.utexas.edu/~carsten/"
target="_top">C. Burstedde</a>,
- <a href="http://www.math.tamu.edu/~heister"
- target="_top">T. Heister</a>,
+ T. Heister,
M. Kronbichler
<br>
<strong>Algorithms and Data Structures for Massively Parallel Generic
element programs. The main reason for this is the size and complexity
of modern research software: applications implementing modern error
estimation concepts and adaptive solution methods tend to become
-rather large. For example, the three largest applications by the main
+rather large. For example, when this program was written in 2002, the
+three largest applications by the main
-authors of deal.II, are at the time of writing of this example
-program:
+authors of deal.II were:
<ol>
<li> a program for solving conservation hyperbolic equations by the
Discontinuous Galerkin Finite Element method: 33,775 lines of
- code;
+ code;
<li> a parameter estimation program: 28,980 lines of code;
<li> a wave equation solver: 21,020 lines of code.
</ol>
-(The library proper - without example programs and
-test suite - has slightly more than 150,000 lines of code as of spring 2002.)
-In the opinion of the author of this example program, the sizes of these
-applications are at the edge of what one person, even an experienced
-programmer, can manage.
+
+(The library proper - without example programs and test suite - has slightly
+more than 150,000 lines of code as of spring 2002. It is of course several
+times larger now.) The sizes of these applications are at the edge of what
+one person, even an experienced programmer, can manage.
mappings.
</ol>
Besides these, and a large number of smaller classes, there are of
-course the following ``tool'' modules:
+course the following "tool" modules:
<ol>
<li>output in various graphical formats;
<li>linear algebra classes.
</ol>
-
+These complexes can also be found as a flow chart on the front page of
+the deal.II manual website.
<ol>
<li>The classes that implement the process of numerically solving the
equation are no more responsible for driving the process of
- ``solving-estimating error-refining-solving again'', but we delegate
+ "solving-estimating error-refining-solving again", but we delegate
this to external functions. This allows first to use it as a
building block in a larger context, where the solution of a
Laplace equation might only be one part (for example, in a
classes that compute the solution.
<li>Separate the description of the test case with which we will
present the program, from the rest of the program.
+<li>Parallelize the assembly of linear systems using the WorkStream
+ facilities. This follows the extensive description that can be
+ found in the @ref threads "Parallel computing with multiple processors accessing shared memory"
+ documentation module. The implementation essentially follows what
+ has already been described in step-9; a rough preview of the
+ resulting call is shown right after this list.
</ol>
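+
+As a rough preview of what this amounts to (the complete call, together
+with the definitions of the <code>local_assemble_matrix()</code> and
+<code>copy_local_to_global()</code> helper functions, appears in the
+commented program below), the hand-written loop over all cells is
+replaced by a single call of the form
+@code
+  WorkStream::run (dof_handler.begin_active(),
+                   dof_handler.end(),
+                   std_cxx1x::bind (&Solver<dim>::local_assemble_matrix,
+                                    this,
+                                    std_cxx1x::_1,
+                                    std_cxx1x::_2,
+                                    std_cxx1x::_3),
+                   std_cxx1x::bind (&Solver<dim>::copy_local_to_global,
+                                    this,
+                                    std_cxx1x::_1,
+                                    std_cxx1x::ref(linear_system)),
+                   AssemblyScratchData(*fe, *quadrature),
+                   AssemblyCopyData());
+@endcode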
implementing the desired mathematical method. However, we must
stress that software design is in part also a subjective matter:
different persons have different programming backgrounds and have
-different opinions about the ``right'' style of programming; this
+different opinions about the "right" style of programming; this
program therefore expresses only what the author considers useful
practice, and is not necessarily a style that you have to adopt in
order to write successful numerical software if you feel uncomfortable
What the program actually does is not even the main point of this
program, the structure of the program is more important. However, in a
few words, a description would be: solve the Laplace equation for a
-given right hand side such that the solution is the function
+given right hand side such that the solution is the function
$u(x,t)=\exp(x+\sin(10y+5x^2))$. The goal of the
computation is to get the value of the solution at the point
$x_0=(0.5,0.5)$, and to compare the accuracy with
implemented the corresponding evaluation class. The results (i.e. the
output) of the program looks as follows:
@code
- Running tests with "global" refinement criterion:
- -------------------------------------------------
- Refinement cycle: 0 1 2 3 4 5 6
- DoFs u(x_0)
- 25 1.2868
- 81 1.6945
- 289 1.4658
- 1089 1.5679
- 4225 1.5882
- 16641 1.5932
- 66049 1.5945
-
- Running tests with "kelly" refinement criterion:
- ------------------------------------------------
- Refinement cycle: 0 1 2 3 4 5 6 7 8 9 10 11
- DoFs u(x_0)
- 25 1.2868
- 47 0.8775
- 89 1.5365
- 165 1.2974
- 316 1.6442
- 589 1.5221
- 1090 1.5724
- 2035 1.5622
- 3754 1.5916
- 7100 1.5876
- 13059 1.5942
- 24749 1.5933
+Running tests with "global" refinement criterion:
+-------------------------------------------------
+Refinement cycle: 0 1 2 3 4 5 6
+DoFs u(x_0)
+ 25 1.2868
+ 81 1.6945
+ 289 1.4658
+ 1089 1.5679
+ 4225 1.5882
+16641 1.5932
+66049 1.5945
+
+Running tests with "kelly" refinement criterion:
+------------------------------------------------
+Refinement cycle: 0 1 2 3 4 5 6 7 8 9 10 11
+DoFs u(x_0)
+ 25 1.2868
+ 47 0.8775
+ 89 1.5365
+ 165 1.2974
+ 316 1.6442
+ 589 1.5221
+ 1093 1.5724
+ 2042 1.5627
+ 3766 1.5916
+ 7124 1.5876
+13111 1.5942
+24838 1.5932
@endcode
What surprises here is that the exact value is 1.59491554..., and that
-it is obviously surprisingly complicated to compute the solution even to
+it is apparently surprisingly complicated to compute the solution even to
only one per cent accuracy, although the solution is smooth (in fact
-infinite often differentiable). This smoothness is shown in the
+infinitely often differentiable). This smoothness is shown in the
graphical output generated by the program, here coarse grid and the
first 9 refinement steps of the Kelly refinement indicator:
{
cell->clear_coarsen_flag();
refinement_indicated |= cell->refine_flag_set();
- };
+ }
if (refinement_indicated)
for (cell=triangulation->begin_active();
cell!=triangulation->end(); ++cell)
#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/table_handler.h>
+#include <deal.II/base/thread_management.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <list>
#include <sstream>
-#ifdef DEAL_II_WITH_THREADS
-# include <tbb/task.h>
-# include <tbb/task_scheduler_init.h>
-#endif
-
// The last step is as in all previous programs:
namespace Step13
{
using namespace dealii;
- namespace Assembler
- {
- // Dummy structure
- struct Scratch
- {
- Scratch() {}
- };
-
- struct CopyData
- {
- CopyData() {}
-
- unsigned int dofs_per_cell;
- FullMatrix<double> cell_matrix;
- std::vector<types::global_dof_index> local_dof_indices;
- };
- }
-
// @sect3{Evaluation of the solution}
// As for the program itself, we first define classes that evaluate the
// various subobjects, and there is a function that implements a
// conjugate gradient method as solver.
private:
- struct LinearSystem
+ struct LinearSystem
{
LinearSystem (const DoFHandler<dim> &dof_handler);
Vector<double> rhs;
};
-#ifdef DEAL_II_WITH_THREADS
- // Tasks in TBB must be derived from tbb::task and override tbb::task*
- // execute.
- // The purpose of HangingNodeTask is to apply execute DoFTools::make_hanging_node_constraints.
- struct HangingNodeTask : public tbb::task
+ // Finally, there is a set of functions which will be used to
+ // assemble the actual system matrix. The main function of this
+ // group, <code>assemble_linear_system()</code>, computes the
+ // matrix in parallel on multicore systems, using the following
+ // two helper functions. The mechanism for doing so is the same
+ // as in the step-9 example program and follows the WorkStream
+ // concept outlined in @ref threads . The main function also
+ // calls the virtual function assembling the right hand side.
+ struct AssemblyScratchData
{
- HangingNodeTask (const DoFHandler<dim> &dof_handler,ConstraintMatrix &hanging_node_constraints) :
- dof_handler(&dof_handler),
- hanging_node_constraints(& hanging_node_constraints) {}
-
- tbb::task* execute()
- {
- DoFTools::make_hanging_node_constraints(*dof_handler,*hanging_node_constraints);
-
- return NULL;
- }
-
- const DoFHandler<dim>* dof_handler;
- ConstraintMatrix* hanging_node_constraints;
- };
-
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature);
+ AssemblyScratchData (const AssemblyScratchData &scratch_data);
+ FEValues<dim> fe_values;
+ };
- // The purpose of SparsityPatternTask is to create the sparsity pattern.
- struct SparsityPatternTask : public tbb::task
+ struct AssemblyCopyData
{
- SparsityPatternTask (const DoFHandler<dim> &dof_handler,SparsityPattern &sparsity_pattern) :
- dof_handler(&dof_handler),
- sparsity_pattern(&sparsity_pattern) {}
-
- tbb::task* execute()
- {
- sparsity_pattern->reinit (dof_handler->n_dofs(),
- dof_handler->n_dofs(),
- dof_handler->max_couplings_between_dofs());
- DoFTools::make_sparsity_pattern (*dof_handler, *sparsity_pattern);
-
- return NULL;
- }
-
- const DoFHandler<dim>* dof_handler;
- SparsityPattern* sparsity_pattern;
+ FullMatrix<double> cell_matrix;
+ std::vector<types::global_dof_index> local_dof_indices;
};
-#endif
-
- // Finally, there is a pair of functions which will be used to assemble
- // the actual system matrix. It calls the virtual function assembling
- // the right hand side, and installs a number threads each running the
- // second function which assembles part of the system matrix. The
- // mechanism for doing so is the same as in the step-9 example program.
void
assemble_linear_system (LinearSystem &linear_system);
void
- assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
- Assembler::CopyData ©_data) const;
-
+ local_assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+ AssemblyCopyData ©_data) const;
void
- copy_local_to_global(Assembler::CopyData const ©_data,
- LinearSystem &linear_system) const;
+ copy_local_to_global(const AssemblyCopyData ©_data,
+ LinearSystem &linear_system) const;
};
}
- // The following function assembles matrix and right hand side of the
- // linear system to be solved in each step. It goes along the same lines
- // as used in previous examples, so we explain it only briefly. Note that
- // we do a number of things in parallel, a process described in more
- // detail in the @ref threads module.
+ // The following function assembles matrix and right hand side of
+ // the linear system to be solved in each step. We will do things
+ // in parallel at a couple of levels. First, note that we need to
+ // assemble both the matrix and the right hand side. These are
+ // independent operations, and we should do this in parallel. To
+ // this end, we use the concept of "tasks" that is discussed in
+ // the @ref threads documentation module. In essence, what we want
+ // to say is "here is something that needs to be worked on, go do
+ // it whenever a CPU core is available", then do something else, and
+ // when we need the result of the first operation wait for its
+ // completion. At the second level, we want to assemble the matrix
+ // using the exact same strategy we have already used in step-9,
+ // namely the WorkStream concept.
+ //
+ // While we could consider either assembling the right hand side
+ // or assembling the matrix as the thing to do in the background
+ // while doing the other, we will opt for the former approach
+ // simply because the call to <code>Solver::assemble_rhs</code> is
+ // so much simpler to write than the call to WorkStream::run with
+ // its many arguments. In any case, the code then looks like this
+ // to assemble the entire linear system:
template <int dim>
void
Solver<dim>::assemble_linear_system (LinearSystem &linear_system)
{
- // First define a convenience abbreviation for these lengthy iterator
- // names...
- typedef
- typename DoFHandler<dim>::active_cell_iterator
- active_cell_iterator;
-
- // ... and use it to split up the set of cells into a number of pieces
- // of equal size. The number of blocks is set to the default number of
- // threads to be used, which by default is set to the number of
- // processors found in your computer at startup of the program:
-
- // These ranges are then assigned to a number of threads which we create
- // next. Each will assemble the local cell matrices on the assigned
- // cells, and fill the matrix object with it. Since there is need for
- // synchronization when filling the same matrix from different threads,
- // we need a mutex here:
-
- Assembler::Scratch scratch;
- Assembler::CopyData copy_data;
- WorkStream::run(dof_handler.begin_active(),dof_handler.end(),
- std_cxx1x::bind(&Solver<dim>::assemble_matrix,this,std_cxx1x::_1,std_cxx1x::_2,std_cxx1x::_3),
- std_cxx1x::bind(&Solver<dim>::copy_local_to_global,this,std_cxx1x::_1,std_cxx1x::ref(linear_system)),
- scratch,copy_data);
-
- // While the new threads assemble the system matrix, we can already
- // compute the right hand side vector in the main thread, and condense
- // away the constraints due to hanging nodes:
- assemble_rhs (linear_system.rhs);
- linear_system.hanging_node_constraints.condense (linear_system.rhs);
+ Threads::Task<> rhs_task = Threads::new_task (&Solver<dim>::assemble_rhs,
+ *this,
+ linear_system.rhs);
+
+ WorkStream::run(dof_handler.begin_active(),
+ dof_handler.end(),
+ std_cxx1x::bind(&Solver<dim>::local_assemble_matrix,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::_2,
+ std_cxx1x::_3),
+ std_cxx1x::bind(&Solver<dim>::copy_local_to_global,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::ref(linear_system)),
+ AssemblyScratchData(*fe, *quadrature),
+ AssemblyCopyData());
+ linear_system.hanging_node_constraints.condense (linear_system.matrix);
- // And while we're already computing things in parallel, interpolating
- // boundary values is one more thing that can be done independently, so
- // we do it here:
+ // The syntax above using <code>std_cxx1x::bind</code> requires
+ // some explanation. There are multiple versions of
+ // WorkStream::run that expect different arguments. In step-9,
+ // we used one version that took a pair of iterators, a pair of
+ // pointers to member functions with very specific argument
+ // lists, a pointer or reference to the object on which these
+ // member functions have to work, and a scratch and copy data
+ // object. This is a bit restrictive since the member functions
+ // called this way have to have an argument list that exactly
+ // matches what WorkStream::run expects: the local assembly
+ // function needs to take an iterator, a scratch object and a
+ // copy object; and the copy-local-to-global function needs to
+ // take exactly a copy object. But, what if we want something
+ // that's slightly more general? For example, in the current
+ // program, the copy-local-to-global function needs to know
+ // which linear system object to write the local contributions
+ // into, i.e., it also has to take a <code>LinearSystem</code>
+ // argument. That won't work with the approach using member
+ // function pointers.
+ //
+ // Fortunately, C++ offers a way out. These are called function
+ // objects. In essence, what WorkStream::run wants to do is not
+ // call a member function. It wants to call some function that
+ // takes an iterator, a scratch object and a copy object in the
+ // first case, and a copy object in the second case. Whether
+ // these are member functions, global functions, or something
+ // else, is really not of much concern to
+ // WorkStream. Consequently, there is a second version of the
+ // function that just takes function objects -- objects that
+ // have an <code>operator()</code> and that consequently can be
+ // called like functions, whatever they really represent. The
+ // typical way to generate such function objects is using
+ // <code>std::bind</code> (or, if the compiler is too old, a
+ // replacement for it, which we generically call
+ // <code>std_cxx1x::bind</code>) which takes a pointer to a
+ // (member) function and then <i>binds</i> individual arguments
+ // to fixed values. For example, you can create a function that
+ // takes an iterator, a scratch object and a copy object by
+ // taking the address of a member function and binding the
+ // (implicit) argument to the object on which it is to work to
+ // <code>*this</code>. This is what we do in the first call
+ // above. In the second call, we need to create a function
+ // object that takes a copy object, and we do so by taking the
+ // address of a member function that takes an implicit pointer
+ // to <code>*this</code>, a reference to a copy object, and a
+ // reference to a linear system, and binding the first and third
+ // of these, leaving something that has only one open argument
+ // that can then be filled by WorkStream::run().
+ //
+ // There remains the question of what the
+ // <code>std_cxx1x::_1</code>, <code>std_cxx1x::_2</code>, etc.,
+ // mean. (These arguments are called <i>placeholders</i>.) The
+ // idea of using <code>std_cxx1x::bind</code> in the first of
+ // the two cases above is that it produces an object that can be
+ // called with three arguments. But how are the three arguments
+ // the function object is being called with going to be
+ // distributed to the four arguments
+ // <code>local_assemble_matrix()</code> (including the implicit
+ // <code>this</code> pointer)? As specified, the first argument
+ // given to the function object will become the first argument
+ // given to <code>local_assemble_matrix()</code>, the second the
+ // second, etc. This is trivial here, but allows for interesting
+ // games in other circumstances. Consider, for example, having a
+ // function <code>void f(double x, double y)</code>. Then,
+ // creating a variable <code>p</code> of type
+ // <code>std_cxx1x::function@<void f(double,double)@></code> and
+ // initializing <code>p=std_cxx1x::bind(&f, std_cxx1x::_2,
+ // std_cxx1x::_1)</code> then calling <code>p(1,2)</code> will
+ // result in calling <code>f(2,1)</code>.
+ //
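+ // To make this argument-swapping business concrete, here is a tiny
+ // stand-alone sketch. It is not part of the present program; apart
+ // from the <code>std_cxx1x</code> facilities that the headers
+ // included above already provide, it only needs the standard
+ // <code>iostream</code> header:
+ // @code
+ //   void f (const double x, const double y)
+ //   {
+ //     std::cout << x << ' ' << y << std::endl;
+ //   }
+ //
+ //   void test_bind ()
+ //   {
+ //     std_cxx1x::function<void (double, double)> p
+ //       = std_cxx1x::bind (&f, std_cxx1x::_2, std_cxx1x::_1);
+ //     p (1, 2);   // prints "2 1", i.e., calls f(2,1)
+ //   }
+ // @endcode
+ //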
+ // @note Once deal.II can rely on every compiler being able to
+ // fully understand the syntax of the C++11 standard, one can
+ // use C++'s version of <a
+ // href="http://en.wikipedia.org/wiki/Anonymous_function">lambda
+ // functions</a> to achieve the same goal. In essence, a lambda
+ // function is a function without a name that is defined right
+ // at the one place where it is going to be used -- i.e., where
+ // we pass the third and fourth argument to WorkStream::run. The
+ // functions one would define in these locations would take 3
+ // and 1 arguments, respectively, and all they do is call
+ // <code>Solver::local_assemble_matrix</code> and
+ // <code>Solver::copy_local_to_global</code> with the required
+ // number of arguments, utilizing what the lambda function has
+ // gotten as arguments itself. We won't show the syntax this
+ // would require since it is no less confusing than the one used
+ // above.
+
+ // At this point, we have assembled the matrix and condensed
+ // it. The right hand side may or may not have been completely
+ // assembled, but we would like to condense the right hand side
+ // vector next. We can only do this if the assembly of this
+ // vector has finished, so we have to wait for the task to
+ // finish; in computer science, waiting for a task is typically
+ // called "joining" the task, explaining the name of the
+ // function we call below.
+ //
+ // Since that task may or may not have finished, and since we
+ // may have to wait for it to finish, we may as well try to pack
+ // other things that need to be done anyway into this
+ // gap. Consequently, we first interpolate boundary values
+ // before we wait for the right hand side. Of course, another
+ // possibility would have been to also interpolate the boundary
+ // values on a separate task since doing so is independent of
+ // the other things we have done in this function so far. Feel
+ // free to find the correct syntax to also create a task for
+ // this interpolation and start it at the top of this function,
+ // along with the assembly of the right hand side. (You will
+ // find that this is slightly more complicated since there are
+ // multiple versions of
+ // VectorTools::interpolate_boundary_values(), and so simply
+ // taking the address
+ // <code>&VectorTools::interpolate_boundary_values</code>
+ // produces a set of overloaded functions that can't be passed
+ // to Threads::new_task() right away -- you have to select which
+ // element of this overload set you want by casting the address
+ // expression to a function pointer type that is specific to the
+ // version of the function that you want to call on the task.)
std::map<types::global_dof_index,double> boundary_value_map;
VectorTools::interpolate_boundary_values (dof_handler,
0,
*boundary_values,
boundary_value_map);
+ rhs_task.join ();
+ linear_system.hanging_node_constraints.condense (linear_system.rhs);
- // If this is done, wait for the matrix assembling threads, and condense
- // the constraints in the matrix as well:
- linear_system.hanging_node_constraints.condense (linear_system.matrix);
-
- // Now that we have the linear system, we can also treat boundary
- // values, which need to be eliminated from both the matrix and the
- // right hand side:
+ // Now that we have the complete linear system, we can also
+ // treat boundary values, which need to be eliminated from both
+ // the matrix and the right hand side:
MatrixTools::apply_boundary_values (boundary_value_map,
linear_system.matrix,
solution,
}
- // The second of this pair of functions takes a range of cell iterators,
- // and assembles the system matrix on this part of the domain. Since it's
- // actions have all been explained in previous programs, we do not comment
- // on it any more, except for one point below.
+ // The second half of this set of functions deals with the local
+ // assembly on each cell and copying local contributions into the
+ // global matrix object. This works in exactly the same way as
+ // described in step-9:
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature)
+ :
+ fe_values (fe,
+ quadrature,
+ update_gradients | update_JxW_values)
+ {}
+
+
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const AssemblyScratchData &scratch_data)
+ :
+ fe_values (scratch_data.fe_values.get_fe(),
+ scratch_data.fe_values.get_quadrature(),
+ update_gradients | update_JxW_values)
+ {}
+
+
template <int dim>
void
- Solver<dim>::assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
- Assembler::CopyData ©_data) const
+ Solver<dim>::local_assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+ AssemblyCopyData ©_data) const
{
- FEValues<dim> fe_values (*fe, *quadrature,
- update_gradients | update_JxW_values);
-
- copy_data.dofs_per_cell = fe->dofs_per_cell;
- const unsigned int n_q_points = quadrature->size();
+ const unsigned int dofs_per_cell = fe->dofs_per_cell;
+ const unsigned int n_q_points = quadrature->size();
- copy_data.cell_matrix = FullMatrix<double> (copy_data.dofs_per_cell, copy_data.dofs_per_cell);
+ copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
- copy_data.local_dof_indices.resize(copy_data.dofs_per_cell);
+ copy_data.local_dof_indices.resize(dofs_per_cell);
- fe_values.reinit (cell);
+ scratch_data.fe_values.reinit (cell);
for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
- for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
- copy_data.cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
- fe_values.shape_grad(j,q_point) *
- fe_values.JxW(q_point));
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ copy_data.cell_matrix(i,j) += (scratch_data.fe_values.shape_grad(i,q_point) *
+ scratch_data.fe_values.shape_grad(j,q_point) *
+ scratch_data.fe_values.JxW(q_point));
cell->get_dof_indices (copy_data.local_dof_indices);
}
template <int dim>
void
- Solver<dim>::copy_local_to_global(Assembler::CopyData const ©_data,
- LinearSystem &linear_system) const
+ Solver<dim>::copy_local_to_global(const AssemblyCopyData ©_data,
+ LinearSystem &linear_system) const
{
- // In the step-9 program, we have shown that you have to use the
- // mutex to lock the matrix when copying the elements from the local
- // to the global matrix. This was necessary to avoid that two
- // threads access it at the same time, eventually overwriting their
- // respective work. Previously, we have used the
- // <code>acquire</code> and <code>release</code> functions of the
- // mutex to lock and unlock the mutex, respectively. While this is
- // valid, there is one possible catch: if between the locking
- // operation and the unlocking operation an exception is thrown, the
- // mutex remains in the locked state, and in some cases this might
- // lead to deadlocks. A similar situation arises, when one changes
- // the code to have a return statement somewhere in the middle of
- // the locked block, and forgets that before we call
- // <code>return</code>, we also have to unlock the mutex. All this
- // is no problem here, but we want to show the general
- // technique to cope with these problems nevertheless: have an
- // object that upon initialization (i.e. in its constructor) locks
- // the mutex, and on running the destructor unlocks it again. This
- // is called the <code>scoped lock</code> pattern (apparently
- // invented by Doug Schmidt originally), and it works because
- // destructors of local objects are also run when we exit the
- // function either through a <code>return</code> statement, or when
- // an exception is raised. Thus, it is guaranteed that the mutex
- // will always be unlocked when we exit this part of the program,
- // whether the operation completed successfully or not, whether the
- // exit path was something we implemented willfully or whether the
- // function was exited by an exception that we did not foresee.
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
- for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
- linear_system.matrix.add (copy_data.local_dof_indices[i],
- copy_data.local_dof_indices[j],
- copy_data.cell_matrix(i,j));
+ for (unsigned int i=0; i<copy_data.local_dof_indices.size(); ++i)
+ for (unsigned int j=0; j<copy_data.local_dof_indices.size(); ++j)
+ linear_system.matrix.add (copy_data.local_dof_indices[i],
+ copy_data.local_dof_indices[j],
+ copy_data.cell_matrix(i,j));
}
{
hanging_node_constraints.clear ();
-#ifdef DEAL_II_WITH_THREADS
- tbb::task_scheduler_init init;
- // Create an empty task to be the parent of the two tasks that we need.
- tbb::empty_task* empty_task = new (tbb::task::allocate_root()) tbb::empty_task;
- // Set the reference count to 3 (number of children+1 because
- // wati_for_all returns when ref_count is one).
- empty_task->set_ref_count(3);
-
- HangingNodeTask* hanging_node_task =
- new (empty_task->allocate_child()) HangingNodeTask(dof_handler,hanging_node_constraints);
- SparsityPatternTask* sparsity_pattern_task =
- new (empty_task->allocate_child()) SparsityPatternTask(dof_handler,sparsity_pattern);
-
- // Spawn the two tasks
- empty_task->spawn(*hanging_node_task);
- empty_task->spawn(*sparsity_pattern_task);
-
- // Wait for children to finish
- empty_task->wait_for_all();
- // empty_task must be destroy manually because it does not return.
- empty_task->destroy(*empty_task);
-#else
- DoFTools::make_hanging_node_constraints(dof_handler,hanging_node_constraints);
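+ // DoFTools::make_hanging_node_constraints() is a template that can be
+ // instantiated for several kinds of DoF handler classes. To hand it to
+ // std_cxx1x::bind below, we therefore first store the address of the
+ // particular instantiation we want in a function pointer of matching
+ // type, which makes the choice unambiguous (compare the discussion of
+ // taking addresses of overloaded functions in
+ // <code>assemble_linear_system()</code>).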
+ void (*mhnc_p) (const DoFHandler<dim> &,
+ ConstraintMatrix &)
+ = &DoFTools::make_hanging_node_constraints;
+
+ // Start a side task then continue on the main thread
+ Threads::Task<> side_task(std_cxx1x::bind(mhnc_p,std_cxx1x::cref(dof_handler),
+ std_cxx1x::ref(hanging_node_constraints)));
sparsity_pattern.reinit (dof_handler.n_dofs(),
dof_handler.n_dofs(),
dof_handler.max_couplings_between_dofs());
DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
-#endif
+
+ // Wait for the side task to be done before going further
+ side_task.join();
hanging_node_constraints.close ();
hanging_node_constraints.condense (sparsity_pattern);
<h3>The maths</h3>
-The Heidelberg group of Professor Rolf Rannacher, to which the three main
+The Heidelberg group of Professor Rolf Rannacher, to which the three initial
authors of the deal.II library belonged during their PhD time and partly also
afterwards, has been involved with adaptivity and error estimation for finite
-element discretizations since the mid-90ies. The main achievement is the
+element discretizations since the mid-1990s. The main achievement is the
development of error estimates for arbitrary functionals of the solution, and
of optimal mesh refinement for its computation.
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>
+#include <deal.II/base/thread_management.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <numeric>
#include <sstream>
-#ifdef DEAL_II_WITH_THREADS
-# include <tbb/task.h>
-# include <tbb/task_scheduler_init.h>
-#endif
-
// The last step is as in all previous programs:
namespace Step14
{
using namespace dealii;
- namespace Assembler
- {
- struct Scratch
- {
- Scratch() {}
- };
-
- struct CopyData
- {
- CopyData() {}
-
- unsigned int dofs_per_cell;
- FullMatrix<double> cell_matrix;
- std::vector<types::global_dof_index> local_dof_indices;
- };
- }
-
// @sect3{Evaluating the solution}
// As mentioned in the introduction, significant parts of the program have
Vector<double> rhs;
};
-#ifdef DEAL_II_WITH_THREADS
- struct HangingNodeTask : public tbb::task
+ // The remainder of the class is essentially a copy of step-13
+ // as well, including the data structures and functions
+ // necessary to compute the linear system in parallel using the
+ // WorkStream framework:
+ struct AssemblyScratchData
{
- HangingNodeTask (const DoFHandler<dim> &dof_handler,ConstraintMatrix &hanging_node_constraints) :
- dof_handler(&dof_handler),
- hanging_node_constraints(& hanging_node_constraints) {}
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature);
+ AssemblyScratchData (const AssemblyScratchData &scratch_data);
- tbb::task* execute()
- {
- DoFTools::make_hanging_node_constraints(*dof_handler,*hanging_node_constraints);
-
- return NULL;
- }
-
- const DoFHandler<dim>* dof_handler;
- ConstraintMatrix* hanging_node_constraints;
- };
+ FEValues<dim> fe_values;
+ };
- struct SparsityPatternTask : public tbb::task
+ struct AssemblyCopyData
{
- SparsityPatternTask (const DoFHandler<dim> &dof_handler,SparsityPattern &sparsity_pattern) :
- dof_handler(&dof_handler),
- sparsity_pattern(&sparsity_pattern) {}
-
- tbb::task* execute()
- {
- sparsity_pattern->reinit (dof_handler->n_dofs(),
- dof_handler->n_dofs(),
- dof_handler->max_couplings_between_dofs());
- DoFTools::make_sparsity_pattern (*dof_handler, *sparsity_pattern);
-
- return NULL;
- }
-
- const DoFHandler<dim>* dof_handler;
- SparsityPattern* sparsity_pattern;
+ FullMatrix<double> cell_matrix;
+ std::vector<types::global_dof_index> local_dof_indices;
};
-#endif
void
assemble_linear_system (LinearSystem &linear_system);
void
- assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
-                     Assembler::CopyData &copy_data) const;
-
+ local_assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+                           AssemblyCopyData &copy_data) const;
+
void
-      copy_local_to_global(Assembler::CopyData const &copy_data,
- LinearSystem &linear_system) const;
+      copy_local_to_global(const AssemblyCopyData &copy_data,
+ LinearSystem &linear_system) const;
};
}
+ // The following few functions and constructors are verbatim
+ // copies taken from step-13:
template <int dim>
void
Solver<dim>::assemble_linear_system (LinearSystem &linear_system)
{
- typedef
- typename DoFHandler<dim>::active_cell_iterator
- active_cell_iterator;
+ Threads::Task<> rhs_task = Threads::new_task (&Solver<dim>::assemble_rhs,
+ *this,
+ linear_system.rhs);
+
+ WorkStream::run(dof_handler.begin_active(),
+ dof_handler.end(),
+ std_cxx1x::bind(&Solver<dim>::local_assemble_matrix,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::_2,
+ std_cxx1x::_3),
+ std_cxx1x::bind(&Solver<dim>::copy_local_to_global,
+ this,
+ std_cxx1x::_1,
+ std_cxx1x::ref(linear_system)),
+ AssemblyScratchData(*fe, *quadrature),
+ AssemblyCopyData());
- const unsigned int n_threads = multithread_info.n_threads();
- std::vector<std::pair<active_cell_iterator,active_cell_iterator> >
- thread_ranges
- = Threads::split_range<active_cell_iterator> (dof_handler.begin_active (),
- dof_handler.end (),
- n_threads);
-
- Assembler::Scratch scratch;
- Assembler::CopyData copy_data;
- WorkStream::run(dof_handler.begin_active(),dof_handler.end(),
- std_cxx1x::bind(&Solver<dim>::assemble_matrix,this,std_cxx1x::_1,std_cxx1x::_2,std_cxx1x::_3),
- std_cxx1x::bind(&Solver<dim>::copy_local_to_global,this,std_cxx1x::_1,std_cxx1x::ref(linear_system)),
- scratch,copy_data);
-
-
- assemble_rhs (linear_system.rhs);
linear_system.hanging_node_constraints.condense (linear_system.rhs);
std::map<types::global_dof_index,double> boundary_value_map;
*boundary_values,
boundary_value_map);
+ rhs_task.join ();
+
linear_system.hanging_node_constraints.condense (linear_system.matrix);
MatrixTools::apply_boundary_values (boundary_value_map,
}
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const FiniteElement<dim> &fe,
+ const Quadrature<dim> &quadrature)
+ :
+ fe_values (fe,
+ quadrature,
+ update_gradients | update_JxW_values)
+ {}
+
+
+ template <int dim>
+ Solver<dim>::AssemblyScratchData::
+ AssemblyScratchData (const AssemblyScratchData &scratch_data)
+ :
+ fe_values (scratch_data.fe_values.get_fe(),
+ scratch_data.fe_values.get_quadrature(),
+ update_gradients | update_JxW_values)
+ {}
+
+
template <int dim>
void
- Solver<dim>::assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
- Assembler::Scratch &scratch,
-                                  Assembler::CopyData &copy_data) const
+ Solver<dim>::local_assemble_matrix (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+                                        AssemblyCopyData &copy_data) const
{
- FEValues<dim> fe_values (*fe, *quadrature,
- update_gradients | update_JxW_values);
-
- copy_data.dofs_per_cell = fe->dofs_per_cell;
- const unsigned int n_q_points = quadrature->size();
+ const unsigned int dofs_per_cell = fe->dofs_per_cell;
+ const unsigned int n_q_points = quadrature->size();
- copy_data.cell_matrix = FullMatrix<double> (copy_data.dofs_per_cell, copy_data.dofs_per_cell);
+ copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
- copy_data.local_dof_indices.resize(copy_data.dofs_per_cell);
+ copy_data.local_dof_indices.resize(dofs_per_cell);
- fe_values.reinit (cell);
+ scratch_data.fe_values.reinit (cell);
for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
- for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
- copy_data.cell_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
- fe_values.shape_grad(j,q_point) *
- fe_values.JxW(q_point));
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
+ copy_data.cell_matrix(i,j) += (scratch_data.fe_values.shape_grad(i,q_point) *
+ scratch_data.fe_values.shape_grad(j,q_point) *
+ scratch_data.fe_values.JxW(q_point));
cell->get_dof_indices (copy_data.local_dof_indices);
}
template <int dim>
void
-    Solver<dim>::copy_local_to_global(Assembler::CopyData const &copy_data,
- LinearSystem &linear_system) const
- {
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
- for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
- linear_system.matrix.add (copy_data.local_dof_indices[i],
- copy_data.local_dof_indices[j],
- copy_data.cell_matrix(i,j));
+    Solver<dim>::copy_local_to_global(const AssemblyCopyData &copy_data,
+ LinearSystem &linear_system) const
+ {
+ for (unsigned int i=0; i<copy_data.local_dof_indices.size(); ++i)
+ for (unsigned int j=0; j<copy_data.local_dof_indices.size(); ++j)
+ linear_system.matrix.add (copy_data.local_dof_indices[i],
+ copy_data.local_dof_indices[j],
+ copy_data.cell_matrix(i,j));
}
- // Now for the functions that implement actions in the linear system
- // class. First, the constructor initializes all data elements to their
- // correct sizes, and sets up a number of additional data structures, such
- // as constraints due to hanging nodes. Since setting up the hanging nodes
- // and finding out about the nonzero elements of the matrix is
- // independent, we do that in parallel (if the library was configured to
- // use concurrency, at least; otherwise, the actions are performed
- // sequentially). Note that we start only one thread, and do the second
- // action in the main thread. Since only one thread is generated, we don't
- // use the <code>Threads::ThreadGroup</code> class here, but rather use
- // the one created thread object directly to wait for this particular
- // thread's exit.
+ // Now for the functions that implement actions in the linear
+ // system class. First, the constructor initializes all data
+ // elements to their correct sizes, and sets up a number of
+ // additional data structures, such as constraints due to hanging
+ // nodes. Since setting up the hanging nodes and finding out about
+ // the nonzero elements of the matrix is independent, we do that
+ // in parallel (if the library was configured to use concurrency,
+ // at least; otherwise, the actions are performed
+  // sequentially). Note that we start only one task, and do the
+  // second action in the main thread. Since only one task is
+  // created, we don't use the <code>Threads::TaskGroup</code>
+  // class here, but rather use the one created task object
+  // directly to wait until this particular task has finished. The
+ // approach is generally the same as the one we have used in
+ // <code>Solver::assemble_linear_system()</code> above.
//
- // Note that taking up the address of the
- // <code>DoFTools::make_hanging_node_constraints</code> function is a
- // little tricky, since there are actually three of them, one for each
- // supported space dimension. Taking addresses of overloaded functions is
- // somewhat complicated in C++, since the address-of operator
- // <code>&</code> in that case returns more like a set of values (the
- // addresses of all functions with that name), and selecting the right one
- // is then the next step. If the context dictates which one to take (for
- // example by assigning to a function pointer of known type), then the
- // compiler can do that by itself, but if this set of pointers shall be
- // given as the argument to a function that takes a template, the compiler
- // could choose all without having a preference for one. We therefore have
- // to make it clear to the compiler which one we would like to have; for
- // this, we could use a cast, but for more clarity, we assign it to a
- // temporary <code>mhnc_p</code> (short for <code>pointer to
- // make_hanging_node_constraints</code>) with the right type, and using
- // this pointer instead.
+ // Note that taking the address of the
+ // <code>DoFTools::make_hanging_node_constraints</code> function
+ // is a little tricky, since there are actually three functions of
+ // this name, one for each supported space dimension. Taking
+ // addresses of overloaded functions is somewhat complicated in
+ // C++, since the address-of operator <code>&</code> in that case
+ // returns a set of values (the addresses of all
+ // functions with that name), and selecting the right one is then
+ // the next step. If the context dictates which one to take (for
+ // example by assigning to a function pointer of known type), then
+ // the compiler can do that by itself, but if this set of pointers
+ // shall be given as the argument to a function that takes a
+ // template, the compiler could choose all without having a
+ // preference for one. We therefore have to make it clear to the
+ // compiler which one we would like to have; for this, we could
+ // use a cast, but for more clarity, we assign it to a temporary
+ // <code>mhnc_p</code> (short for <code>pointer to
+ // make_hanging_node_constraints</code>) with the right type, and
+ // using this pointer instead.
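+  //
+  // (As an aside, the cast alternative mentioned above would look
+  // something like
+  // @code
+  //   static_cast<void (*)(const DoFHandler<dim> &, ConstraintMatrix &)>
+  //     (&DoFTools::make_hanging_node_constraints)
+  // @endcode
+  // which selects the same overload but is arguably harder to read than
+  // going through the named pointer <code>mhnc_p</code>.)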
template <int dim>
Solver<dim>::LinearSystem::
LinearSystem (const DoFHandler<dim> &dof_handler)
{
hanging_node_constraints.clear ();
-#ifdef DEAL_II_WITH_THREADS
- tbb::task_scheduler_init init;
- // Create an empty task to be the parent of the two tasks that we need.
- tbb::empty_task* empty_task = new (tbb::task::allocate_root()) tbb::empty_task;
- // Set the reference count to 3 (number of children+1)
- empty_task->set_ref_count(3);
-
- HangingNodeTask* hanging_node_task =
- new (empty_task->allocate_child()) HangingNodeTask(dof_handler,hanging_node_constraints);
- SparsityPatternTask* sparsity_pattern_task =
- new (empty_task->allocate_child()) SparsityPatternTask(dof_handler,sparsity_pattern);
-
- empty_task->spawn(*hanging_node_task);
- empty_task->spawn(*sparsity_pattern_task);
-
- // Wait for children to finish
- empty_task->wait_for_all();
- empty_task->destroy(*empty_task);
-#else
- DoFTools::make_hanging_node_constraints(dof_handler,hanging_node_constraints);
+ void (*mhnc_p) (const DoFHandler<dim> &,
+ ConstraintMatrix &)
+ = &DoFTools::make_hanging_node_constraints;
+
+ Threads::Task<> side_task
+ = Threads::new_task (mhnc_p,
+ dof_handler,
+ hanging_node_constraints);
sparsity_pattern.reinit (dof_handler.n_dofs(),
- dof_handler.n_dofs(),
- dof_handler.max_couplings_between_dofs());
+ dof_handler.n_dofs(),
+ dof_handler.max_couplings_between_dofs());
DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern);
-#endif
+
+ side_task.join();
hanging_node_constraints.close ();
hanging_node_constraints.condense (sparsity_pattern);
void
WeightedResidual<dim>::solve_problem ()
{
- Threads::ThreadGroup<> threads;
- threads += Threads::new_thread (&WeightedResidual<dim>::solve_primal_problem,
- *this);
- threads += Threads::new_thread (&WeightedResidual<dim>::solve_dual_problem,
- *this);
- threads.join_all ();
+ Threads::TaskGroup<> tasks;
+ tasks += Threads::new_task (&WeightedResidual<dim>::solve_primal_problem,
+ *this);
+ tasks += Threads::new_task (&WeightedResidual<dim>::solve_dual_problem,
+ *this);
+ tasks.join_all();
}
// parts of all the cells, and once they are all started wait until they
// have all finished:
const unsigned int n_threads = multithread_info.n_threads();
- Threads::ThreadGroup<> threads;
+ Threads::TaskGroup<> tasks;
for (unsigned int i=0; i<n_threads; ++i)
- threads += Threads::new_thread (&WeightedResidual<dim>::estimate_some,
+ tasks += Threads::new_task<> (&WeightedResidual<dim>::estimate_some,
*this,
primal_solution,
dual_weights,
n_threads, i,
error_indicators,
face_integrals);
- threads.join_all();
+ tasks.join_all();
// Once the error contributions are computed, sum them up. For this,
// note that the cell terms are already set, and that only the edge
write the sum back into memory without danger of producing a <a
href="http://en.wikipedia.org/wiki/Race_condition">race condition</a>.
-deal.II has a class that is made for exactly this workflow: WorkStream. Its
-use is extensively documented in the module on @ref threads (in the section
+deal.II has a class that is made for exactly this workflow: WorkStream, first
+discussed in step-9 and step-13. Its
+use is also extensively documented in the module on @ref threads (in the section
on @ref MTWorkStream "the WorkStream class") and we won't repeat here the
rationale and detailed instructions laid out there, though you will want to
read through this module to understand the distinction between scratch space
// i.e., it returns true exactly if the cell is owned by the current
// processor. The resulting iterator range is then exactly what we need.
//
- // With this obstacle out of the way, we call the WorkStream::run function
- // with this set of cells, scratch and copy objects, and with pointers to
- // two functions: the local assembly and copy-local-to-global
- // function. These functions need to have very specific signatures: three
- // arguments in the first and one argument in the latter case (see the
- // documentation of the WorkStream::run function for the meaning of these
- // arguments). Note how we use the construct <code>std_cxx1x::bind</code>
- // to create a function object that satisfies this requirement. It uses
- // placeholders <code>_1, std_cxx1x::_2, _3</code> for the local assembly
- // function that specify cell, scratch data, and copy data, as well as the
- // placeholder <code>_1</code> for the copy function that expects the data
- // to be written into the global matrix. On the other hand, the implicit
- // zeroth argument of member functions (namely the <code>this</code> pointer
- // of the object on which that member function is to operate on) is
- // <i>bound</i> to the <code>this</code> pointer of the current
- // function. The WorkStream::run function, as a consequence, does not need
- // to know anything about the object these functions work on.
+ // With this obstacle out of the way, we call the WorkStream::run
+ // function with this set of cells, scratch and copy objects, and
+ // with pointers to two functions: the local assembly and
+ // copy-local-to-global function. These functions need to have very
+ // specific signatures: three arguments in the first and one
+ // argument in the latter case (see the documentation of the
+ // WorkStream::run function for the meaning of these arguments).
+ // Note how we use the construct <code>std_cxx1x::bind</code> to
+ // create a function object that satisfies this requirement. It uses
+ // placeholders <code>std_cxx1x::_1, std_cxx1x::_2,
+ // std_cxx1x::_3</code> for the local assembly function that specify
+ // cell, scratch data, and copy data, as well as the placeholder
+ // <code>std_cxx1x::_1</code> for the copy function that expects the
+ // data to be written into the global matrix (for placeholder
+ // arguments, also see the discussion in step-13's
+ // <code>assemble_linear_system()</code> function). On the other
+ // hand, the implicit zeroth argument of member functions (namely
+ // the <code>this</code> pointer of the object on which that member
+ // function is to operate on) is <i>bound</i> to the
+ // <code>this</code> pointer of the current function. The
+ // WorkStream::run function, as a consequence, does not need to know
+ // anything about the object these functions work on.
//
// When the WorkStream is executed, it will create several local assembly
// routines of the first kind for several cells and let some available
const unsigned int n_q_points = quadrature_formula.size();
std::vector<types::global_dof_index> local_dof_indices (dofs_per_cell);
- const Coefficient<dim> coefficient;
- std::vector<double> coefficient_values (n_q_points);
typename DoFHandler<dim>::active_cell_iterator cell = dof_handler.begin_active(),
endc = dof_handler.end();
In other words, it considers how a three-dimensional body deforms if one pushes
into it a rigid obstacle (the contact problem) where deformation is governed
by an elasto-plastic material law (a material that can only accommodate a certain
-maximal stress) that hardens as deformation accumulates. To show we we intend to
+maximal stress) that hardens as deformation accumulates. To show what we intend to
do before going into too many details, let us just show a picture of what the
solution will look like (the deformable body is a cube - only half of
which is actually shown -, the obstacle corresponds
@f{align*}
\label{eq:linearization}
\left(I_{\Pi}\varepsilon(\tilde {\mathbf u}^{i}),
- \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right) \geq 0,
+ \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right) \geq
+ \left(\left(I_{\Pi}\varepsilon({\mathbf u}^{i-1}),
+ \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right) -
+ \left(P_{\Pi}(C\varepsilon({\mathbf u}^{i-1})),
+ \varepsilon(\varphi) - \varepsilon(\tilde {\mathbf u}^{i})\right)\right),
\quad \forall \varphi\in V^+,
@f}
where the rank-4 tensor $I_\Pi=I_\Pi(\varepsilon^D(\mathbf u^{i-1}))$ given by
(1-\alpha^i_l)U^{i-1}@f}
satisfies
@f{gather*}
- \vert F\left(U^{i}\right) \vert < \vert F\left(U^{i-1}\right) \vert.
+ \vert {\hat R}\left({\mathbf u}^{i}\right) \vert < \vert {\hat R}\left({\mathbf u}^{i-1}\right) \vert.
  @f}
+    where ${\hat R}\left({\mathbf u}\right)=\left(P_{\Pi}(C\varepsilon({\mathbf u})),\varepsilon(\varphi^{i}_p)\right)$, with
+    the exceptions of (i) elements $p\in\mathcal{A}_i$ where we set ${\hat R}\left({\mathbf u}\right)=0$,
+    and (ii) elements that correspond to hanging nodes, which we eliminate in the usual manner.
<li> Define the new active and inactive sets by
@f{gather*}\mathcal{A}_{i+1}:=\lbrace p\in\mathcal{S}:\Lambda^i_p +
where $g_{h,p}$ is the <i>gap</i> denoting the distance of the obstacle
from the undisplaced configuration of the body.
- <li> If $\mathcal{A}_{i+1} = \mathcal{A}_k$ and $\vert
- F\left(U^{i}\right) \vert < \delta$ then stop, else set $i=i+1$ and go to
+ <li> If $\mathcal{A}_{i+1} = \mathcal{A}_k$ and $\left\|
+  {\hat R}\left({\mathbf u}^{i}\right) \right\|_{\ell_2} < \delta$ then stop, else set $i=i+1$ and go to
step (1). This step ensures that we only stop iterations if both the correct
active set has been found and the plasticity has been iterated to sufficient
accuracy.
// matrix corresponds to the bilinear form
// $A_{ij}=(I_\Pi\varepsilon(\varphi_i),\varepsilon(\varphi_j))$ in the
// notation of the accompanying publication, whereas the right
- // hand side is $F_i=([I_\Pi-P_\Pi]\varepsilon(\varphi_i),\varepsilon(\mathbf u))$
+ // hand side is $F_i=([I_\Pi-P_\Pi C]\varepsilon(\varphi_i),\varepsilon(\mathbf u))$
    // where $u$ is the current linearization point (typically the last solution).
// This might suggest that the right hand side will be zero if the material
// is completely elastic (where $I_\Pi=P_\Pi$) but this ignores the fact
-step-7 step-32 step-44
+step-7 step-9
must solve in an implicit system. This is because, unlike continuous finite
elements, in typical discontinuous elements there is one degree of freedom at
each vertex <i>for each of the adjacent elements</i>, rather than just one,
-and similarly for edges and faces. As an example of how fast the number of
+and similarly for edges and faces. As an example of how fast the number of
unknowns grows,
consider the <code>FE_DGP_Monomial</code> basis: each
scalar solution component is represented by polynomials of degree $p$
matrix <i>A</i> element by element (the local solution of the Dirichlet
problem) and subtract $CA^{-1}B$ from $D$. The steps in the Dirichlet-to-Neumann map concept hence correspond to
<ol>
- <li> constructing the Schur complement matrix $D-C A^{-1} B$ and right hand side $G - C A^{-1} F$ <i>locally on each cell</i>
+ <li> constructing the Schur complement matrix $D-C A^{-1} B$ and right hand side $G - C A^{-1} F$ <i>locally on each cell</i>
and inserting the contribution into the global trace matrix in the usual way,
<li> solving the Schur complement system for $\Lambda$, and
<li> solving for <i>U</i> using the second equation, given $\Lambda$.
The variable $\hat {u}$ is introduced as an additional independent variable
and is the one for which we finally set up a globally coupled linear
system. As mentioned above, it is defined on the element faces and
-discontinuous from one face to another.
+discontinuous from one face to another wherever faces meet (at
+vertices in 2d, and at edges and vertices in 3d).
Values for $u$ and $\mathbf{q}$ appearing in the numerical trace function
are taken to be the cell's interior solution restricted
-to the boundary $\partial K$.
+to the boundary $\partial K$.
The local stabilization parameter $\tau$ has effects on stability and accuracy
of HDG solutions; see the literature for a further discussion. A stabilization
@f{eqnarray*}
\tau = \frac{\kappa}{\ell} + |\mathbf{c} \cdot \mathbf{n}|
@f}
-where we set the diffusion $\kappa=1$ and the diffusion length scale to
+where we set the diffusion $\kappa=1$ and the diffusion length scale to
$\ell = \frac{1}{5}$.
The trace/skeleton variables in HDG methods are single-valued on element
- ( \nabla\cdot\mathbf{v}, u_h)_{\mathcal{T}}
+ \left<\mathbf{v}\cdot\mathbf{n}, \hat{u}_h\right>_{\partial\mathcal{T}}
&=& 0,
- \quad \forall \mathbf{v} \in \mathcal{V}_h^p,
+ \quad &&\forall \mathbf{v} \in \mathcal{V}_h^p,
\\
- (\nabla w, \mathbf{c} u_h)_{\mathcal{T}}
+ (w, \nabla \cdot \mathbf{q}_h)_{\mathcal{T}}
+ \left<w, \tau (u_h - \hat{u}_h)\right>_{\partial \mathcal{T}}
&=&
(w, f)_{\mathcal{T}},
- \quad \forall w \in \mathcal{W}_h^p,
+ \quad &&\forall w \in \mathcal{W}_h^p,
\\
\left< \mu, \hat{u}_h\mathbf{c} \cdot \mathbf{n}
+ \mathbf{q}_h\cdot \mathbf{n}
+ \tau (u_h - \hat{u}_h)\right>_{\partial \mathcal{T}}
&=&
\left<\mu, g_N\right>_{\partial\Omega_N},
- \quad \forall \mu \in \mathcal{M}_h^p.
+ \quad &&\forall \mu \in \mathcal{M}_h^p.
@f}
The unknowns $(\mathbf{q}_h, u_h)$ are referred to as local variables; they are
to denote the sum of integrals over all cells and $\left<\cdot,
\cdot\right>_{\partial \mathcal{T}} = \sum_K \left<\cdot,
\cdot\right>_{\partial K}$ to denote integration over all faces of all cells,
-i.e., interior faces are visited twice. When combining the contribution from
-both elements sharing a face, the above equation yields terms familiar for DG
-with jumps of the solution over the cell boundaries.
+i.e., interior faces are visited twice, once from each side and with
+the corresponding normal vectors. When combining the contribution from
+both elements sharing a face, the above equation yields terms familiar
+from the DG method, with jumps of the solution over the cell boundaries.
In the equation above, the space $\mathcal {W}_h^{p}$ for the scalar variable
<i>u<sub>h</sub></i> is defined as the space of functions that are tensor
<ol>
<li> The matrix $A$ consists of local-local coupling terms. These arise when the
local weighting functions $(\mathbf{v}, w)$ multiply the local solution terms
- $(\mathbf{q}_h, u_h)$.
+ $(\mathbf{q}_h, u_h)$. Because the elements are discontinuous, $A$
+ is block diagonal.
<li> The matrix $B$ represents the local-face coupling. These are the terms
with weighting functions $(\mathbf{v}, w)$ multiplying the skeleton variable
$\hat{u}_h$.
</ol>
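
Because $A$ is block diagonal, its inverse can be applied cell by cell, and the
condensation described above is, written out schematically (and keeping the
notation introduced earlier), nothing but block elimination of the local
unknowns:
@f{align*}
\begin{pmatrix} A & B \\ C & D \end{pmatrix}
\begin{pmatrix} U \\ \Lambda \end{pmatrix}
=
\begin{pmatrix} F \\ G \end{pmatrix}
\quad\Longrightarrow\quad
\left(D - C A^{-1} B\right) \Lambda = G - C A^{-1} F,
\quad
U = A^{-1}\left(F - B \Lambda\right),
@f}
which is exactly the sequence of steps listed before: build the Schur
complement and its right hand side cell by cell, solve the global trace system
for $\Lambda$, and finally recover $U$ element by element.
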
We now introduce a new variable $u_h^* \in \mathcal{V}_h^{p+1}$, which we find
-by the expression $|\kappa \nabla u_h^* + \mathbf{q}_h|^2$ over the cell
+by minimizing the expression $|\kappa \nabla u_h^* + \mathbf{q}_h|^2$ over the cell
<i>K</i> under the constraint $\left(1, u_h^*\right)_K = \left(1,
-u_h\right)_K$. This translates to the following system of equations:
+u_h\right)_K$. The constraint is necessary because the minimization
+functional does not determine the constant part of $u_h^*$. This
+translates to the following system of equations:
@f{eqnarray*}
\left(1, u_h^*\right)_K &=& \left(1, u_h\right)_K\\
-\left(\nabla w_h^*, \kappa \nabla u_h^*\right)_K &=&
+\left(\nabla w_h^*, \kappa \nabla u_h^*\right)_K &=&
-\left(\nabla w_h^*, \mathbf{q}_h\right)_K
\quad \text{for all } w_h^* \in \mathcal Q^{p+1}.
@f}
Besides implementing the above equations, the implementation below provides the following features:
<ul>
- <li> WorkStream to parallelize local solvers. Workstream is already used in
- step-32, step-44.
+ <li> WorkStream to parallelize local solvers. Workstream has been presented
+ in detail in step-9.
<li> Reconstruct the local DG solution from the trace.
<li> Post-processing the solution for superconvergence.
<li> DataOutFaces for direct output of the global skeleton solution.
its values are quite close along lines in the same coordinate direction. The
skeleton solution can be interpreted as a rubber spring between the two sides
that balances the jumps in the solution (or rather, the flux $\kappa \nabla u
-+ \mathbf{c} u$). As the mesh is refined, the jumps between the cells get
++ \mathbf{c} u$). From the picture at the top left, it is clear that
+the bulk solution frequently over- and undershoots and that the
+skeleton variable is indeed a better approximation to the exact
+solution; this explains why we can get a better solution using a
+postprocessing step.
+
+As the mesh is refined, the jumps between the cells get
small (we represent a smooth solution), and the skeleton solution approaches
the interior parts. For cycle 8, there is no visible difference in the two
variables. We also see how boundary conditions are implemented weakly and that
Q3 elements, global refinement:
cells dofs val L2 grad L2 val L2-post
- 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
- 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
- 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
- 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
- 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
- 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
- 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
- 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
- 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
- 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
+ 16 160 3.613e-01 - 1.891e+00 - 3.020e-01 -
+ 36 336 6.411e-02 4.26 5.081e-01 3.24 3.238e-02 5.51
+ 64 576 3.480e-02 2.12 2.533e-01 2.42 5.277e-03 6.31
+ 144 1248 8.297e-03 3.54 5.924e-02 3.58 6.330e-04 5.23
+ 256 2176 2.254e-03 4.53 1.636e-02 4.47 1.403e-04 5.24
+ 576 4800 4.558e-04 3.94 3.277e-03 3.96 1.844e-05 5.01
+ 1024 8448 1.471e-04 3.93 1.052e-03 3.95 4.378e-06 5.00
+ 2304 18816 2.956e-05 3.96 2.104e-04 3.97 5.750e-07 5.01
+ 4096 33280 9.428e-06 3.97 6.697e-05 3.98 1.362e-07 5.01
+ 9216 74496 1.876e-06 3.98 1.330e-05 3.99 1.788e-08 5.01
@endcode
global refinement was performed, also the convergence rates. The quadratic
convergence rates of Q1 elements in the $L_2$ norm for both the scalar
variable and the gradient variable is apparent, as is the cubic rate for the
-postprocessed scalar variable in the $L_2$ norm. Note that is is a distinctive
+postprocessed scalar variable in the $L_2$ norm. Note this distinctive
feature of an HDG solution. In typical continuous finite elements, the
gradient of the solution of order <i>p</i> converges at rate <i>p</i> only, as
opposed to <i>p</i>+1 for the actual solution. Even though superconvergence
@code
Q1 elements, adaptive refinement:
cells dofs val L2 grad L2 val L2-post
- 8 144 7.122e+00 1.941e+01 6.102e+00
- 29 500 3.309e+00 1.023e+01 2.145e+00
- 113 1792 2.204e+00 1.023e+01 1.912e+00
- 379 5732 6.085e-01 5.008e+00 2.233e-01
- 1317 19412 1.543e-01 1.464e+00 4.196e-02
- 4579 64768 5.058e-02 5.611e-01 9.521e-03
- 14596 199552 2.129e-02 3.122e-01 4.569e-03
- 46180 611400 1.033e-02 1.622e-01 1.684e-03
-144859 1864212 5.007e-03 8.371e-02 7.364e-04
-451060 5684508 2.518e-03 4.562e-02 3.070e-04
+ 8 144 7.122e+00 1.941e+01 6.102e+00
+ 29 500 3.309e+00 1.023e+01 2.145e+00
+ 113 1792 2.204e+00 1.023e+01 1.912e+00
+ 379 5732 6.085e-01 5.008e+00 2.233e-01
+ 1317 19412 1.543e-01 1.464e+00 4.196e-02
+ 4579 64768 5.058e-02 5.611e-01 9.521e-03
+ 14596 199552 2.129e-02 3.122e-01 4.569e-03
+ 46180 611400 1.033e-02 1.622e-01 1.684e-03
+144859 1864212 5.007e-03 8.371e-02 7.364e-04
+451060 5684508 2.518e-03 4.562e-02 3.070e-04
Q1 elements, global refinement:
cells dofs val L2 grad L2 val L2-post
- 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
- 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
- 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
- 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
- 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
- 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
- 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
- 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
- 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
-110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
+ 8 144 7.122e+00 - 1.941e+01 - 6.102e+00 -
+ 27 432 5.491e+00 0.64 2.184e+01 -0.29 4.448e+00 0.78
+ 64 960 3.646e+00 1.42 1.299e+01 1.81 3.306e+00 1.03
+ 216 3024 1.595e+00 2.04 8.550e+00 1.03 1.441e+00 2.05
+ 512 6912 6.922e-01 2.90 5.306e+00 1.66 2.511e-01 6.07
+ 1728 22464 2.915e-01 2.13 2.490e+00 1.87 8.588e-02 2.65
+ 4096 52224 1.684e-01 1.91 1.453e+00 1.87 4.055e-02 2.61
+ 13824 172800 7.972e-02 1.84 6.861e-01 1.85 1.335e-02 2.74
+ 32768 405504 4.637e-02 1.88 3.984e-01 1.89 5.932e-03 2.82
+110592 1354752 2.133e-02 1.92 1.830e-01 1.92 1.851e-03 2.87
Q3 elements, global refinement:
cells dofs val L2 grad L2 val L2-post
- 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
- 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
- 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
- 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
- 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
- 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
- 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
- 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
- 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
+ 8 576 5.670e+00 - 1.868e+01 - 5.462e+00 -
+ 27 1728 1.048e+00 4.16 6.988e+00 2.42 8.011e-01 4.73
+ 64 3840 2.831e-01 4.55 2.710e+00 3.29 1.363e-01 6.16
+ 216 12096 7.883e-02 3.15 7.721e-01 3.10 2.158e-02 4.55
+ 512 27648 3.642e-02 2.68 3.305e-01 2.95 5.231e-03 4.93
+ 1728 89856 8.546e-03 3.58 7.581e-02 3.63 7.640e-04 4.74
+ 4096 208896 2.598e-03 4.14 2.313e-02 4.13 1.783e-04 5.06
+ 13824 691200 5.314e-04 3.91 4.697e-03 3.93 2.355e-05 4.99
+ 32768 1622016 1.723e-04 3.91 1.517e-03 3.93 5.602e-06 4.99
110592 5419008 3.482e-05 3.94 3.055e-04 3.95 7.374e-07 5.00
@endcode
volume-to-surface effect for discontinuous solutions with too much of the
solution living on the surfaces and hence duplicating work when the elements
are linear. Put in other words, DG methods are often most efficient when used
-at relatively high order, despite their focus on discontinuous (and hence,
+at relatively high order, despite their focus on a discontinuous (and hence,
seemingly less accurate) representation of solutions.
<h4>Results for 3D</h4>
sparse matrix structures and similar solvers (optimal AMG preconditioners for
both without particular tuning of the AMG parameters on any of them) to give a
fair picture of the cost versus accuracy of two methods, on a toy example. It
-should be noted however that GMG for continuous finite elements is about a
+should be noted however that geometric multigrid (GMG) for continuous finite elements is about a
factor four to five faster for <i>p</i>=3 and <i>p</i>=6. The authors of this
tutorial have not seen similarly advanced solvers for the HDG linear
systems. Also, there are other implementation aspects for CG available such as
continuous elements more competitive. Again, it is not clear to the authors of
the tutorial whether similar improvements could be made for HDG.
+
<h3>Possibilities for improvements</h3>
As already mentioned in the introduction, one possibility is to implement
</table>
As can be seen from the table, the solver and assembly calls dominate the
-runtime of the program. This also gives a clear indication of where an
-improvement makes most sense.
+runtime of the program. This also gives a clear indication of where
+improvements would make the most sense:
<ol>
<li> Better linear solvers: We use a BiCGStab iterative solver without
preconditioner, where the number of iteration increases with increasing
problem size (the number of iterations for Q1 elements and global
- refinements start at 35 for the small sizes but increase up to 701 for the
+  refinements starts at 35 for the small sizes but increases up to 701 for the
largest size). To do better, one could for example use an algebraic
- multigrid preconditioner from Trilinos. For diffusion-dominated problems as
+ multigrid preconditioner from Trilinos. For diffusion-dominated
+ problems such as
the problem at hand with finer meshes, such a solver can be designed that
uses the matrix-vector products from the more efficient ChunkSparseMatrix on
the finest level, as long as we are not working in parallel with MPI. For
While the second aim is difficult to describe in general terms without
reference to the code, we will discuss the other two aims in the
following. The use of multiple threads will then be detailed at the
-relevant places within the program. Furthermore, there exists a report on this
-subject, which is also available online from the ``Documentation'' section of
-the deal.II homepage.
+relevant places within the program. We will, however, follow the
+general discussion of the WorkStream approach detailed in the
+@ref threads "Parallel computing with multiple processors accessing shared memory"
+documentation module.
<h3>Discretizing the advection equation</h3>
// The following two files provide classes and information for multithreaded
// programs. In the first one, the classes and functions are declared which we
-// need to start new threads and to wait for threads to return (i.e. the
-// <code>Thread</code> class and the <code>new_thread</code> functions). The
+// need to do assembly in parallel (i.e. the
+// <code>WorkStream</code> namespace). The
// second file has a class <code>MultithreadInfo</code> (and a global object
// <code>multithread_info</code> of that type) which can be used to query the
// number of processors in your system, which is often useful when deciding
{
using namespace dealii;
- namespace Assembler
- {
- struct Scratch
- {
- Scratch() {};
- };
-
- struct CopyData
- {
- CopyData() {};
-
- unsigned int dofs_per_cell;
- std::vector<types::global_dof_index> local_dof_indices;
- // We declare cell matrix and cell right hand side...
- FullMatrix<double> cell_matrix;
- Vector<double> cell_rhs;
- };
- }
-
// @sect3{AdvectionProblem class declaration}
// Following we declare the main class of this program. It is very much
private:
void setup_system ();
- // The next function will be used to assemble the matrix. However, unlike
- // in the previous examples, the function will not do the work itself, but
- // rather it will split the range of active cells into several chunks and
- // then call the following function on each of these chunks. The rationale
- // is that matrix assembly can be parallelized quite well, as the
+
+ // The next set of functions will be used to assemble the
+ // matrix. However, unlike in the previous examples, the
+ // <code>assemble_system()</code> function will not do the work
+ // itself, but rather will delegate the actual assembly to helper
+ // functions <code>assemble_local_system()</code> and
+ // <code>copy_local_to_global()</code>. The rationale is that
+ // matrix assembly can be parallelized quite well, as the
// computation of the local contributions on each cell is entirely
- // independent of other cells, and we only have to synchronize when we add
- // the contribution of a cell to the global matrix. The second function,
- // doing the actual work, accepts two parameters which denote the first
- // cell on which it shall operate, and the one past the last.
+ // independent of other cells, and we only have to synchronize
+ // when we add the contribution of a cell to the global
+ // matrix.
//
// The strategy for parallelization we choose here is one of the
- // possibilities mentioned in detail in the @ref threads module in the
- // documentation. While it is a straightforward way to distribute the work
- // for assembling the system onto multiple processor cores. As mentioned
- // in the module, there are other, and possibly better suited, ways to
- // achieve the same goal.
+ // possibilities mentioned in detail in the @ref threads module in
+ // the documentation. Specifically, we will use the WorkStream
+ // approach discussed there. Since there is so much documentation
+ // in this module, we will not repeat the rationale for the design
+ // choices here (for example, if you read through the module
+ // mentioned above, you will understand what the purpose of the
+ // <code>AssemblyScratchData</code> and
+ // <code>AssemblyCopyData</code> structures is). Rather, we will
+ // only discuss the specific implementation.
+ //
+ // If you read the page mentioned above, you will find that in
+ // order to parallelize assembly, we need two data structures --
+ // one that corresponds to data that we need during local
+ // integration ("scratch data", i.e., things we only need as
+ // temporary storage), and one that carries information from the
+ // local integration to the function that then adds the local
+ // contributions to the corresponding elements of the global
+ // matrix. The former of these typically contains the FEValues and
+ // FEFaceValues objects, whereas the latter has the local matrix,
+ // local right hand side, and information about which degrees of
+ // freedom live on the cell for which we are assembling a local
+ // contribution. With this information, the following should be
+ // relatively self-explanatory:
+ struct AssemblyScratchData
+ {
+ AssemblyScratchData (const FiniteElement<dim> &fe);
+ AssemblyScratchData (const AssemblyScratchData &scratch_data);
+
+ FEValues<dim> fe_values;
+ FEFaceValues<dim> fe_face_values;
+ };
+
+ struct AssemblyCopyData
+ {
+ FullMatrix<double> cell_matrix;
+ Vector<double> cell_rhs;
+ std::vector<types::global_dof_index> local_dof_indices;
+ };
+
void assemble_system ();
- void build_local_system (typename DoFHandler<dim>::active_cell_iterator const &cell,
-                             Assembler::Scratch &scratch,Assembler::CopyData &copy_data);
-    void copy_local_to_global (Assembler::CopyData const &copy_data);
-
+ void local_assemble_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch,
+                                AssemblyCopyData &copy_data);
+    void copy_local_to_global (const AssemblyCopyData &copy_data);
+
// The following functions again are as in previous examples, as are the
// subsequent variables.
DeclException0 (ExcInsufficientDirections);
private:
- typedef std::pair<unsigned int,unsigned int> IndexInterval;
+ template <int dim>
+ struct EstimateScratchData
+ {
+ EstimateScratchData (const FiniteElement<dim> &fe,
+ const Vector<double> &solution);
+ EstimateScratchData (const EstimateScratchData &data);
+
+ FEValues<dim> fe_midpoint_value;
+ Vector<double> solution;
+ };
+
+ // There is nothing to copy but WorkStream requires a CopyData structure
+ template <int dim>
+ struct EstimateCopyData
+ {
+ EstimateCopyData () {}
+ };
template <int dim>
- static void estimate_interval (const DoFHandler<dim> &dof,
- const Vector<double> &solution,
- const IndexInterval &index_interval,
- Vector<float> &error_per_cell);
+ static void estimate_cell (
+ const SynchronousIterators<std_cxx1x::tuple<typename DoFHandler<dim>::active_cell_iterator,
+ Vector<float>::iterator> > &cell,
+ EstimateScratchData<dim> &scratch_data,
+      const EstimateCopyData<dim> &copy_data);
+    // There is nothing to copy, but WorkStream requires a copy function
+ template <int dim>
+    static void dummy_copy(const EstimateCopyData<dim> &copy_data) {}
};
// In the following function, the matrix and right hand side are
// assembled. As stated in the documentation of the main class above, it
// does not do this itself, but rather delegates to the function following
- // next, by splitting up the range of cells into chunks of approximately the
- // same size and assembling on each of these chunks in parallel.
+ // next, utilizing the WorkStream concept discussed in @ref threads .
+ //
+ // If you have looked through the @ref threads module, you will have
+ // seen that assembling in parallel does not take an incredible
+ // amount of extra code as long as you diligently describe what the
+ // scratch and copy data objects are, and if you define suitable
+  // functions for the local assembly and the copy operation from local
+ // contributions to global objects. This done, the following will do
+ // all the heavy lifting to get these operations done on multiple
+ // threads on as many cores as you have in your system:
template <int dim>
void AdvectionProblem<dim>::assemble_system ()
{
- // First, we want to find out how many threads shall assemble the matrix
- // in parallel. A reasonable choice would be that each processor in your
- // system processes one chunk of cells; if we were to use this
- // information, we could use the value of the global variable
- // <code>multithread_info.n_cpus</code>, which is determined at start-up
- // time of your program automatically. (Note that if the library was not
- // configured for multithreading, then the number of CPUs is set to one.)
- // However, sometimes there might be reasons to use another value. For
- // example, you might want to use less processors than there are in your
- // system in order not to use too many computational resources. For
- // this reason, the number of threads can be set by
- // <code>MultithreadInfo::set_thread_limit</code> and the current value
- // is returned by n_threads(). This
- // is also queried by functions inside the library to determine
- // how many threads they shall create.
-
- // It is worth noting, however, that this setup determines the load
- // distribution onto processor in a static way: it does not take into
- // account that some other part of our program may also be running
- // something in parallel at the same time as we get here (this is not the
- // case in the current program, but may easily be the case in more complex
- // applications). A discussion of how to deal with this case can be found
- // in the @ref threads module.
- //
- // Next, we need an object which is capable of keeping track of the
- // threads we created, and allows us to wait until they all have finished
- // (to <code>join</code> them in the language of threads). The
- // Threads::ThreadGroup class does this, which is basically just a
- // container for objects of type Threads::Thread that represent a single
- // thread; Threads::Thread is what the Threads::new_thread function below
- // will return when we start a new thread.
- //
- // Note that both Threads::ThreadGroup and Threads::Thread have a template
- // argument that represents the return type of the function being called
- // on a separate thread. Since most of the functions that we will call on
- // different threads have return type <code>void</code>, the template
- // argument has a default value <code>void</code>, so that in that case it
- // can be omitted. (However, you still need to write the angle brackets,
- // even if they are empty.)
- //
- // If you did not configure for multithreading, then the
- // <code>new_thread</code> function that is supposed to start a new thread
- // in parallel only executes the function which should be run in parallel,
- // waits for it to return (i.e. the function is executed sequentially),
- // and puts the return value into the <code>Thread</code>
- // object. Likewise, the function <code>join</code> that is supposed to
- // wait for all spawned threads to return, returns immediately, as there
- // can't be any threads running.
-
- // Now we have to split the range of cells into chunks of approximately
- // the same size. Each thread will then assemble the local contributions
- // of the cells within its chunk and transfer these contributions to the
- // global matrix. As splitting a range of cells is a rather common task
- // when using multithreading, there is a function in the
- // <code>Threads</code> namespace that does exactly this. In fact, it does
- // this not only for a range of cell iterators, but for iterators in
- // general, so you could use it for <code>std::vector::iterator</code> or
- // usual pointers as well.
- //
- // The function returns a vector of pairs of iterators, where the first
- // denotes the first cell of each chunk, while the second denotes the one
- // past the last (this half-open interval is the usual convention in the
- // C++ standard library, so we keep to it). Note that we have to specify
- // the actual data type of the iterators in angle brackets to the
- // function. This is necessary, since it is a template function which
- // takes the data type of the iterators as template argument; in the
- // present case, however, the data types of the two first parameters
- // differ (<code>begin_active</code> returns an
- // <code>active_iterator</code>, while <code>end</code> returns a
- // <code>raw_iterator</code>), and in this case the C++ language requires
- // us to specify the template type explicitly. For brevity, we first
- // typedef this data type to an alias.
-
- typedef typename DoFHandler<dim>::active_cell_iterator active_cell_iterator;
-
- // Finally, for each of the chunks of iterators we have computed, start
- // one thread (or if not in multithread mode: execute assembly on these
- // chunks sequentially). This is done using the following sequence of
- // function calls:
-
- Assembler::Scratch scratch;
- Assembler::CopyData copy_data;
- WorkStream::run(dof_handler.begin_active(),dof_handler.end(),*this,
- &AdvectionProblem::build_local_system,&AdvectionProblem::copy_local_to_global,
- scratch,copy_data);
-
-
- // The reasons and internal workings of these functions can be found in
- // the report on the subject of multithreading, which is available online
- // as well. Suffice it to say that we create a new thread that calls the
- // <code>assemble_system_interval</code> function on the present object
- // (the <code>this</code> pointer), with the arguments following in the
- // second set of parentheses passed as parameters. The Threads::new_thread
- // function returns an object of type Threads::Thread, which we put into
- // the <code>threads</code> container. If a thread exits, the return value
- // of the function being called is put into a place such that the thread
- // objects can access it using their <code>return_value</code> function;
- // since the function we call doesn't have a return value, this does not
- // apply here. Note that you can copy around thread objects freely, and
- // that of course they will still represent the same thread.
-
- // When all the threads are running, the only thing we have to do is wait
- // for them to finish. This is necessary of course, as we can't proceed
- // with our tasks before the matrix and right hand side are
- // assembled. Waiting for all the threads to finish can be done using the
- // <code>join_all</code> function in the <code>ThreadGroup</code>
- // container, which just calls <code>join</code> on each of the thread
- // objects it stores.
- //
- // Again, if the library was not configured to use multithreading, then
- // no threads can run in parallel and the function returns immediately.
+ WorkStream::run(dof_handler.begin_active(),
+ dof_handler.end(),
+ *this,
+ &AdvectionProblem::local_assemble_system,
+ &AdvectionProblem::copy_local_to_global,
+ AssemblyScratchData(fe),
+ AssemblyCopyData());
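+
+    // (Note that this call uses the variant of WorkStream::run that takes
+    // the object on which the two member functions are to be called plus
+    // pointers to these member functions; the step-13 and step-14 codes
+    // shown earlier achieve the same by wrapping the member functions with
+    // std_cxx1x::bind instead. Both forms lead to the same behavior.)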
// After the matrix has been assembled in parallel, we still have to
// eliminate hanging node constraints. This is something that can't be
// done on each of the threads separately, so we have to do it now.
- hanging_node_constraints.condense (system_matrix);
- hanging_node_constraints.condense (system_rhs);
// Note also, that unlike in previous examples, there are no boundary
// conditions to be applied to the system of equations. This, of course,
// is due to the fact that we have included them into the weak formulation
// of the problem.
+ hanging_node_constraints.condense (system_matrix);
+ hanging_node_constraints.condense (system_rhs);
}
+ // As already mentioned above, we need to have scratch objects for
+ // the parallel computation of local contributions. These objects
+ // contain FEValues and FEFaceValues objects, and so we will need to
+ // have constructors and copy constructors that allow us to create
+ // them. In initializing them, note first that we use bilinear
+  // elements, so Gauss formulae with two points in each space
+ // direction are sufficient. For the cell terms we need the values
+ // and gradients of the shape functions, the quadrature points in
+ // order to determine the source density and the advection field at
+ // a given point, and the weights of the quadrature points times the
+ // determinant of the Jacobian at these points. In contrast, for the
+ // boundary integrals, we don't need the gradients, but rather the
+ // normal vectors to the cells. This determines which update flags
+ // we will have to pass to the constructors of the members of the
+ // class:
+ template <int dim>
+ AdvectionProblem<dim>::AssemblyScratchData::
+ AssemblyScratchData (const FiniteElement<dim> &fe)
+ :
+ fe_values (fe,
+ QGauss<dim>(2),
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values),
+ fe_face_values (fe,
+ QGauss<dim-1>(2),
+ update_values | update_quadrature_points |
+ update_JxW_values | update_normal_vectors)
+ {}
+
+
+
+ template <int dim>
+ AdvectionProblem<dim>::AssemblyScratchData::
+ AssemblyScratchData (const AssemblyScratchData &scratch_data)
+ :
+ fe_values (scratch_data.fe_values.get_fe(),
+ scratch_data.fe_values.get_quadrature(),
+ update_values | update_gradients |
+ update_quadrature_points | update_JxW_values),
+ fe_face_values (scratch_data.fe_face_values.get_fe(),
+ scratch_data.fe_face_values.get_quadrature(),
+ update_values | update_quadrature_points |
+ update_JxW_values | update_normal_vectors)
+ {}
+
+
+
+
// Now, this is the function that does the actual work. It is not very
// different from the <code>assemble_system</code> functions of previous
// example programs, so we will again only comment on the differences. The
// mathematical stuff follows closely what we have said in the introduction.
+ //
+ // There are a number of points worth mentioning here, though. The
+ // first one is that we have moved the FEValues and FEFaceValues
+ // objects into the ScratchData object. We have done so because the
+ // alternative would have been to simply create one every time we
+ // get into this function -- i.e., on every cell. It now turns out
+ // that the FEValues classes were written with the explicit goal of
+ // moving everything that remains the same from cell to cell into
+ // the construction of the object, and only do as little work as
+ // possible in FEValues::reinit() whenever we move to a new
+ // cell. What this means is that it would be very expensive to
+ // create a new object of this kind in this function as we would
+ // have to do it for every cell -- exactly the thing we wanted to
+ // avoid with the FEValues class. Instead, what we do is create it
+ // only once (or a small number of times) in the scratch objects and
+ // then re-use it as often as we can.
+ //
+ // This begs the question of whether there are other objects we
+ // create in this function whose creation is expensive compared to
+ // its use. Indeed, at the top of the function, we declare all sorts
+ // of objects. The <code>AdvectionField</code>,
+ // <code>RightHandSide</code> and <code>BoundaryValues</code> do not
+ // cost much to create, so there is no harm here. However,
+ // allocating memory in creating the <code>rhs_values</code> and
+ // similar variables below typically costs a significant amount of
+ // time, compared to just accessing the (temporary) values we store
+ // in them. Consequently, these would be candidates for moving into
+ // the <code>AssemblyScratchData</code> class. We will leave this as
+ // an exercise.
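+  //
+  // (As a sketch of this exercise, and not something the program as shown
+  // actually does, the scratch object could for example also carry such an
+  // array,
+  // @code
+  //   struct AssemblyScratchData
+  //   {
+  //     FEValues<dim>       fe_values;
+  //     FEFaceValues<dim>   fe_face_values;
+  //     std::vector<double> rhs_values;
+  //   };
+  // @endcode
+  // sized once in the constructors, so that
+  // <code>local_assemble_system()</code> only fills it on each cell instead
+  // of allocating it anew; the same idea applies to the other temporary
+  // arrays mentioned above.)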
template <int dim>
void
AdvectionProblem<dim>::
- build_local_system (typename DoFHandler<dim>::active_cell_iterator const &cell,
- Assembler::Scratch &scratch,
-                      Assembler::CopyData &copy_data)
+ local_assemble_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
+ AssemblyScratchData &scratch_data,
+                         AssemblyCopyData &copy_data)
{
// First of all, we will need some objects that describe boundary values,
// right hand side function and the advection field. As we will only
const RightHandSide<dim> right_hand_side;
const BoundaryValues<dim> boundary_values;
- // Next we need quadrature formula for the cell terms, but also for the
- // integral over the inflow boundary, which will be a face integral. As we
- // use bilinear elements, Gauss formulae with two points in each space
- // direction are sufficient.
- QGauss<dim> quadrature_formula(2);
- QGauss<dim-1> face_quadrature_formula(2);
-
- // Finally, we need objects of type <code>FEValues</code> and
- // <code>FEFaceValues</code>. For the cell terms we need the values and
- // gradients of the shape functions, the quadrature points in order to
- // determine the source density and the advection field at a given point,
- // and the weights of the quadrature points times the determinant of the
- // Jacobian at these points. In contrast, for the boundary integrals, we
- // don't need the gradients, but rather the normal vectors to the cells.
- FEValues<dim> fe_values (fe, quadrature_formula,
- update_values | update_gradients |
- update_quadrature_points | update_JxW_values);
- FEFaceValues<dim> fe_face_values (fe, face_quadrature_formula,
- update_values | update_quadrature_points |
- update_JxW_values | update_normal_vectors);
-
// Then we define some abbreviations to avoid unnecessarily long lines:
- copy_data.dofs_per_cell = fe.dofs_per_cell;
- const unsigned int n_q_points = quadrature_formula.size();
- const unsigned int n_face_q_points = face_quadrature_formula.size();
+ const unsigned int dofs_per_cell = fe.dofs_per_cell;
+ const unsigned int n_q_points = scratch_data.fe_values.get_quadrature().size();
+ const unsigned int n_face_q_points = scratch_data.fe_face_values.get_quadrature().size();
// We declare cell matrix and cell right hand side...
- copy_data.cell_matrix = FullMatrix<double> (copy_data.dofs_per_cell, copy_data.dofs_per_cell);
- copy_data.cell_rhs = Vector<double> (copy_data.dofs_per_cell);
+ copy_data.cell_matrix.reinit (dofs_per_cell, dofs_per_cell);
+ copy_data.cell_rhs.reinit (dofs_per_cell);
// ... an array to hold the global indices of the degrees of freedom of
// the cell on which we are presently working...
- copy_data.local_dof_indices.resize(copy_data.dofs_per_cell);
+ copy_data.local_dof_indices.resize(dofs_per_cell);
  // ... and arrays in which the values of right hand side, advection
// direction, and boundary values will be stored, for cell and face
// ... then initialize the <code>FEValues</code> object...
- fe_values.reinit (cell);
+ scratch_data.fe_values.reinit (cell);
// ... obtain the values of right hand side and advection directions
// at the quadrature points...
- advection_field.value_list (fe_values.get_quadrature_points(),
+ advection_field.value_list (scratch_data.fe_values.get_quadrature_points(),
advection_directions);
- right_hand_side.value_list (fe_values.get_quadrature_points(),
+ right_hand_side.value_list (scratch_data.fe_values.get_quadrature_points(),
rhs_values);
// ... set the value of the streamline diffusion parameter as
// ... and assemble the local contributions to the system matrix and
// right hand side as also discussed above:
for (unsigned int q_point=0; q_point<n_q_points; ++q_point)
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
{
- for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
copy_data.cell_matrix(i,j) += ((advection_directions[q_point] *
- fe_values.shape_grad(j,q_point) *
- (fe_values.shape_value(i,q_point) +
+ scratch_data.fe_values.shape_grad(j,q_point) *
+ (scratch_data.fe_values.shape_value(i,q_point) +
delta *
(advection_directions[q_point] *
- fe_values.shape_grad(i,q_point)))) *
- fe_values.JxW(q_point));
+ scratch_data.fe_values.shape_grad(i,q_point)))) *
+ scratch_data.fe_values.JxW(q_point));
- copy_data.cell_rhs(i) += ((fe_values.shape_value(i,q_point) +
+ copy_data.cell_rhs(i) += ((scratch_data.fe_values.shape_value(i,q_point) +
delta *
(advection_directions[q_point] *
- fe_values.shape_grad(i,q_point)) ) *
+ scratch_data.fe_values.shape_grad(i,q_point)) ) *
rhs_values[q_point] *
- fe_values.JxW (q_point));
- };
+ scratch_data.fe_values.JxW (q_point));
+ }
- // Besides the cell terms which we have build up now, the bilinear
+ // Besides the cell terms which we have built up now, the bilinear
// form of the present problem also contains terms on the boundary of
// the domain. Therefore, we have to check whether any of the faces of
// this cell are on the boundary of the domain, and if so assemble the
// domain. Just as for the usual FEValues object which we have
// used in previous examples and also above, we have to
// reinitialize the FEFaceValues object for the present face:
- fe_face_values.reinit (cell, face);
+ scratch_data.fe_face_values.reinit (cell, face);
// For the quadrature points at hand, we ask for the values of
// the inflow function and for the direction of flow:
- boundary_values.value_list (fe_face_values.get_quadrature_points(),
+ boundary_values.value_list (scratch_data.fe_face_values.get_quadrature_points(),
face_boundary_values);
- advection_field.value_list (fe_face_values.get_quadrature_points(),
+ advection_field.value_list (scratch_data.fe_face_values.get_quadrature_points(),
face_advection_directions);
// Now loop over all quadrature points and see whether it is on
// so if the advection direction points into the domain, its
// scalar product with the normal vector must be negative):
for (unsigned int q_point=0; q_point<n_face_q_points; ++q_point)
- if (fe_face_values.normal_vector(q_point) *
+ if (scratch_data.fe_face_values.normal_vector(q_point) *
face_advection_directions[q_point]
< 0)
  // If the face is part of the inflow boundary, then compute the
// hand side, using the values obtained from the
// FEFaceValues object and the formulae discussed in the
// introduction:
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
+ for (unsigned int i=0; i<dofs_per_cell; ++i)
{
- for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
+ for (unsigned int j=0; j<dofs_per_cell; ++j)
copy_data.cell_matrix(i,j) -= (face_advection_directions[q_point] *
- fe_face_values.normal_vector(q_point) *
- fe_face_values.shape_value(i,q_point) *
- fe_face_values.shape_value(j,q_point) *
- fe_face_values.JxW(q_point));
+ scratch_data.fe_face_values.normal_vector(q_point) *
+ scratch_data.fe_face_values.shape_value(i,q_point) *
+ scratch_data.fe_face_values.shape_value(j,q_point) *
+ scratch_data.fe_face_values.JxW(q_point));
copy_data.cell_rhs(i) -= (face_advection_directions[q_point] *
- fe_face_values.normal_vector(q_point) *
+ scratch_data.fe_face_values.normal_vector(q_point) *
face_boundary_values[q_point] *
- fe_face_values.shape_value(i,q_point) *
- fe_face_values.JxW(q_point));
- };
- };
+ scratch_data.fe_face_values.shape_value(i,q_point) *
+ scratch_data.fe_face_values.JxW(q_point));
+ }
+ }
// Now go on by transferring the local contributions to the system of
}
-
+
+ // The second function we needed to write was the one that copies
+ // the local contributions that the previous function computed (and
+ // put into the copy data object) into the global matrix and right
+ // hand side vector. This is essentially what we always had
+ // as the last block of code when assembling something on every
+ // cell. The following should therefore be pretty obvious:
template <int dim>
void
- AdvectionProblem<dim>::copy_local_to_global (Assembler::CopyData const ©_data)
+ AdvectionProblem<dim>::copy_local_to_global (const AssemblyCopyData ©_data)
{
+ for (unsigned int i=0; i<copy_data.local_dof_indices.size(); ++i)
+ {
+ for (unsigned int j=0; j<copy_data.local_dof_indices.size(); ++j)
+ system_matrix.add (copy_data.local_dof_indices[i],
+ copy_data.local_dof_indices[j],
+ copy_data.cell_matrix(i,j));
- // Up until now we have not taken care of the fact that this function
- // might run more than once in parallel, as the operations above only
- // work on variables that are local to this function, or if they are
- // global (such as the information on the grid, the DoF handler, or
- // the DoF numbers) they are only read. Thus, the different threads do
- // not disturb each other.
- //
- // On the other hand, we would now like to write the local
- // contributions to the global system of equations into the global
- // objects. This needs some kind of synchronization, as if we would
- // not take care of the fact that multiple threads write into the
- // matrix at the same time, we might be surprised that one threads
- // reads data from the matrix that another thread is presently
- // overwriting, or similar things. Thus, to make sure that only one
- // thread operates on these objects at a time, we have to lock
- // it. This is done using a <code>Mutex</code>, which is short for
- // <code>mutually exclusive</code>: a thread that wants to write to
- // the global objects acquires this lock, but has to wait if it is
- // presently owned by another thread. If it has acquired the lock, it
- // can be sure that no other thread is presently writing to the
- // matrix, and can do so freely. When finished, we release the lock
- // again so as to allow other threads to acquire it and write to the
- // matrix.
- for (unsigned int i=0; i<copy_data.dofs_per_cell; ++i)
- {
- for (unsigned int j=0; j<copy_data.dofs_per_cell; ++j)
- system_matrix.add (copy_data.local_dof_indices[i],
- copy_data.local_dof_indices[j],
- copy_data.cell_matrix(i,j));
-
- system_rhs(copy_data.local_dof_indices[i]) += copy_data.cell_rhs(i);
- };
- // At this point, the locked operations on the global matrix are done,
- // i.e. other threads can now enter into the protected section by
- // acquiring the lock. Two final notes are in place here, however:
- //
- // 1. If the library was not configured for multithreading, then
- // there can't be parallel threads and there is no need to
- // synchronize. Thus, the <code>lock</code> and <code>release</code>
- // functions are no-ops, i.e. they return without doing anything.
- //
- // 2. In order to work properly, it is essential that all threads try
- // to acquire the same lock. This, of course, can not be achieved if
- // the lock is a local variable, as then each thread would acquire its
- // own lock. Therefore, the lock variable is a member variable of the
- // class; since all threads execute member functions of the same
- // object, they have the same <code>this</code> pointer and therefore
- // also operate on the same <code>lock</code>.
+ system_rhs(copy_data.local_dof_indices[i]) += copy_data.cell_rhs(i);
+ }
}
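+
+ // (For reference, since <code>assemble_system()</code> itself is not
+ // shown in this excerpt: the two functions above would be hooked up
+ // through a call along the lines of
+ //   WorkStream::run (dof_handler.begin_active(), dof_handler.end(),
+ //                    *this,
+ //                    &AdvectionProblem::local_assemble_system,
+ //                    &AdvectionProblem::copy_local_to_global,
+ //                    AssemblyScratchData(fe),
+ //                    AssemblyCopyData());
+ // i.e., the member-function variant of WorkStream::run with a sample
+ // scratch object and a sample copy data object. This is a sketch of
+ // the intended use, not a verbatim quote of the program.)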
-
+
else
{
refine_grid ();
- };
+ }
std::cout << " Number of active cells: "
assemble_system ();
solve ();
output_results (cycle);
- };
+ }
DataOut<dim> data_out;
data_out.attach_dof_handler (dof_handler);
// @sect3{GradientEstimation class implementation}
+ // Constructor of the scratch data object used by estimate_cell()
+ template <int dim>
+ GradientEstimation::EstimateScratchData<dim>
+ ::EstimateScratchData (const FiniteElement<dim> &fe,
+ const Vector<double> &solution)
+ :
+ fe_midpoint_value(fe,
+ QMidpoint<dim> (),
+ update_values | update_quadrature_points),
+ solution(solution)
+ {}
+
+
+
+ // Copy constructor of the scratch data object used by estimate_cell()
+ template <int dim>
+ GradientEstimation::EstimateScratchData<dim>
+ ::EstimateScratchData(const EstimateScratchData &scratch_data)
+ :
+ fe_midpoint_value(scratch_data.fe_midpoint_value.get_fe(),
+ scratch_data.fe_midpoint_value.get_quadrature(),
+ update_values | update_quadrature_points),
+ solution(scratch_data.solution)
+ {}
+
+
// Now for the implementation of the <code>GradientEstimation</code>
  // class. The first function does not do much except delegate work to the
// other function:
ExcInvalidVectorLength (error_per_cell.size(),
dof_handler.get_tria().n_active_cells()));
- // Next, we subdivide the range of cells into chunks of equal size. Just
- // as we have used the function <code>Threads::split_range</code> when
- // assembling above, there is a function that computes intervals of
- // roughly equal size from a larger interval. This is used here:
- const unsigned int n_threads = multithread_info.n_threads();
- std::vector<IndexInterval> index_intervals
- = Threads::split_interval (0, dof_handler.get_tria().n_active_cells(),
- n_threads);
-
  // This time we let WorkStream distribute the work onto the available
  // threads rather than managing a group of threads ourselves. Note
// that as the function called is not a member function, but rather a
// or the other compiler, but have to take a temporary variable for that
// purpose. Here, in this case, Compaq's <code>cxx</code> compiler choked
// on the code so we use this workaround with the function pointer:
- Threads::ThreadGroup<> threads;
- void (*estimate_interval_ptr) (const DoFHandler<dim> &,
- const Vector<double> &,
- const IndexInterval &,
- Vector<float> &)
- = &GradientEstimation::template estimate_interval<dim>;
- for (unsigned int i=0; i<n_threads; ++i)
- threads += Threads::new_thread (estimate_interval_ptr,
- dof_handler, solution,
- index_intervals[i],
- error_per_cell);
- // Ok, now the threads are at work, and we only have to wait for them to
- // finish their work:
- threads.join_all ();
+ void (*estimate_cell_ptr) (const SynchronousIterators<std_cxx1x::tuple<
+ typename DoFHandler<dim>::active_cell_iterator,Vector<float>::iterator> > &cell,
+ EstimateScratchData<dim> &scratch_data,
+ const EstimateCopyData<dim> ©_data)
+ = &GradientEstimation::template estimate_cell<dim>;
+
+ void (*dummy_copy) (const EstimateCopyData<dim> ©_data)
+ = &GradientEstimation::template dummy_copy<dim>;
+
+ typedef std_cxx1x::tuple<typename DoFHandler<dim>::active_cell_iterator,Vector<float>::iterator>
+ Iterators;
+ SynchronousIterators<Iterators> begin_sync_it(Iterators(dof_handler.begin_active(),
+ error_per_cell.begin()));
+ SynchronousIterators<Iterators> end_sync_it(Iterators(dof_handler.end(),error_per_cell.end()));
+
+ WorkStream::run(begin_sync_it,end_sync_it,
+ estimate_cell_ptr,
+ dummy_copy,
+ EstimateScratchData<dim> (dof_handler.get_fe(),solution),
+ EstimateCopyData<dim> ());
+
// Note that if the value of the variable
// <code>multithread_info.n_threads()</code> was one, or if the
// library was not configured to use threads, then the sequence of
// Now for the details:
template <int dim>
void
- GradientEstimation::estimate_interval (const DoFHandler<dim> &dof_handler,
- const Vector<double> &solution,
- const IndexInterval &index_interval,
- Vector<float> &error_per_cell)
- {
- // First we need a way to extract the values of the given finite element
- // function at the center of the cells. As usual with values of finite
- // element functions, we use an object of type <code>FEValues</code>, and
- // we use (or mis-use in this case) the midpoint quadrature rule to get at
- // the values at the center. Note that the <code>FEValues</code> object
- // only needs to compute the values at the centers, and the location of
- // the quadrature points in real space in order to get at the vectors
- // <code>y</code>.
- QMidpoint<dim> midpoint_rule;
- FEValues<dim> fe_midpoint_value (dof_handler.get_fe(),
- midpoint_rule,
- update_values | update_quadrature_points);
-
- // Then we need space foe the tensor <code>Y</code>, which is the sum of
+ GradientEstimation::estimate_cell (const SynchronousIterators<std_cxx1x::tuple<
+ typename DoFHandler<dim>::active_cell_iterator,Vector<float>::iterator> > &cell,
+ EstimateScratchData<dim> &scratch_data,
+ const EstimateCopyData<dim> ©_data)
+ {
+ // We need space for the tensor <code>Y</code>, which is the sum of
// outer products of the y-vectors.
Tensor<2,dim> Y;
- // Then define iterators into the cells and into the output vector, which
- // are to be looped over by the present instance of this function. We get
- // start and end iterators over cells by setting them to the first active
- // cell and advancing them using the given start and end index. Note that
- // we can use the <code>advance</code> function of the standard C++
- // library, but that we have to cast the distance by which the iterator is
- // to be moved forward to a signed quantity in order to avoid warnings by
- // the compiler.
- typename DoFHandler<dim>::active_cell_iterator cell, endc;
-
- cell = dof_handler.begin_active();
- advance (cell, static_cast<signed int>(index_interval.first));
-
- endc = dof_handler.begin_active();
- advance (endc, static_cast<signed int>(index_interval.second));
-
- // Getting an iterator into the output array is simpler. We don't need an
- // end iterator, as we always move this iterator forward by one element
- // for each cell we are on, but stop the loop when we hit the end cell, so
- // we need not have an end element for this iterator.
- Vector<float>::iterator
- error_on_this_cell = error_per_cell.begin() + index_interval.first;
-
// Then we allocate a vector to hold iterators to all active neighbors of
// a cell. We reserve the maximal number of active neighbors in order to
active_neighbors.reserve (GeometryInfo<dim>::faces_per_cell *
GeometryInfo<dim>::max_children_per_face);
- // Well then, after all these preliminaries, lets start the computations:
- for (; cell!=endc; ++cell, ++error_on_this_cell)
- {
- // First initialize the <code>FEValues</code> object, as well as the
- // <code>Y</code> tensor:
- fe_midpoint_value.reinit (cell);
- Y.clear ();
-
- // Then allocate the vector that will be the sum over the y-vectors
- // times the approximate directional derivative:
- Tensor<1,dim> projected_gradient;
-
-
- // Now before going on first compute a list of all active neighbors of
- // the present cell. We do so by first looping over all faces and see
- // whether the neighbor there is active, which would be the case if it
- // is on the same level as the present cell or one level coarser (note
- // that a neighbor can only be once coarser than the present cell, as
- // we only allow a maximal difference of one refinement over a face in
- // deal.II). Alternatively, the neighbor could be on the same level
- // and be further refined; then we have to find which of its children
- // are next to the present cell and select these (note that if a child
- // of of neighbor of an active cell that is next to this active cell,
- // needs necessarily be active itself, due to the one-refinement rule
- // cited above).
- //
- // Things are slightly different in one space dimension, as there the
- // one-refinement rule does not exist: neighboring active cells may
- // differ in as many refinement levels as they like. In this case, the
- // computation becomes a little more difficult, but we will explain
- // this below.
- //
- // Before starting the loop over all neighbors of the present cell, we
- // have to clear the array storing the iterators to the active
- // neighbors, of course.
- active_neighbors.clear ();
- for (unsigned int face_no=0; face_no<GeometryInfo<dim>::faces_per_cell; ++face_no)
- if (! cell->at_boundary(face_no))
+ typename DoFHandler<dim>::active_cell_iterator cell_it(std_cxx1x::get<0>(cell.iterators));
+
+ // First initialize the <code>FEValues</code> object, as well as the
+ // <code>Y</code> tensor:
+ scratch_data.fe_midpoint_value.reinit (cell_it);
+
+ // Then allocate the vector that will be the sum over the y-vectors
+ // times the approximate directional derivative:
+ Tensor<1,dim> projected_gradient;
+
+
+ // Now before going on first compute a list of all active neighbors of
+ // the present cell. We do so by first looping over all faces and see
+ // whether the neighbor there is active, which would be the case if it
+ // is on the same level as the present cell or one level coarser (note
+ // that a neighbor can only be once coarser than the present cell, as
+ // we only allow a maximal difference of one refinement over a face in
+ // deal.II). Alternatively, the neighbor could be on the same level
+ // and be further refined; then we have to find which of its children
+ // are next to the present cell and select these (note that a child
+ // of a neighbor of an active cell that is next to this active cell
+ // is necessarily active itself, due to the one-refinement rule
+ // cited above).
+ //
+ // Things are slightly different in one space dimension, as there the
+ // one-refinement rule does not exist: neighboring active cells may
+ // differ in as many refinement levels as they like. In this case, the
+ // computation becomes a little more difficult, but we will explain
+ // this below.
+ //
+ // Before starting the loop over all neighbors of the present cell, we
+ // have to clear the array storing the iterators to the active
+ // neighbors, of course.
+ active_neighbors.clear ();
+ for (unsigned int face_no=0; face_no<GeometryInfo<dim>::faces_per_cell; ++face_no)
+ if (! std_cxx1x::get<0>(cell.iterators)->at_boundary(face_no))
+ {
+ // First define an abbreviation for the iterator to the face and
+ // the neighbor
+ const typename DoFHandler<dim>::face_iterator
+ face = std_cxx1x::get<0>(cell.iterators)->face(face_no);
+ const typename DoFHandler<dim>::cell_iterator
+ neighbor = std_cxx1x::get<0>(cell.iterators)->neighbor(face_no);
+
+ // Then check whether the neighbor is active. If it is, then it
+ // is on the same level or one level coarser (if we are not in
+ // 1D), and we are interested in it in any case.
+ if (neighbor->active())
+ active_neighbors.push_back (neighbor);
+ else
{
- // First define an abbreviation for the iterator to the face and
- // the neighbor
- const typename DoFHandler<dim>::face_iterator
- face = cell->face(face_no);
- const typename DoFHandler<dim>::cell_iterator
- neighbor = cell->neighbor(face_no);
-
- // Then check whether the neighbor is active. If it is, then it
- // is on the same level or one level coarser (if we are not in
- // 1D), and we are interested in it in any case.
- if (neighbor->active())
- active_neighbors.push_back (neighbor);
- else
+ // If the neighbor is not active, then check its children.
+ if (dim == 1)
{
- // If the neighbor is not active, then check its children.
- if (dim == 1)
- {
- // To find the child of the neighbor which bounds to the
- // present cell, successively go to its right child if
- // we are left of the present cell (n==0), or go to the
- // left child if we are on the right (n==1), until we
- // find an active cell.
- typename DoFHandler<dim>::cell_iterator
- neighbor_child = neighbor;
- while (neighbor_child->has_children())
- neighbor_child = neighbor_child->child (face_no==0 ? 1 : 0);
-
- // As this used some non-trivial geometrical intuition,
- // we might want to check whether we did it right,
- // i.e. check whether the neighbor of the cell we found
- // is indeed the cell we are presently working
- // on. Checks like this are often useful and have
- // frequently uncovered errors both in algorithms like
- // the line above (where it is simple to involuntarily
- // exchange <code>n==1</code> for <code>n==0</code> or
- // the like) and in the library (the assumptions
- // underlying the algorithm above could either be wrong,
- // wrongly documented, or are violated due to an error
- // in the library). One could in principle remove such
- // checks after the program works for some time, but it
- // might be a good things to leave it in anyway to check
- // for changes in the library or in the algorithm above.
- //
- // Note that if this check fails, then this is certainly
- // an error that is irrecoverable and probably qualifies
- // as an internal error. We therefore use a predefined
- // exception class to throw here.
- Assert (neighbor_child->neighbor(face_no==0 ? 1 : 0)==cell,
- ExcInternalError());
-
- // If the check succeeded, we push the active neighbor
- // we just found to the stack we keep:
- active_neighbors.push_back (neighbor_child);
- }
- else
- // If we are not in 1d, we collect all neighbor children
- // `behind' the subfaces of the current face
- for (unsigned int subface_no=0; subface_no<face->n_children(); ++subface_no)
- active_neighbors.push_back (
- cell->neighbor_child_on_subface(face_no, subface_no));
- };
- };
-
- // OK, now that we have all the neighbors, lets start the computation
- // on each of them. First we do some preliminaries: find out about the
- // center of the present cell and the solution at this point. The
- // latter is obtained as a vector of function values at the quadrature
- // points, of which there are only one, of course. Likewise, the
- // position of the center is the position of the first (and only)
- // quadrature point in real space.
- const Point<dim> this_center = fe_midpoint_value.quadrature_point(0);
-
- std::vector<double> this_midpoint_value(1);
- fe_midpoint_value.get_function_values (solution, this_midpoint_value);
-
-
- // Now loop over all active neighbors and collect the data we
- // need. Allocate a vector just like <code>this_midpoint_value</code>
- // which we will use to store the value of the solution in the
- // midpoint of the neighbor cell. We allocate it here already, since
- // that way we don't have to allocate memory repeatedly in each
- // iteration of this inner loop (memory allocation is a rather
- // expensive operation):
- std::vector<double> neighbor_midpoint_value(1);
- typename std::vector<typename DoFHandler<dim>::active_cell_iterator>::const_iterator
- neighbor_ptr = active_neighbors.begin();
- for (; neighbor_ptr!=active_neighbors.end(); ++neighbor_ptr)
- {
- // First define an abbreviation for the iterator to the active
- // neighbor cell:
- const typename DoFHandler<dim>::active_cell_iterator
- neighbor = *neighbor_ptr;
-
- // Then get the center of the neighbor cell and the value of the
- // finite element function thereon. Note that for this information
- // we have to reinitialize the <code>FEValues</code> object for
- // the neighbor cell.
- fe_midpoint_value.reinit (neighbor);
- const Point<dim> neighbor_center = fe_midpoint_value.quadrature_point(0);
-
- fe_midpoint_value.get_function_values (solution,
- neighbor_midpoint_value);
-
- // Compute the vector <code>y</code> connecting the centers of the
- // two cells. Note that as opposed to the introduction, we denote
- // by <code>y</code> the normalized difference vector, as this is
- // the quantity used everywhere in the computations.
- Point<dim> y = neighbor_center - this_center;
- const double distance = std::sqrt(y.square());
- y /= distance;
-
- // Then add up the contribution of this cell to the Y matrix...
- for (unsigned int i=0; i<dim; ++i)
- for (unsigned int j=0; j<dim; ++j)
- Y[i][j] += y[i] * y[j];
-
- // ... and update the sum of difference quotients:
- projected_gradient += (neighbor_midpoint_value[0] -
- this_midpoint_value[0]) /
- distance *
- y;
- };
-
- // If now, after collecting all the information from the neighbors, we
- // can determine an approximation of the gradient for the present
- // cell, then we need to have passed over vectors <code>y</code> which
- // span the whole space, otherwise we would not have all components of
- // the gradient. This is indicated by the invertibility of the matrix.
- //
- // If the matrix should not be invertible, this means that the present
- // cell had an insufficient number of active neighbors. In contrast to
- // all previous cases, where we raised exceptions, this is, however,
- // not a programming error: it is a runtime error that can happen in
- // optimized mode even if it ran well in debug mode, so it is
- // reasonable to try to catch this error also in optimized mode. For
- // this case, there is the <code>AssertThrow</code> macro: it checks
- // the condition like the <code>Assert</code> macro, but not only in
- // debug mode; it then outputs an error message, but instead of
- // terminating the program as in the case of the <code>Assert</code>
- // macro, the exception is thrown using the <code>throw</code> command
- // of C++. This way, one has the possibility to catch this error and
- // take reasonable counter actions. One such measure would be to
- // refine the grid globally, as the case of insufficient directions
- // can not occur if every cell of the initial grid has been refined at
- // least once.
- AssertThrow (determinant(Y) != 0,
- ExcInsufficientDirections());
-
- // If, on the other hand the matrix is invertible, then invert it,
- // multiply the other quantity with it and compute the estimated error
- // using this quantity and the right powers of the mesh width:
- const Tensor<2,dim> Y_inverse = invert(Y);
-
- Point<dim> gradient;
- contract (gradient, Y_inverse, projected_gradient);
-
- *error_on_this_cell = (std::pow(cell->diameter(),
- 1+1.0*dim/2) *
- std::sqrt(gradient.square()));
- };
+ // To find the child of the neighbor which borders on the
+ // present cell, successively go to its right child if
+ // we are left of the present cell (n==0), or go to the
+ // left child if we are on the right (n==1), until we
+ // find an active cell.
+ typename DoFHandler<dim>::cell_iterator
+ neighbor_child = neighbor;
+ while (neighbor_child->has_children())
+ neighbor_child = neighbor_child->child (face_no==0 ? 1 : 0);
+
+ // As this used some non-trivial geometrical intuition,
+ // we might want to check whether we did it right,
+ // i.e. check whether the neighbor of the cell we found
+ // is indeed the cell we are presently working
+ // on. Checks like this are often useful and have
+ // frequently uncovered errors both in algorithms like
+ // the line above (where it is simple to involuntarily
+ // exchange <code>n==1</code> for <code>n==0</code> or
+ // the like) and in the library (the assumptions
+ // underlying the algorithm above could either be wrong,
+ // wrongly documented, or are violated due to an error
+ // in the library). One could in principle remove such
+ // checks after the program works for some time, but it
+ // might be a good thing to leave it in anyway to check
+ // for changes in the library or in the algorithm above.
+ //
+ // Note that if this check fails, then this is certainly
+ // an error that is irrecoverable and probably qualifies
+ // as an internal error. We therefore use a predefined
+ // exception class to throw here.
+ Assert (neighbor_child->neighbor(face_no==0 ? 1 : 0)
+ ==std_cxx1x::get<0>(cell.iterators),ExcInternalError());
+
+ // If the check succeeded, we push the active neighbor
+ // we just found to the stack we keep:
+ active_neighbors.push_back (neighbor_child);
+ }
+ else
+ // If we are not in 1d, we collect all neighbor children
+ // `behind' the subfaces of the current face
+ for (unsigned int subface_no=0; subface_no<face->n_children(); ++subface_no)
+ active_neighbors.push_back (
+ std_cxx1x::get<0>(cell.iterators)->neighbor_child_on_subface(face_no,subface_no));
+ }
+ }
+
+ // OK, now that we have all the neighbors, let us start the computation
+ // on each of them. First we do some preliminaries: find out about the
+ // center of the present cell and the solution at this point. The
+ // latter is obtained as a vector of function values at the quadrature
+ // points, of which there is only one, of course. Likewise, the
+ // position of the center is the position of the first (and only)
+ // quadrature point in real space.
+ const Point<dim> this_center = scratch_data.fe_midpoint_value.quadrature_point(0);
+
+ std::vector<double> this_midpoint_value(1);
+ scratch_data.fe_midpoint_value.get_function_values (scratch_data.solution, this_midpoint_value);
+
+
+ // Now loop over all active neighbors and collect the data we
+ // need. Allocate a vector just like <code>this_midpoint_value</code>
+ // which we will use to store the value of the solution in the
+ // midpoint of the neighbor cell. We allocate it here already, since
+ // that way we don't have to allocate memory repeatedly in each
+ // iteration of this inner loop (memory allocation is a rather
+ // expensive operation):
+ std::vector<double> neighbor_midpoint_value(1);
+ typename std::vector<typename DoFHandler<dim>::active_cell_iterator>::const_iterator
+ neighbor_ptr = active_neighbors.begin();
+ for (; neighbor_ptr!=active_neighbors.end(); ++neighbor_ptr)
+ {
+ // First define an abbreviation for the iterator to the active
+ // neighbor cell:
+ const typename DoFHandler<dim>::active_cell_iterator
+ neighbor = *neighbor_ptr;
+
+ // Then get the center of the neighbor cell and the value of the
+ // finite element function thereon. Note that for this information
+ // we have to reinitialize the <code>FEValues</code> object for
+ // the neighbor cell.
+ scratch_data.fe_midpoint_value.reinit (neighbor);
+ const Point<dim> neighbor_center = scratch_data.fe_midpoint_value.quadrature_point(0);
+
+ scratch_data.fe_midpoint_value.get_function_values (scratch_data.solution,
+ neighbor_midpoint_value);
+
+ // Compute the vector <code>y</code> connecting the centers of the
+ // two cells. Note that as opposed to the introduction, we denote
+ // by <code>y</code> the normalized difference vector, as this is
+ // the quantity used everywhere in the computations.
+ Point<dim> y = neighbor_center - this_center;
+ const double distance = std::sqrt(y.square());
+ y /= distance;
+
+ // Then add up the contribution of this cell to the Y matrix...
+ for (unsigned int i=0; i<dim; ++i)
+ for (unsigned int j=0; j<dim; ++j)
+ Y[i][j] += y[i] * y[j];
+
+ // ... and update the sum of difference quotients:
+ projected_gradient += (neighbor_midpoint_value[0] -
+ this_midpoint_value[0]) /
+ distance *
+ y;
+ }
+
+ // Now that we have collected all the information from the neighbors,
+ // we can determine an approximation of the gradient for the present
+ // cell, provided that the vectors <code>y</code> we passed over
+ // span the whole space; otherwise we would not have all components of
+ // the gradient. This is indicated by the invertibility of the matrix.
+ //
+ // If the matrix is not invertible, this means that the present
+ // cell had an insufficient number of active neighbors. In contrast to
+ // all previous cases, where we raised exceptions, this is, however,
+ // not a programming error: it is a runtime error that can happen in
+ // optimized mode even if it ran well in debug mode, so it is
+ // reasonable to try to catch this error also in optimized mode. For
+ // this case, there is the <code>AssertThrow</code> macro: it checks
+ // the condition like the <code>Assert</code> macro, but not only in
+ // debug mode; it then outputs an error message, but instead of
+ // terminating the program as in the case of the <code>Assert</code>
+ // macro, the exception is thrown using the <code>throw</code> command
+ // of C++. This way, one has the possibility to catch this error and
+ // take reasonable counter actions. One such measure would be to
+ // refine the grid globally, as the case of insufficient directions
+ // can not occur if every cell of the initial grid has been refined at
+ // least once.
+ AssertThrow (determinant(Y) != 0,
+ ExcInsufficientDirections());
+
+ // If, on the other hand, the matrix is invertible, then invert it,
+ // multiply the other quantity with it and compute the estimated error
+ // using this quantity and the right powers of the mesh width:
+ const Tensor<2,dim> Y_inverse = invert(Y);
+
+ Point<dim> gradient;
+ contract (gradient, Y_inverse, projected_gradient);
+
+ *(std_cxx1x::get<1>(cell.iterators)) = (std::pow(std_cxx1x::get<0>(cell.iterators)->diameter(),
+ 1+1.0*dim/2) *
+ std::sqrt(gradient.square()));
+
}
}
<< "----------------------------------------------------"
<< std::endl;
return 1;
- };
+ }
return 0;
}
/**
* Set the file name and line of where the exception appeared as
* well as the violated condition and the name of the exception as
- * a char pointer.
+ * a char pointer. This function also populates the stacktrace.
*/
void set_fields (const char *file,
const int line,
* variable. Zero if the system does not support stack traces.
*/
int n_stacktrace_frames;
+
+private:
+ /**
+   * Internal function that generates the C-style string that gets printed
+   * by exception::what(). Called by the ExceptionBase constructor and
+   * set_fields().
+ */
+ void generate_message();
};
* This class is used instead of a true lock class when not using
 * multithreading. It allows one to write programs such that they start new
* threads and/or lock objects in multithreading mode, and use dummy thread
- * management and synchronisation classes instead when running in
+ * management and synchronization classes instead when running in
* single-thread mode. Specifically, the <tt>new_thread</tt> functions only
 * call the function but wait for it to return instead of running it on
 * another thread, and the mutexes do nothing really. The only reason to
* waiting for some other part of the program to reach a certain point
* of execution), the constructor of this class throws an exception if
* the <tt>count</tt> argument denoting the number of threads that need to
- * be synchronised is not equal to one.
+ * be synchronized is not equal to one.
*
* @author Wolfgang Bangerth, 2001
*/
* function based on the template
* arguments, and whether the
* second argument is a const or
- * non-const class, dependening on
+ * non-const class, depending on
* which the member function will
 * also be const or
* non-const. There are
* by having the descriptor keep
* a pointer to itself and reset
* it to zero once it is done --
- * effectly keeping the use
+ * effectively keeping the use
* pointer above zero as long as
* work is going on.
*
* having the descriptor keep a
* pointer to itself and reset
* it to zero once it is done
- * -- effectly keeping the use
+ * -- effectively keeping the use
* pointer above zero as long
* as work is going on.
*/
/**
- * A class whose main template function supports running multiple
+ * A namespace whose main template function supports running multiple
* threads each of which operates on a subset of the given range of
* objects. The class uses the Intel Threading Building Blocks (TBB)
* to load balance the individual subranges onto the available
* threads. For a lengthy discussion of the rationale of this class,
* see the @ref threads "Parallel computing with multiple processors"
- * module.
+ * module. It is used in the tutorial first in step-9, and again in
+ * step-13, step-14, step-32 and others.
*
* The class is built on the following premise: One frequently has some work
* that needs to be done on a sequence of objects; a prototypical example is
* sequentially.
*
* @ingroup threads
- * @author Wolfgang Bangerth, 2007, 2008, 2009
+ * @author Wolfgang Bangerth, 2007, 2008, 2009, 2013. Bruno Turcksin, 2013.
*/
namespace WorkStream
{
// and that is available also as a fall-back whenever via boost or similar
/**
- * A class that creates a sequence of
- * items from a range of iterators.
+ * A class that creates a sequence of items from a range of iterators.
*/
template <typename Iterator,
typename ScratchData,
{
public:
/**
- * A data type that we use to identify
- * items to be worked on. This is the structure
- * that is passed around between the different parts of
- * the WorkStream implementation to identify what needs
- * to be done by the various stages of the pipeline.
+ * A data type that we use to identify items to be worked on. This is
+ * the structure that is passed around between the different parts of
+ * the WorkStream implementation to identify what needs to be done by
+ * the various stages of the pipeline.
*/
struct ItemType
{
/**
* Default constructor.
- * Initialize everything that doesn't
- * have a default constructor itself.
+ * Initialize everything that doesn't have a default constructor
+ * itself.
*/
ItemType ()
:
/**
- * Constructor. Take an iterator
- * range, the size of a buffer that
- * can hold items, and the sample
- * additional data object that will
- * be passed to each worker and
- * copier function invokation.
+ * Constructor. Take an iterator range, the size of a buffer that can
+ * hold items, and the sample additional data object that will be passed
+     * to each worker and copier function invocation.
*/
IteratorRangeToItemStream (const Iterator &begin,
const Iterator &end,
}
- // return an invalid
- // item since we are at
- // the end of the
+ // return an invalid item since we are at the end of the
// pipeline
return 0;
}
private:
/**
- * Pointer to the function
- * that does the copying of
- * data.
+ * Pointer to the function that does the copying of data.
*/
const std_cxx1x::function<void (const CopyData &)> copier;
};
/**
- * This is the main function of the
- * WorkStream concept, doing work as
- * described in the introduction to this
- * namespace.
+ * This is the main function of the WorkStream concept, doing work as
+ * described in the introduction to this namespace.
*
- * This is the function that can be used
- * for worker and copier objects that are
- * either pointers to non-member
- * functions or objects that allow to be
- * called with an operator(), for example
- * objects created by std::bind.
+ * This is the function that can be used for worker and copier objects that
+   * are either pointers to non-member functions or objects that can be
+ * called with an operator(), for example objects created by std::bind.
*
- * The argument passed as @p end must be
- * convertible to the same type as
- * @p begin, but doesn't have to be of the
- * same type itself. This allows to write
- * code like
- * <code>WorkStream().run(dof_handler.begin_active(),
- * dof_handler.end(), ...</code> where
- * the first is of type
- * DoFHandler::active_cell_iterator
- * whereas the second is of type
+ * The argument passed as @p end must be convertible to the same type as @p
+   * begin, but doesn't have to be of the same type itself. This allows one to
+   * write code like <code>WorkStream::run(dof_handler.begin_active(),
+ * dof_handler.end(), ...</code> where the first is of type
+ * DoFHandler::active_cell_iterator whereas the second is of type
* DoFHandler::raw_cell_iterator.
*
- * The two data types
- * <tt>ScratchData</tt> and
- * <tt>CopyData</tt> need to have a
- * working copy
- * constructor. <tt>ScratchData</tt>
- * is only used in the
- * <tt>worker</tt> function, while
- * <tt>CopyData</tt> is the object
- * passed from the <tt>worker</tt>
- * to the <tt>copier</tt>.
+ * The two data types <tt>ScratchData</tt> and <tt>CopyData</tt> need to
+ * have a working copy constructor. <tt>ScratchData</tt> is only used in the
+ * <tt>worker</tt> function, while <tt>CopyData</tt> is the object passed
+ * from the <tt>worker</tt> to the <tt>copier</tt>.
*
- * The @p queue_length argument indicates
- * the number of items that can be live
- * at any given time. Each item consists
- * of @p chunk_size elements of the input
- * stream that will be worked on by the
- * worker and copier functions one after
- * the other on the same thread.
+ * The @p queue_length argument indicates the number of items that can be
+ * live at any given time. Each item consists of @p chunk_size elements of
+ * the input stream that will be worked on by the worker and copier
+ * functions one after the other on the same thread.
*
- * @note If your data objects are large,
- * or their constructors are expensive,
- * it is helpful to keep in mind
- * that <tt>queue_length</tt>
- * copies of the <tt>ScratchData</tt>
- * object and
- * <tt>queue_length*chunk_size</tt>
- * copies of the <tt>CopyData</tt>
- * object are generated.
+ * @note If your data objects are large, or their constructors are
+ * expensive, it is helpful to keep in mind that <tt>queue_length</tt>
+ * copies of the <tt>ScratchData</tt> object and
+ * <tt>queue_length*chunk_size</tt> copies of the <tt>CopyData</tt> object
+ * are generated.
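+   *
+   * As a minimal usage sketch (the free functions <tt>worker()</tt> and
+   * <tt>copier()</tt> and the classes <tt>ScratchData</tt> and
+   * <tt>CopyData</tt> named here are placeholders, not part of the
+   * library):
+   * @code
+   *   WorkStream::run (dof_handler.begin_active(), dof_handler.end(),
+   *                    &worker, &copier,
+   *                    ScratchData(), CopyData());
+   * @endcode
+   * Here, <tt>worker(iterator, scratch, copy)</tt> computes local
+   * contributions into the copy object, and <tt>copier(copy)</tt>
+   * transfers them into the global objects.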
*/
template <typename Worker,
typename Copier,
ExcMessage ("The chunk_size must be at least one."));
(void)chunk_size; // removes -Wunused-parameter warning in optimized mode
- // if no work then skip. (only use
- // operator!= for iterators since we may
- // not have an equality comparison
- // operator)
+ // if no work then skip. (only use operator!= for iterators since we may
+ // not have an equality comparison operator)
if (!(begin != end))
return;
for (Iterator i=begin; i!=end; ++i)
{
- if (static_cast<const std_cxx1x::function<void (const Iterator &,
- ScratchData &,
- CopyData &)> >(worker))
- worker (i, scratch_data, copy_data);
- if (static_cast<const std_cxx1x::function<void (const CopyData &)> >
- (copier))
- copier (copy_data);
+        // need to check if the function is not the zero (empty) function. To
+        // check this, convert it to a std_cxx1x::function object and test it:
+ if (static_cast<const std_cxx1x::function<void (const Iterator &,
+ ScratchData &,
+ CopyData &)>& >(worker))
+ worker (i, scratch_data, copy_data);
+ if (static_cast<const std_cxx1x::function<void (const CopyData &)>& >
+ (copier))
+ copier (copy_data);
}
}
#ifdef DEAL_II_WITH_THREADS
/**
- * This is the main function of the
- * WorkStream concept, doing work as
- * described in the introduction to this
- * namespace.
+ * This is the main function of the WorkStream concept, doing work as
+ * described in the introduction to this namespace.
*
- * This is the function that can be used
- * for worker and copier objects that are
- * either pointers to non-member
- * functions or objects that allow to be
- * called with an operator(), for example
- * objects created by std::bind.
+ * This is the function that can be used for worker and copier objects that
+   * are either pointers to non-member functions or objects that can be
+ * called with an operator(), for example objects created by std::bind.
*
- * The argument passed as @p end must be
- * convertible to the same type as
- * @p begin, but doesn't have to be of the
- * same type itself. This allows to write
- * code like
- * <code>WorkStream().run(dof_handler.begin_active(),
- * dof_handler.end(), ...</code> where
- * the first is of type
- * DoFHandler::active_cell_iterator
- * whereas the second is of type
+ * The argument passed as @p end must be convertible to the same type as @p
+   * begin, but doesn't have to be of the same type itself. This allows one to
+   * write code like <code>WorkStream::run(dof_handler.begin_active(),
+ * dof_handler.end(), ...</code> where the first is of type
+ * DoFHandler::active_cell_iterator whereas the second is of type
* DoFHandler::raw_cell_iterator.
*
- * The two data types
- * <tt>ScratchData</tt> and
- * <tt>CopyData</tt> need to have a
- * working copy
- * constructor. <tt>ScratchData</tt>
- * is only used in the
- * <tt>worker</tt> function, while
- * <tt>CopyData</tt> is the object
- * passed from the <tt>worker</tt>
- * to the <tt>copier</tt>.
+ * The two data types <tt>ScratchData</tt> and <tt>CopyData</tt> need to
+ * have a working copy constructor. <tt>ScratchData</tt> is only used in the
+ * <tt>worker</tt> function, while <tt>CopyData</tt> is the object passed
+ * from the <tt>worker</tt> to the <tt>copier</tt>.
*
- * The @p get_conflict_indices argument, is a function
- * that given an iterator computes the conflict indices
- * necessary for the graph_coloring. Graph coloring is
- * necessary to be able to copy the data in parallel. If
- * the number of elements in some colors is less than
- * @p chunk_size time multithread_info.n_threads(),
- * these elements are aggregated and copied serially.
+   * The @p get_conflict_indices argument is a function that, given an
+   * iterator, computes the conflict indices necessary for the
+   * graph_coloring. Graph coloring is necessary to be able to copy the data
+   * in parallel. If the number of elements in some colors is less than @p
+   * chunk_size times multithread_info.n_threads(), these elements are
+   * aggregated and copied serially.
*
- * The @p queue_length argument indicates
- * the number of items that can be live
- * at any given time. Each item consists
- * of @p chunk_size elements of the input
- * stream that will be worked on by the
- * worker and copier functions one after
- * the other on the same thread.
+ * The @p queue_length argument indicates the number of items that can be
+ * live at any given time. Each item consists of @p chunk_size elements of
+ * the input stream that will be worked on by the worker and copier
+ * functions one after the other on the same thread.
*
- * @note If your data objects are large,
- * or their constructors are expensive,
- * it is helpful to keep in mind
- * that <tt>queue_length</tt>
- * copies of the <tt>ScratchData</tt>
- * object and
- * <tt>queue_length*chunk_size</tt>
- * copies of the <tt>CopyData</tt>
- * object are generated.
+ * @note If your data objects are large, or their constructors are
+ * expensive, it is helpful to keep in mind that <tt>queue_length</tt>
+ * copies of the <tt>ScratchData</tt> object and
+ * <tt>queue_length*chunk_size</tt> copies of the <tt>CopyData</tt> object
+ * are generated.
*/
template <typename Worker,
typename Copier,
{
if (static_cast<const std_cxx1x::function<void (const Iterator &,
ScratchData &,
- CopyData &)> >(worker))
+ CopyData &)>& >(worker))
worker (i, scratch_data, copy_data);
- if (static_cast<const std_cxx1x::function<void (const CopyData &)> >
+ if (static_cast<const std_cxx1x::function<void (const CopyData &)>& >
(copier))
copier (copy_data);
}
/**
- * This is the main function of the
- * WorkStream concept, doing work as
- * described in the introduction to this
- * namespace.
+ * This is the main function of the WorkStream concept, doing work as
+ * described in the introduction to this namespace.
*
- * This is the function that can be
- * used for worker and copier functions
+ * This is the function that can be used for worker and copier functions
* that are member functions of a class.
*
- * The argument passed as @p end must be
- * convertible to the same type as
- * @p begin, but doesn't have to be of the
- * same type itself. This allows to write
- * code like
- * <code>WorkStream().run(dof_handler.begin_active(),
- * dof_handler.end(), ...</code> where
- * the first is of type
- * DoFHandler::active_cell_iterator
- * whereas the second is of type
+ * The argument passed as @p end must be convertible to the same type as @p
+   * begin, but doesn't have to be of the same type itself. This allows one to
+   * write code like <code>WorkStream::run(dof_handler.begin_active(),
+ * dof_handler.end(), ...</code> where the first is of type
+ * DoFHandler::active_cell_iterator whereas the second is of type
* DoFHandler::raw_cell_iterator.
*
- * The @p queue_length argument indicates
- * the number of items that can be live
- * at any given time. Each item consists
- * of @p chunk_size elements of the input
- * stream that will be worked on by the
- * worker and copier functions one after
- * the other on the same thread.
+ * The @p queue_length argument indicates the number of items that can be
+ * live at any given time. Each item consists of @p chunk_size elements of
+ * the input stream that will be worked on by the worker and copier
+ * functions one after the other on the same thread.
*
- * @note If your data objects are large,
- * or their constructors are expensive,
- * it is helpful to keep in mind
- * that <tt>queue_length</tt>
- * copies of the <tt>ScratchData</tt>
- * object and
- * <tt>queue_length*chunk_size</tt>
- * copies of the <tt>CopyData</tt>
- * object are generated.
+ * @note If your data objects are large, or their constructors are
+ * expensive, it is helpful to keep in mind that <tt>queue_length</tt>
+ * copies of the <tt>ScratchData</tt> object and
+ * <tt>queue_length*chunk_size</tt> copies of the <tt>CopyData</tt> object
+ * are generated.
*/
template <typename MainClass,
typename Iterator,
/**
- * This is the main function of the
- * WorkStream concept, doing work as
- * described in the introduction to this
- * namespace.
+ * This is the main function of the WorkStream concept, doing work as
+ * described in the introduction to this namespace.
*
- * This is the function that can be
- * used for worker and copier functions
+ * This is the function that can be used for worker and copier functions
* that are member functions of a class.
*
- * The argument passed as @p end must be
- * convertible to the same type as
- * @p begin, but doesn't have to be of the
- * same type itself. This allows to write
- * code like
- * <code>WorkStream().run(dof_handler.begin_active(),
- * dof_handler.end(), ...</code> where
- * the first is of type
- * DoFHandler::active_cell_iterator
- * whereas the second is of type
+ * The argument passed as @p end must be convertible to the same type as @p
+   * begin, but doesn't have to be of the same type itself. This allows one to
+   * write code like <code>WorkStream::run(dof_handler.begin_active(),
+ * dof_handler.end(), ...</code> where the first is of type
+ * DoFHandler::active_cell_iterator whereas the second is of type
* DoFHandler::raw_cell_iterator.
*
- * The @p get_conflict_indices argument, is a function
- * that given an iterator computes the conflict indices
- * necessary for the graph_coloring. Graph coloring is
- * necessary to be able to copy the data in parallel. If
- * the number of elements in some colors is less than
- * @p chunk_size time multithread_info.n_threads(),
- * these elements are aggregated and copied serially.
+   * The @p get_conflict_indices argument is a function that, given an
+   * iterator, computes the conflict indices necessary for the
+   * graph_coloring. Graph coloring is necessary to be able to copy the data
+   * in parallel. If the number of elements in some colors is less than @p
+   * chunk_size times multithread_info.n_threads(), these elements are
+   * aggregated and copied serially.
*
- * The @p queue_length argument indicates
- * the number of items that can be live
- * at any given time. Each item consists
- * of @p chunk_size elements of the input
- * stream that will be worked on by the
- * worker and copier functions one after
- * the other on the same thread.
+ * The @p queue_length argument indicates the number of items that can be
+ * live at any given time. Each item consists of @p chunk_size elements of
+ * the input stream that will be worked on by the worker and copier
+ * functions one after the other on the same thread.
*
- * @note If your data objects are large,
- * or their constructors are expensive,
- * it is helpful to keep in mind
- * that <tt>queue_length</tt>
- * copies of the <tt>ScratchData</tt>
- * object and
- * <tt>queue_length*chunk_size</tt>
- * copies of the <tt>CopyData</tt>
- * object are generated.
+ * @note If your data objects are large, or their constructors are
+ * expensive, it is helpful to keep in mind that <tt>queue_length</tt>
+ * copies of the <tt>ScratchData</tt> object and
+ * <tt>queue_length*chunk_size</tt> copies of the <tt>CopyData</tt> object
+ * are generated.
*/
template <typename MainClass,
typename Iterator,
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/fe/mapping_q1.h>
-#include <deal.II/base/std_cxx1x/tuple.h>
#include <bitset>
#include <list>
* equals the index in this container.
*/
std::vector<G> cells;
+
/**
* Index of the even children of an object.
* Since when objects are refined, all
void serialize(Archive &ar,
const unsigned int version);
};
+
/**
* Store boundary and material data. For
* example, in one dimension, this field
* function is only used by
* dealii::Triangulation::execute_refinement()
* in 3D.
- *
- * @warning Interestingly,
- * this function is not used
- * for 1D or 2D
- * triangulations, where it
- * seems the authors of the
- * refinement function insist
- * on reimplementing its
- * contents.
- *
- * @todo This function is
- * not instantiated for the
- * codim-one case
+ *
+ * @warning Interestingly,
+ * this function is not used
+ * for 1D or 2D
+ * triangulations, where it
+ * seems the authors of the
+ * refinement function insist
+ * on reimplementing its
+ * contents.
+ *
+ * @todo This function is
+ * not instantiated for the
+ * codim-one case
*/
template <int dim, int spacedim>
dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
* function is only used by
* dealii::Triangulation::execute_refinement()
* in 3D.
- *
- * @warning Interestingly,
- * this function is not used
- * for 1D or 2D
- * triangulations, where it
- * seems the authors of the
- * refinement function insist
- * on reimplementing its
- * contents.
- *
- * @todo This function is
- * not instantiated for the
- * codim-one case
+ *
+ * @warning Interestingly,
+ * this function is not used
+ * for 1D or 2D
+ * triangulations, where it
+ * seems the authors of the
+ * refinement function insist
+ * on reimplementing its
+ * contents.
+ *
+ * @todo This function is
+ * not instantiated for the
+ * codim-one case
*/
template <int dim, int spacedim>
dealii::TriaRawIterator<dealii::TriaAccessor<G::dimension,dim,spacedim> >
* a line/quad/etc.
*/
std::vector<UserData> user_data;
+
/**
* In order to avoid
* confusion between user
};
/**
- * For hexahedrons the data of TriaObjects needs to be extended, as we can obtain faces
+ * For hexahedra the data of TriaObjects needs to be extended, as we can obtain faces
* (quads) in non-standard-orientation, therefore we declare a class TriaObjectsHex, which
* additionally contains a bool-vector of the face-orientations.
*/
-
class TriaObjectsHex : public TriaObjects<TriaObject<3> >
{
public:
* has.
*/
std::vector<bool> face_orientations;
+
/**
* flip = rotation by 180 degrees
*/
std::vector<bool> face_flips;
+
/**
* rotation by 90 degrees
*/
* declare a class TriaObjectsQuad3D, which additionally contains a bool-vector
* of the line-orientations.
*/
-
class TriaObjectsQuad3D: public TriaObjects<TriaObject<2> >
{
public:
* importantly one can associate more than one matrix with the same
* sparsity pattern.
*
+ * The use of this class is demonstrated in step-51.
+ *
* @note Instantiations for this template are provided for <tt>@<float@> and
* @<double@></tt>; others can be generated in application programs (see the
* section on @ref Instantiations in the manual).
/**
* Structure representing the sparsity pattern of a sparse matrix.
- *
* This class is an example of the "static" type of @ref Sparsity.
- *
* It uses the compressed row storage (CSR) format to store data.
*
+ * The use of this class is demonstrated in step-51.
+ *
* @author Wolfgang Bangerth, 2008
*/
class ChunkSparsityPattern : public Subscriptor
typedef PETScWrappers::SparseMatrix SparseMatrix;
typedef PETScWrappers::SolverCG SolverCG;
-
+ typedef PETScWrappers::SolverGMRES SolverGMRES;
namespace MPI
{
typedef TrilinosWrappers::Vector Vector;
typedef TrilinosWrappers::SolverCG SolverCG;
+ typedef TrilinosWrappers::SolverGMRES SolverGMRES;
namespace MPI
{
template <typename number> class Vector;
}
}
-template <typename> class SolverCG;
cg_message.erase(0, pos+5);
std::string first = cg_message;
- first.erase(cg_message.find_first_of(" "), std::string::npos);
+ if (cg_message.find_first_of(" ") != std::string::npos)
+ first.erase(cg_message.find_first_of(" "), std::string::npos);
std::istringstream(first) >> min_eigenvalue;
- cg_message.erase(0, cg_message.find_last_of(" ")+1);
- std::istringstream(cg_message) >> max_eigenvalue;
+ if (cg_message.find_last_of(" ") != std::string::npos)
+ {
+ cg_message.erase(0, cg_message.find_last_of(" ")+1);
+ std::istringstream(cg_message) >> max_eigenvalue;
+ }
+ else max_eigenvalue = min_eigenvalue;
}
else
min_eigenvalue = max_eigenvalue = 1;
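// The guarded parsing above can be summarized by the following self-contained
// sketch (not deal.II code; the function name is made up for illustration):
// extract the first and last whitespace-separated tokens of a solver message
// of the form "... <min> <max>" without assuming that both separators exist.
#include <sstream>
#include <string>

void parse_eigenvalue_estimates (std::string message,
                                 double      &min_eigenvalue,
                                 double      &max_eigenvalue)
{
  std::string first = message;
  if (message.find_first_of(" ") != std::string::npos)
    first.erase (message.find_first_of(" "), std::string::npos);
  std::istringstream(first) >> min_eigenvalue;

  if (message.find_last_of(" ") != std::string::npos)
    {
      message.erase (0, message.find_last_of(" ") + 1);
      std::istringstream(message) >> max_eigenvalue;
    }
  else
    // only a single number present: use it for both estimates
    max_eigenvalue = min_eigenvalue;
}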
(src, data.matrix_diagonal_inverse, true, 0., 1./theta, update1,
update2, dst);
- for (unsigned int k=0; k<data.degree-1; ++k)
+ for (unsigned int k=0; k<data.degree; ++k)
{
matrix_ptr->Tvmult (update2, dst);
const double rhokp = 1./(2.*sigma-rhok);
std::fill (&*(dst.begin()+begin), &*(dst.begin()+end), s);
}
-
+
template <typename T>
void copy_subrange (const typename dealii::Vector<T>::size_type begin,
const typename dealii::Vector<T>::size_type end,
(end-begin)*sizeof(T));
}
-
+
template <typename T, typename U>
void copy_subrange (const typename dealii::Vector<T>::size_type begin,
const typename dealii::Vector<T>::size_type end,
copy_subrange (begin, end, src, dst);
}
-
+
template <typename T, typename U>
void copy_vector (const dealii::Vector<T> &src,
dealii::Vector<U> &dst)
Assert (vec_size == s.vec_size, ExcDimensionMismatch(vec_size, s.vec_size));
for (size_type i=0; i<vec_size; ++i)
- val[i] *= s.val[i];
+ val[i] *= Number(s.val[i]);
}
#include <deal.II/fe/fe_update_flags.h>
#include <deal.II/fe/mapping.h>
#include <deal.II/lac/vector.h>
+#include <deal.II/grid/filtered_iterator.h>
+
#include <utility>
DEAL_II_NAMESPACE_OPEN
{
public:
/**
- * This function is used to
- * obtain an approximation of the
- * gradient. Pass it the DoF
- * handler object that describes
- * the finite element field, a
- * nodal value vector, and
- * receive the cell-wise
- * Euclidian norm of the
+ * This function is used to obtain an approximation of the gradient. Pass it
+ * the DoF handler object that describes the finite element field, a nodal
+ * value vector, and receive the cell-wise Euclidean norm of the
* approximated gradient.
*
- * The last parameter denotes the
- * solution component, for which the
- * gradient is to be computed. It
- * defaults to the first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
+ * The last parameter denotes the solution component, for which the gradient
+ * is to be computed. It defaults to the first component. For scalar
+ * elements, this is the only valid choice; for vector-valued ones, any
+ * component between zero and the number of vector components can be given
+ * here.
+ *
+ * In a parallel computation the @p solution vector needs to contain the
+ * locally relevant unknowns.
*/
template <int dim, template <int, int> class DH, class InputVector, int spacedim>
static void
const unsigned int component = 0);
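// A minimal usage sketch of the interface documented above (an assumption:
// the usual DerivativeApproximation::approximate_gradient() entry point with
// a scalar field; in a parallel computation, "solution" would have to be a
// ghosted vector containing the locally relevant unknowns):
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/derivative_approximation.h>

using namespace dealii;

template <int dim>
void estimate_gradient_norms (const DoFHandler<dim> &dof_handler,
                              const Vector<double>  &solution,
                              Vector<float>         &gradient_norm_per_cell)
{
  // one entry per active cell, to be filled with the Euclidean norm of the
  // approximated gradient on that cell
  gradient_norm_per_cell.reinit (dof_handler.get_tria().n_active_cells());
  DerivativeApproximation::approximate_gradient (dof_handler,
                                                 solution,
                                                 gradient_norm_per_cell);
}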
/**
- * Calls the @p interpolate
- * function, see above, with
+ * Calls the @p interpolate function, see above, with
* <tt>mapping=MappingQ1@<dim@>()</tt>.
*/
template <int dim, template <int, int> class DH, class InputVector, int spacedim>
const unsigned int component = 0);
/**
- * This function is the analogue
- * to the one above, computing
- * finite difference
- * approximations of the tensor
- * of second derivatives. Pass it
- * the DoF handler object that
- * describes the finite element
- * field, a nodal value vector,
- * and receive the cell-wise
- * spectral norm of the
- * approximated tensor of second
- * derivatives. The spectral norm
- * is the matrix norm associated
- * to the $l_2$ vector norm.
+ * This function is the analogue to the one above, computing finite
+ * difference approximations of the tensor of second derivatives. Pass it
+ * the DoF handler object that describes the finite element field, a nodal
+ * value vector, and receive the cell-wise spectral norm of the approximated
+ * tensor of second derivatives. The spectral norm is the matrix norm
+ * associated to the $l_2$ vector norm.
+ *
+ * The last parameter denotes the solution component, for which the gradient
+ * is to be computed. It defaults to the first component. For scalar
+ * elements, this is the only valid choice; for vector-valued ones, any
+ * component between zero and the number of vector components can be given
+ * here.
*
- * The last parameter denotes the
- * solution component, for which
- * the gradient is to be
- * computed. It defaults to the
- * first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
+ * In a parallel computation the @p solution vector needs to contain the
+ * locally relevant unknowns.
*/
template <int dim, template <int, int> class DH, class InputVector, int spacedim>
static void
const unsigned int component = 0);
/**
- * Calls the @p interpolate
- * function, see above, with
+ * Calls the @p interpolate function, see above, with
* <tt>mapping=MappingQ1@<dim@>()</tt>.
*/
template <int dim, template <int, int> class DH, class InputVector, int spacedim>
const unsigned int component = 0);
/**
- * This function calculates the
- * <tt>order</tt>-th order approximate
- * derivative and returns the full tensor
- * for a single cell.
+ * This function calculates the <tt>order</tt>-th order approximate
+ * derivative and returns the full tensor for a single cell.
*
- * The last parameter denotes the
- * solution component, for which
- * the gradient is to be
- * computed. It defaults to the
- * first component. For
- * scalar elements, this is the only
- * valid choice; for vector-valued ones,
- * any component between zero and the
- * number of vector components can be
- * given here.
+ * The last parameter denotes the solution component, for which the gradient
+ * is to be computed. It defaults to the first component. For scalar
+ * elements, this is the only valid choice; for vector-valued ones, any
+ * component between zero and the number of vector components can be given
+ * here.
+ *
+ * In a parallel computation the @p solution vector needs to contain the
+ * locally relevant unknowns.
*/
template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
const unsigned int component = 0);
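// A companion sketch for the single-cell variant documented above (again an
// assumption: the DerivativeApproximation::approximate_derivative_tensor()
// name, with the derivative order selected by the rank of the output tensor):
#include <deal.II/base/tensor.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/derivative_approximation.h>

using namespace dealii;

template <int dim>
void second_derivative_on_cell (const DoFHandler<dim> &dof_handler,
                                const Vector<double>  &solution,
                                const typename DoFHandler<dim>::active_cell_iterator &cell,
                                Tensor<2,dim>         &second_derivative)
{
  // order = 2 is deduced from the rank of the output tensor
  DerivativeApproximation::approximate_derivative_tensor (dof_handler,
                                                          solution,
                                                          cell,
                                                          second_derivative);
}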
/**
- * Same as above, with
- * <tt>mapping=MappingQ1@<dim@>()</tt>.
+ * Same as above, with <tt>mapping=MappingQ1@<dim@>()</tt>.
*/
template <int dim, template <int, int> class DH, class InputVector, int order, int spacedim>
private:
/**
- * The following class is used to
- * describe the data needed to
- * compute the finite difference
- * approximation to the gradient
- * on a cell. See the general
- * documentation of this class
- * for more information on
+ * The following class is used to describe the data needed to compute the
+ * finite difference approximation to the gradient on a cell. See the
+ * general documentation of this class for more information on
* implementational details.
*
* @author Wolfgang Bangerth, 2000
{
public:
/**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
+ * Declare which data fields have to be updated for the function @p
+ * get_projected_derivative to work.
*/
static const UpdateFlags update_flags;
/**
- * Declare the data type which
- * holds the derivative described
- * by this class.
+ * Declare the data type which holds the derivative described by this
+ * class.
*/
typedef Tensor<1,dim> Derivative;
/**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
+ * Likewise declare the data type that holds the derivative projected to a
+ * certain direction.
*/
typedef double ProjectedDerivative;
/**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
+ * Given an FEValues object initialized to a cell, and a solution vector,
+ * extract the desired derivative at the first quadrature point (which is
+ * the only one, as we only evaluate the finite element field at the
+ * center of each cell).
*/
template <class InputVector, int spacedim>
static ProjectedDerivative
const unsigned int component);
/**
- * Return the norm of the
- * derivative object. Here, for
- * the gradient, we choose the
- * Euclidian norm of the gradient
- * vector.
+ * Return the norm of the derivative object. Here, for the gradient, we
+ * choose the Euclidean norm of the gradient vector.
*/
static double derivative_norm (const Derivative &d);
/**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
+ * If for the present derivative order, symmetrization of the derivative
+ * tensor is necessary, then do so on the argument.
*
- * For the first derivatives, no
- * such thing is necessary, so
- * this function is a no-op.
+ * For the first derivatives, no such thing is necessary, so this function
+ * is a no-op.
*/
static void symmetrize (Derivative &derivative_tensor);
};
/**
- * The following class is used to
- * describe the data needed to
- * compute the finite difference
- * approximation to the second
- * derivatives on a cell. See the
- * general documentation of this
- * class for more information on
+ * The following class is used to describe the data needed to compute the
+ * finite difference approximation to the second derivatives on a cell. See
+ * the general documentation of this class for more information on
* implementational details.
*
* @author Wolfgang Bangerth, 2000
{
public:
/**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
+ * Declare which data fields have to be updated for the function @p
+ * get_projected_derivative to work.
*/
static const UpdateFlags update_flags;
/**
- * Declare the data type which
- * holds the derivative described
- * by this class.
+ * Declare the data type which holds the derivative described by this
+ * class.
*/
typedef Tensor<2,dim> Derivative;
/**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
+ * Likewise declare the data type that holds the derivative projected to a
+ * certain direction.
*/
typedef Tensor<1,dim> ProjectedDerivative;
/**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
+ * Given an FEValues object initialized to a cell, and a solution vector,
+ * extract the desired derivative at the first quadrature point (which is
+ * the only one, as we only evaluate the finite element field at the
+ * center of each cell).
*/
template <class InputVector, int spacedim>
static ProjectedDerivative
const unsigned int component);
/**
- * Return the norm of the
- * derivative object. Here, for
- * the (symmetric) tensor of
- * second derivatives, we choose
- * the absolute value of the
- * largest eigenvalue, which is
- * the matrix norm associated to
- * the $l_2$ norm of vectors. It
- * is also the largest value of
- * the curvature of the solution.
+ * Return the norm of the derivative object. Here, for the (symmetric)
+ * tensor of second derivatives, we choose the absolute value of the
+ * largest eigenvalue, which is the matrix norm associated to the $l_2$
+ * norm of vectors. It is also the largest value of the curvature of the
+ * solution.
*/
static double derivative_norm (const Derivative &d);
/**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
+ * If for the present derivative order, symmetrization of the derivative
+ * tensor is necessary, then do so on the argument.
*
- * For the second derivatives,
- * each entry of the tensor is
- * set to the mean of its value
- * and the value of the transpose
- * element.
+ * For the second derivatives, each entry of the tensor is set to the mean
+ * of its value and the value of the transpose element.
*
- * Note that this function
- * actually modifies its
- * argument.
+ * Note that this function actually modifies its argument.
*/
static void symmetrize (Derivative &derivative_tensor);
};
{
public:
/**
- * Declare which data fields have
- * to be updated for the function
- * @p get_projected_derivative
- * to work.
+ * Declare which data fields have to be updated for the function @p
+ * get_projected_derivative to work.
*/
static const UpdateFlags update_flags;
typedef Tensor<3,dim> Derivative;
/**
- * Likewise declare the data type
- * that holds the derivative
- * projected to a certain
- * directions.
+ * Likewise declare the data type that holds the derivative projected to a
+ * certain direction.
*/
typedef Tensor<2,dim> ProjectedDerivative;
/**
- * Given an FEValues object
- * initialized to a cell, and a
- * solution vector, extract the
- * desired derivative at the
- * first quadrature point (which
- * is the only one, as we only
- * evaluate the finite element
- * field at the center of each
- * cell).
+ * Given an FEValues object initialized to a cell, and a solution vector,
+ * extract the desired derivative at the first quadrature point (which is
+ * the only one, as we only evaluate the finite element field at the
+ * center of each cell).
*/
template <class InputVector, int spacedim>
static ProjectedDerivative
const unsigned int component);
/**
- * Return the norm of the
- * derivative object. Here, for
- * the (symmetric) tensor of
- * second derivatives, we choose
- * the absolute value of the
- * largest eigenvalue, which is
- * the matrix norm associated to
- * the $l_2$ norm of vectors. It
- * is also the largest value of
- * the curvature of the solution.
+ * Return the norm of the derivative object. Here, for the (symmetric)
+ * tensor of second derivatives, we choose the absolute value of the
+ * largest eigenvalue, which is the matrix norm associated to the $l_2$
+ * norm of vectors. It is also the largest value of the curvature of the
+ * solution.
*/
static double derivative_norm (const Derivative &d);
/**
- * If for the present derivative
- * order, symmetrization of the
- * derivative tensor is
- * necessary, then do so on the
- * argument.
+ * If for the present derivative order, symmetrization of the derivative
+ * tensor is necessary, then do so on the argument.
*
- * For the second derivatives,
- * each entry of the tensor is
- * set to the mean of its value
- * and the value of the transpose
- * element.
+ * For the second derivatives, each entry of the tensor is set to the mean
+ * of its value and the value of the transpose element.
*
- * Note that this function
- * actually modifies its
- * argument.
+ * Note that this function actually modifies its argument.
*/
static void symmetrize (Derivative &derivative_tensor);
};
{
public:
/**
- * typedef to select the
- * DerivativeDescription corresponding
- * to the <tt>order</tt>th
- * derivative. In this general template
- * we set an unvalid typedef to void,
- * the real typedefs have to be
- * specialized.
+ * typedef to select the DerivativeDescription corresponding to the
+ * <tt>order</tt>th derivative. In this general template we set an invalid
+ * typedef to void; the real typedefs have to be specialized.
*/
typedef void DerivDescr;
private:
/**
- * Convenience typedef denoting
- * the range of indices on which
- * a certain thread shall
- * operate.
+ * Convenience typedef denoting the range of indices on which a certain
+ * thread shall operate.
*/
typedef std::pair<unsigned int,unsigned int> IndexInterval;
/**
- * Kind of the main function of
- * this class. It is called by
- * the public entry points to
- * this class with the correct
- * template first argument and
- * then simply calls the
- * @p approximate function,
- * after setting up several
- * threads and doing some
- * administration that is
- * independent of the actual
+ * Kind of the main function of this class. It is called by the public entry
+ * points to this class with the correct template first argument and then
+ * simply calls the @p approximate function, after setting up several
+ * threads and doing some administration that is independent of the actual
* derivative to be computed.
*
- * The @p component argument
- * denotes which component of the
- * solution vector we are to work
- * on.
+ * The @p component argument denotes which component of the solution vector
+ * we are to work on.
*/
template <class DerivativeDescription, int dim,
template <int, int> class DH, class InputVector, int spacedim>
Vector<float> &derivative_norm);
/**
- * Compute the derivative
- * approximation on a given cell.
- * Fill the @p derivative_norm vector with
- * the norm of the computed derivative
- * tensors on the cell.
+ * Compute the derivative approximation on a given cell. Fill the @p
+ * derivative_norm vector with the norm of the computed derivative tensors
+ * on the cell.
*/
template <class DerivativeDescription, int dim,
template <int, int> class DH, class InputVector, int spacedim>
static void
- approximate (SynchronousIterators<std_cxx1x::tuple<typename DH<dim,spacedim>
- ::active_cell_iterator,Vector<float>::iterator> > const &cell,
+ approximate (SynchronousIterators<std_cxx1x::tuple<FilteredIterator<typename DH<dim,spacedim>::active_cell_iterator>,
+ Vector<float>::iterator> > const &cell,
const Mapping<dim,spacedim> &mapping,
const DH<dim,spacedim> &dof,
const InputVector &solution,
const unsigned int component);
/**
- * Compute the derivative approximation on
- * one cell. This computes the full
+ * Compute the derivative approximation on one cell. This computes the full
* derivative tensor.
*/
template <class DerivativeDescription, int dim,
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
*
* @note If the cell in which the point is found
* is not locally owned, an exception of type
- * VectorTools<dim, InVector, spacedim>::ExcPointNotAvailableHere
+ * VectorTools::ExcPointNotAvailableHere
* is thrown.
*/
template <int dim, class InVector, int spacedim>
${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}
)
EXPORT(TARGETS ${DEAL_II_BASE_NAME}${DEAL_II_${build}_SUFFIX}
- FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}BuildTargets.cmake
+ FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake
APPEND
)
exc(""),
stacktrace (0),
n_stacktrace_frames (0)
-{}
+{
+ // Construct a minimalistic error message:
+ generate_message();
+}
cond = c;
exc = e;
- // if the system supports this, get a stacktrace how we got here
+ // If the system supports this, get a stacktrace of how we got here:
+
+ if (stacktrace != 0)
+ {
+ free (stacktrace);
+ stacktrace = 0;
+ }
+
#ifdef HAVE_GLIBC_STACKTRACE
void *array[25];
n_stacktrace_frames = backtrace(array, 25);
stacktrace = backtrace_symbols(array, n_stacktrace_frames);
#endif
- // build up a string with the error message...
-
- std::ostringstream converter;
-
- converter << std::endl
- << "--------------------------------------------------------"
- << std::endl;
- // print out general data
- print_exc_data (converter);
- // print out exception specific data
- print_info (converter);
- print_stack_trace (converter);
-
- if (!deal_II_exceptions::additional_assert_output.empty())
- {
- converter << "--------------------------------------------------------"
- << std::endl
- << deal_II_exceptions::additional_assert_output
- << std::endl;
- }
-
- converter << "--------------------------------------------------------"
- << std::endl;
-
- // ... and setup the final error message with it:
- static_cast<std::runtime_error &>(*this) = std::runtime_error(converter.str());
+ // And finally populate the underlying std::runtime_error:
+ generate_message();
}
+void ExceptionBase::generate_message ()
+{
+ // build up a string with the error message...
+
+ std::ostringstream converter;
+
+ converter << std::endl
+ << "--------------------------------------------------------"
+ << std::endl;
+
+ // print out general data
+ print_exc_data (converter);
+ // print out exception specific data
+ print_info (converter);
+ print_stack_trace (converter);
+
+ if (!deal_II_exceptions::additional_assert_output.empty())
+ {
+ converter << "--------------------------------------------------------"
+ << std::endl
+ << deal_II_exceptions::additional_assert_output
+ << std::endl;
+ }
+
+ converter << "--------------------------------------------------------"
+ << std::endl;
+
+ // ... and set up std::runtime_error with it:
+ static_cast<std::runtime_error &>(*this) = std::runtime_error(converter.str());
+}
+
+
+
namespace deal_II_exceptions
{
namespace internals
{
#ifdef DEBUG
const char *name = (id != 0) ? id : unknown_subscriber;
- Assert (counter>0, ExcNoSubscriber(object_info->name(), name));
+ AssertNothrow (counter>0, ExcNoSubscriber(object_info->name(), name));
// This is for the case that we do
// not abort after the exception
if (counter == 0)
#ifndef DEAL_II_WITH_THREADS
map_iterator it = counter_map.find(name);
- Assert (it != counter_map.end(), ExcNoSubscriber(object_info->name(), name));
- Assert (it->second > 0, ExcNoSubscriber(object_info->name(), name));
+ AssertNothrow (it != counter_map.end(), ExcNoSubscriber(object_info->name(), name));
+ AssertNothrow (it->second > 0, ExcNoSubscriber(object_info->name(), name));
it->second--;
#endif
RefinementCase<G::dimension>::no_refinement);
}
- boundary_or_material_id.reserve (new_size);
- boundary_or_material_id.insert (boundary_or_material_id.end(),
- new_size-boundary_or_material_id.size(),
- BoundaryOrMaterialId());
+ boundary_or_material_id.resize (new_size);
manifold_id.reserve (new_size);
manifold_id.insert (manifold_id.end(),
- user_data.reserve (new_size);
- user_data.insert (user_data.end(),
- new_size-user_data.size(),
- UserData());
+ user_data.resize (new_size);
}
if (n_unused_singles==0)
4*new_size-children.size(),
-1);
+ // for the following two fields, we know exactly how many elements
+ // we need, so first reserve then resize (resize itself, at least
+ // with some compiler libraries, appears to round up the size it
+ // actually reserves)
boundary_or_material_id.reserve (new_size);
- boundary_or_material_id.insert (boundary_or_material_id.end(),
- new_size-boundary_or_material_id.size(),
- BoundaryOrMaterialId());
+ boundary_or_material_id.resize (new_size);
manifold_id.reserve (new_size);
manifold_id.insert (manifold_id.end(),
user_data.reserve (new_size);
- user_data.insert (user_data.end(),
- new_size-user_data.size(),
- UserData());
+ user_data.resize (new_size);
face_orientations.reserve (new_size * GeometryInfo<3>::faces_per_cell);
face_orientations.insert (face_orientations.end(),
= (*data.cell_to_patch_index_map)[neighbor->level()][neighbor->index()];
}
- const unsigned int patch_idx = cell_and_index->second;
+ const unsigned int patch_idx =
+ (*data.cell_to_patch_index_map)[cell_and_index->first->level()][cell_and_index->first->index()];
// did we mess up the indices?
Assert(patch_idx < patches.size(), ExcInternalError());
Assert (n_subdivisions >= 1,
ExcInvalidNumberOfSubdivisions(n_subdivisions));
- // first count the cells we want to create patches of. also fill the object
+ // First count the cells we want to create patches of. Also fill the object
// that maps the cell indices to the patch numbers, as this will be needed
- // for generation of neighborship information
+ // for generation of neighborship information.
+ // Note that there is a confusing mess of different indices at play here:
+ // patch_index - the index of a patch in all_cells
+ // cell->index - only unique on each level, used in cell_to_patch_index_map
+ // active_index - index for a cell when counting from begin_active() using ++cell
+ // cell_index - unique index of a cell counted using next_locally_owned_cell()
+ // starting from first_locally_owned_cell()
+ //
+ // It turns out that we create one patch for each selected cell, so patch_index==cell_index.
+ //
+ // will be cell_to_patch_index_map[cell->level][cell->index] = patch_index
std::vector<std::vector<unsigned int> > cell_to_patch_index_map;
cell_to_patch_index_map.resize (this->triangulation->n_levels());
for (unsigned int l=0; l<this->triangulation->n_levels(); ++l)
{
+ // max_index is the largest cell->index on level l
unsigned int max_index = 0;
for (cell_iterator cell=first_locally_owned_cell(); cell != this->triangulation->end();
cell = next_locally_owned_cell(cell))
dealii::DataOutBase::Patch<DH::dimension,DH::space_dimension>::no_neighbor);
}
+ // will be all_cells[patch_index] = pair(cell, active_index)
std::vector<std::pair<cell_iterator, unsigned int> > all_cells;
{
+ // important: we need to compute the active_index of the cell in the range
+ // 0..n_active_cells() because this is where we need to look up cell
+ // data from (cell data vectors are indexed by the active cell index, not
+ // by the distance computed by first_locally_owned_cell/next_locally_owned_cell,
+ // which might skip some cells because of the FilteredIterator).
+ active_cell_iterator active_cell = this->triangulation->begin_active();
+ unsigned int active_index = 0;
cell_iterator cell = first_locally_owned_cell();
- for (unsigned int index = 0; cell != this->triangulation->end(); ++index)
+ for (; cell != this->triangulation->end();
+ cell = next_locally_owned_cell(cell))
{
+ // move forward until active_cell points at the cell (cell) we are looking
+ // at to compute the current active_index
+ while (active_cell!=this->triangulation->end()
+ && cell->active()
+ && active_cell_iterator(cell) != active_cell)
+ {
+ ++active_cell;
+ ++active_index;
+ }
+
Assert (static_cast<unsigned int>(cell->level()) <
cell_to_patch_index_map.size(),
ExcInternalError());
Assert (static_cast<unsigned int>(cell->index()) <
cell_to_patch_index_map[cell->level()].size(),
ExcInternalError());
-
+ Assert (active_index < this->triangulation->n_active_cells(),
+ ExcInternalError());
cell_to_patch_index_map[cell->level()][cell->index()] = all_cells.size();
- all_cells.push_back (std::make_pair(cell, index));
- cell = next_locally_owned_cell(cell);
+ all_cells.push_back (std::make_pair(cell, active_index));
}
}
WorkStream::run (&all_cells[0],
&all_cells[0]+all_cells.size(),
std_cxx1x::bind(&DataOut<dim,DH>::build_one_patch,
- *this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3,
+ this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3,
curved_cell_region,std_cxx1x::ref(this->patches)),
std_cxx1x::bind(&internal::DataOut::copy<dim,DH::space_dimension>,
std_cxx1x::_1),
WorkStream::run (&all_faces[0],
&all_faces[0]+all_faces.size(),
std_cxx1x::bind(&DataOutFaces<dim,DH>::build_one_patch,
- *this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3),
+ this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3),
std_cxx1x::bind(&internal::DataOutFaces::
append_patch_to_list<dim,DH::space_dimension>,
std_cxx1x::_1, std_cxx1x::ref(this->patches)),
WorkStream::run (&all_cells[0],
&all_cells[0]+all_cells.size(),
std_cxx1x::bind(&DataOutRotation<dim,DH>::build_one_patch,
- *this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3),
+ this, std_cxx1x::_1, std_cxx1x::_2, std_cxx1x::_3),
std_cxx1x::bind(&internal::DataOutRotation
::append_patch_to_list<dim,DH::space_dimension>,
std_cxx1x::_1, std_cxx1x::ref(this->patches)),
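// The three changes above replace "*this" by "this" in the bind expressions.
// A generic illustration (plain std::bind, not deal.II code) of the
// difference: bind stores its arguments by value, so binding the object
// itself copies it, while binding the pointer only stores the address and
// lets the call act on the original object.
#include <functional>

struct Worker
{
  int calls;
  Worker () : calls (0) {}
  void do_work (int) { ++calls; }
};

void example ()
{
  Worker w;

  // binding the object stores a private copy inside the bind expression ...
  std::function<void (int)> by_copy    = std::bind (&Worker::do_work, w,
                                                    std::placeholders::_1);
  // ... while binding the pointer lets the call act on w itself
  std::function<void (int)> by_pointer = std::bind (&Worker::do_work, &w,
                                                    std::placeholders::_1);

  by_copy (0);     // w.calls is still 0
  by_pointer (0);  // w.calls is now 1
}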
// Only act on the locally owned cells
typedef FilteredIterator<typename DH<dim,spacedim>::active_cell_iterator> CellFilter;
- typedef std_cxx1x::tuple<typename DH<dim,spacedim>::active_cell_iterator,Vector<float>::iterator>
+ typedef std_cxx1x::tuple<CellFilter,Vector<float>::iterator>
Iterators;
SynchronousIterators<Iterators> begin(Iterators (CellFilter(IteratorFilters::LocallyOwnedCell(),
dof_handler.begin_active()),derivative_norm.begin())),
template <class DerivativeDescription, int dim,
template <int, int> class DH, class InputVector, int spacedim>
void
-DerivativeApproximation::approximate (SynchronousIterators<std_cxx1x::tuple<typename DH<dim,spacedim>
- ::active_cell_iterator,Vector<float>::iterator> > const &cell,
+DerivativeApproximation::approximate (SynchronousIterators<std_cxx1x::tuple<FilteredIterator<typename DH<dim,spacedim>::active_cell_iterator>,Vector<float>::iterator> > const &cell,
const Mapping<dim,spacedim> &mapping,
const DH<dim,spacedim> &dof_handler,
const InputVector &solution,
// ...and the place where it lives
const Point<dim> this_center = fe_midpoint_value.quadrature_point(0);
-
// loop over all neighbors and
// accumulate the difference
// quotients from them. note
# Setup tests:
ADD_CUSTOM_TARGET(setup_tests)
+# Regenerate tests (run "make rebuild_cache" in subprojects):
+ADD_CUSTOM_TARGET(regen_tests)
+
# Clean all tests
ADD_CUSTOM_TARGET(clean_tests)
TEST_PICKUP_REGEX
TEST_TIME_LIMIT
)
+ # always undefine:
LIST(APPEND _options "-U${_var}")
IF(DEFINED ${_var})
LIST(APPEND _options "-D${_var}=${${_var}}")
# Define a subproject for every enabled category:
#
+MESSAGE(STATUS "")
+MESSAGE(STATUS "Regenerating testsuite subprojects")
+
FOREACH(_category ${_categories})
IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${_category}/CMakeLists.txt)
SET(_category_dir ${CMAKE_CURRENT_SOURCE_DIR}/${_category})
)
ADD_DEPENDENCIES(setup_tests setup_tests_${_category})
- # depend on a valid build directory (libraries built, config in place):
+ # depend on a valid build directory:
ADD_DEPENDENCIES(setup_tests_${_category} setup_build_dir)
+ ADD_CUSTOM_TARGET(regen_tests_${_category}
+ COMMAND [ ! -d ${_category} ] || ${CMAKE_COMMAND}
+ --build ${CMAKE_CURRENT_BINARY_DIR}/${_category} --target regenerate
+ > /dev/null # Shoo!
+ )
+ ADD_DEPENDENCIES(regen_tests regen_tests_${_category})
+
ADD_CUSTOM_TARGET(clean_tests_${_category}
COMMAND [ ! -d ${_category} ] || ${CMAKE_COMMAND}
--build ${CMAKE_CURRENT_BINARY_DIR}/${_category} --target clean
+ > /dev/null # Shoo!
)
ADD_DEPENDENCIES(clean_tests clean_tests_${_category})
"SUBDIRS(${_category})\n"
)
+ #
+ # Regenerate subprojects: The "regenerate" target of the subproject
+ # depends on "rebuild_cache" so that the subprojects rerun cmake if
+ # necessary.
+ # (TODO: Unfortunately this is sequential because we cannot call back
+ # into the main build system.)
+ #
+
+ IF(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/${_category}/CMakeCache.txt)
+ EXECUTE_PROCESS(
+ COMMAND ${CMAKE_COMMAND}
+ --build ${CMAKE_CURRENT_BINARY_DIR}/${_category} --target regenerate
+ OUTPUT_QUIET
+ )
+ ENDIF()
+
ENDIF()
ENDFOREACH()
+
+MESSAGE(STATUS "Regenerating testsuite subprojects - Done")
STRING(TOLOWER ${_build} _build_lowercase)
SET(_test ${_category}/${_path}/${_file}.${_build_lowercase})
- STRING(REGEX REPLACE "\\/" "-" _target ${_test})
+ STRING(REGEX REPLACE "\\/" "-" _target ${_path}/${_file}.${_build_lowercase})
# Respect TEST_PICKUP_REGEX:
IF( "${TEST_PICKUP_REGEX}" STREQUAL "" OR
_test MATCHES "${TEST_PICKUP_REGEX}" )
+ #
+ # Add a "guard file" rule: The purpose of interrupt_guard.cc is to
+ # force a complete rerun of this test (BUILD stage) if
+ # interrupt_guard.cc is removed by run_test.cmake due to an
+ # interruption.
+ #
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}_interrupt_guard.cc
+ COMMAND touch ${CMAKE_CURRENT_BINARY_DIR}/${_target}_interrupt_guard.cc
+ )
+
# Add an object library for each header file and build configuration:
- ADD_LIBRARY(${_target} OBJECT EXCLUDE_FROM_ALL test_header.cc)
+ ADD_LIBRARY(${_target} OBJECT EXCLUDE_FROM_ALL test_header.cc
+ ${CMAKE_CURRENT_BINARY_DIR}/${_target}_interrupt_guard.cc
+ )
SET_TARGET_PROPERTIES(${_target} PROPERTIES
LINK_FLAGS "${DEAL_II_LINKER_FLAGS} ${DEAL_II_LINKER_FLAGS_${_build}}"
ADD_TEST(NAME ${_test}
COMMAND ${CMAKE_COMMAND} -DTRGT=${_target}.build -DTEST=${_test}
-DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR}
+ -DGUARD_FILE=${CMAKE_CURRENT_BINARY_DIR}/${_target}_interrupt_guard.cc
-P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
ENDIF()
ENDFOREACH()
+ #
+ # Add a "guard file" rule: The purpose of interrupt_guard is to force
+ # a complete rerun of this test (CONFIGURE, BUILD and RUN stage) if
+ # interrupt_guard is removed by run_test.cmake due to an
+ # interruption.
+ #
+ ADD_CUSTOM_COMMAND(
+ OUTPUT ${_step_dir}/interrupt_guard
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${_step_dir}
+ COMMAND touch ${_step_dir}/interrupt_guard
+ )
+
# A rule how to copy the example step to the current directory:
ADD_CUSTOM_COMMAND(OUTPUT ${_step_dir}/CMakeLists.txt
- COMMAND ${CMAKE_COMMAND} -E make_directory ${_step_dir}
${_command}
DEPENDS
${_files}
${DEAL_II_TARGET_DEBUG}
${DEAL_II_TARGET_RELEASE}
+ ${_step_dir}/interrupt_guard
)
# And a rule on how to configure the example step:
ITEM_MATCHES(_match ${_step} ${_${_build_lowercase}_steps})
IF(_match)
# Add a full test (CONFIGURE, BUILD, RUN):
- SET(_target ${_category}-${_step}.${_build_lowercase}.run)
+ SET(_target ${_step}.${_build_lowercase}.run)
ADD_CUSTOM_TARGET(${_target}
COMMAND
echo "${_test}: CONFIGURE successful."
)
ELSE()
# Add a minimal test (CONFIGURE, BUILD):
- SET(_target ${_category}-${_step}.${_build_lowercase}.build)
+ SET(_target ${_step}.${_build_lowercase}.build)
ADD_CUSTOM_TARGET(${_target}
COMMAND
echo "${_test}: CONFIGURE successful."
ADD_TEST(NAME ${_test}
COMMAND ${CMAKE_COMMAND} -DTRGT=${_target} -DTEST=${_test}
-DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR}
+ -DGUARD_FILE=${_step_dir}/interrupt_guard
-P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
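#
# A condensed sketch (hypothetical target and file names) of the guard-file
# pattern used by the two rules above: a custom command owns a guard file,
# and the build step lists that file as a dependency. If run_test.cmake
# removes the guard file after an interruption, the next build regenerates
# it and thereby reruns the whole step.
#
ADD_CUSTOM_COMMAND(
  OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/example_interrupt_guard
  COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/example_interrupt_guard
  )
ADD_CUSTOM_TARGET(example_step
  DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/example_interrupt_guard
  )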
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8)
INCLUDE(${DEAL_II_SOURCE_DIR}/cmake/setup_testsuite.cmake)
PROJECT(testsuite CXX)
-INCLUDE(${DEAL_II_EXECUTABLE_CONFIG})
+INCLUDE(${DEAL_II_TARGET_CONFIG})
#
# Tests for the mesh_converter executable