From: heltai Date: Wed, 23 Oct 2013 11:47:56 +0000 (+0000) Subject: Merged from trunk X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=69d4dcbc70ba9da5998f8b646605def2fd0b3474;p=dealii-svn.git Merged from trunk git-svn-id: https://svn.dealii.org/branches/branch_manifold_id@31399 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/CMakeLists.txt b/deal.II/CMakeLists.txt index 239be6ea18..c5e09474bc 100644 --- a/deal.II/CMakeLists.txt +++ b/deal.II/CMakeLists.txt @@ -80,9 +80,8 @@ PROJECT(deal.II CXX) ENABLE_LANGUAGE_OPTIONAL(C) ENABLE_LANGUAGE_OPTIONAL(Fortran) -INCLUDE(setup_post_project_call) - INCLUDE(setup_deal_ii) + INCLUDE(setup_compiler_flags) # @@ -146,9 +145,7 @@ ADD_SUBDIRECTORY(cmake/config) # has to be included after source ADD_SUBDIRECTORY(contrib) # has to be included after source ADD_SUBDIRECTORY(examples) -IF(DEAL_II_HAVE_TESTS_DIRECTORY) - ADD_SUBDIRECTORY(${TEST_DIR} ${CMAKE_BINARY_DIR}/tests) -ENDIF() +ADD_SUBDIRECTORY(tests) # # And finally, print the configuration: diff --git a/deal.II/CTestConfig.cmake b/deal.II/CTestConfig.cmake new file mode 100644 index 0000000000..7d3855819d --- /dev/null +++ b/deal.II/CTestConfig.cmake @@ -0,0 +1,51 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +# +# Dashboard configuration: +# + +SET(CTEST_PROJECT_NAME "deal.II") + +SET(CTEST_DROP_METHOD "http") +SET(CTEST_DROP_SITE "cdash.kyomu.43-1.org") +SET(CTEST_DROP_LOCATION "/submit.php?project=deal.II") +SET(CTEST_DROP_SITE_CDASH TRUE) + +SET(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_ERRORS 100) +SET(CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS 300) + +# number of lines to submit before an error: +SET(CTEST_CUSTOM_ERROR_PRE_CONTEXT 5) +# number of lines to submit after an error: +SET(CTEST_CUSTOM_ERROR_POST_CONTEXT 20) + +# +# Coverage options: +# + +SET(CTEST_EXTRA_COVERAGE_GLOB + # These files should have executable lines and therefore coverage: + # source/**/*.cc + ) + +SET(CTEST_CUSTOM_COVERAGE_EXCLUDE + "/bundled" + "/cmake/scripts/" + "/contrib" + "/examples" + "/tests" + ) diff --git a/deal.II/README b/deal.II/README index ba6501b3eb..3d878e4de0 100644 --- a/deal.II/README +++ b/deal.II/README @@ -15,7 +15,7 @@ For the impatient: $ make install (alternatively $ make -j install) A detailed ReadME can be found at ./doc/readme.html and - ./doc/development/cmake.html or at http://www.dealii.org/. + ./doc/users/cmake.html or at http://www.dealii.org/. Getting started: @@ -29,5 +29,5 @@ License: Further information: - For further information have a look at ./doc/readme.html or at + For further information have a look at ./doc/index.html or at http://www.dealii.org. 
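The new CTestConfig.cmake above only records the project name, the CDash drop site and the coverage globs; it takes effect once a CTest script in the source tree runs the dashboard stages. A minimal driver sketch, with purely hypothetical paths and machine names, would look like the following (run via "ctest -S driver.cmake"); the full-featured version is added further down in this commit as cmake/scripts/run_testsuite.cmake:

SET(CTEST_SOURCE_DIRECTORY "/path/to/deal.II")   # hypothetical checkout
SET(CTEST_BINARY_DIRECTORY "/path/to/build")     # hypothetical build directory
SET(CTEST_CMAKE_GENERATOR  "Unix Makefiles")
SET(CTEST_SITE             "mymachine")          # hypothetical site name
SET(CTEST_BUILD_NAME       "gcc-4.8")            # hypothetical build name

CTEST_START(Experimental)    # reads CTestConfig.cmake from CTEST_SOURCE_DIRECTORY
CTEST_CONFIGURE()
CTEST_BUILD()
CTEST_TEST()
CTEST_SUBMIT()               # submits to CTEST_DROP_SITE / CTEST_DROP_LOCATION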
diff --git a/deal.II/bundled/CMakeLists.txt b/deal.II/bundled/CMakeLists.txt index 897b4632e1..ff7abbe921 100644 --- a/deal.II/bundled/CMakeLists.txt +++ b/deal.II/bundled/CMakeLists.txt @@ -41,7 +41,7 @@ IF(FEATURE_BOOST_BUNDLED_CONFIGURED) ADD_SUBDIRECTORY(${BOOST_FOLDER}/libs/serialization/src) - IF( DEAL_II_WITH_THREADS AND NOT DEAL_II_CAN_USE_CXX11) + IF( DEAL_II_WITH_THREADS AND NOT DEAL_II_USE_CXX11) # # If the C++ compiler doesn't completely support the C++11 standard # (and consequently we can't use std::thread, std::mutex, etc), then diff --git a/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp b/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp index 2e7be17839..a6ad99cae4 100644 --- a/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp +++ b/deal.II/bundled/boost-1.49.0/include/boost/signals2/detail/foreign_ptr.hpp @@ -39,7 +39,7 @@ namespace boost // should only be used by deal.II and dependent projects... // // - Maier, 2013 -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 template struct weak_ptr_traits > { typedef std::shared_ptr shared_type; @@ -54,7 +54,7 @@ namespace boost typedef boost::weak_ptr weak_type; }; // as above -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 template struct shared_ptr_traits > { typedef std::weak_ptr weak_type; diff --git a/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt b/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt index 1551dc14de..6e5c208abe 100644 --- a/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt +++ b/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt @@ -118,4 +118,3 @@ ELSE() DEAL_II_ADD_DEFINITIONS(obj_tbb "DO_ITT_NOTIFY") ENDIF() ENDIF() - diff --git a/deal.II/cmake/checks/check_01_cxx_features.cmake b/deal.II/cmake/checks/check_01_cxx_features.cmake index 7fcf064bfb..84a9d178a2 100644 --- a/deal.II/cmake/checks/check_01_cxx_features.cmake +++ b/deal.II/cmake/checks/check_01_cxx_features.cmake @@ -21,7 +21,7 @@ # # DEAL_II_HAVE_CXX11_FLAG # DEAL_II_CXX11_FLAG -# DEAL_II_CAN_USE_CXX11 +# DEAL_II_USE_CXX11 # HAVE_ISNAN # HAVE_UNDERSCORE_ISNAN # DEAL_II_HAVE_ISFINITE @@ -168,7 +168,7 @@ IF(DEAL_II_HAVE_CXX11_FLAG) MESSAGE(STATUS "Sufficient C++11 support. Enabling ${DEAL_II_CXX11_FLAG}.") - SET(DEAL_II_CAN_USE_CXX11 TRUE) + SET(DEAL_II_USE_CXX11 TRUE) ADD_FLAGS(CMAKE_CXX_FLAGS "${DEAL_II_CXX11_FLAG}") @@ -176,17 +176,15 @@ IF(DEAL_II_HAVE_CXX11_FLAG) MESSAGE(STATUS "Insufficient C++11 support. Disabling ${DEAL_II_CXX11_FLAG}.") ENDIF() -# -# Currently unused -# -# IF(DEAL_II_CAN_USE_CXX11) -# # -# # Also test for a couple of C++11 things that we don't use in the -# # library but that users may want to use in their applications and that -# # we might want to test in the testsuite -# # -# # TODO: Actually we have to export the test results somehow. 
:-] -# # + IF(DEAL_II_USE_CXX11) + CHECK_CXX_SOURCE_COMPILES( + " + #include + int main(){ std::is_trivially_copyable bob; } + " + DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE) + +# Currently unused: # # CHECK_CXX_SOURCE_COMPILES( # " @@ -212,7 +210,7 @@ IF(DEAL_II_HAVE_CXX11_FLAG) # SET(DEAL_II_CAN_USE_ADDITIONAL_CXX1X_FEATURES) # ENDIF() # -# ENDIF() + ENDIF() POP_TEST_FLAG() diff --git a/deal.II/cmake/checks/check_02_system_features.cmake b/deal.II/cmake/checks/check_02_system_features.cmake index 22e6a8a88b..7980378ed2 100644 --- a/deal.II/cmake/checks/check_02_system_features.cmake +++ b/deal.II/cmake/checks/check_02_system_features.cmake @@ -119,49 +119,22 @@ IF(CMAKE_SYSTEM_NAME MATCHES "Windows") # Shared library handling: # IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU") - # # With MinGW we're lucky: - # ENABLE_IF_LINKS(DEAL_II_LINKER_FLAGS "-Wl,--export-all-symbols") ENABLE_IF_LINKS(DEAL_II_LINKER_FLAGS "-Wl,--enable-auto-import") ENABLE_IF_LINKS(DEAL_II_LINKER_FLAGS "-Wl,--allow-multiple-definition") - - # - # Workaround for a miscompilation and linkage issue with shared libraries - # with MinGW. Replacing -O0 with -O1 seems to help.. - # - REPLACE_FLAG(DEAL_II_CXX_FLAGS_DEBUG "-O0" "-O1") - ELSE() - - # # Otherwise disable shared libraries: - # MESSAGE(WARNING "\n" "BUILD_SHARED_LIBS forced to OFF\n\n" ) SET(BUILD_SHARED_LIBS OFF CACHE BOOL "" FORCE) ENDIF() - # - # Disable -ggdb and -g on Windows/MinGW targets for the moment until the - # compilation issues with too big files are resolved - # - # - Matthias Maier, 2012 - # - STRIP_FLAG(DEAL_II_CXX_FLAGS_DEBUG "-ggdb") - STRIP_FLAG(DEAL_II_LINKER_FLAGS_DEBUG "-ggdb") - STRIP_FLAG(DEAL_II_CXX_FLAGS_DEBUG "-g") - STRIP_FLAG(DEAL_II_LINKER_FLAGS_DEBUG "-g") ENDIF() -IF(CMAKE_SYSTEM_NAME MATCHES "CYGWIN") - # - # Workaround for a miscompilation and linkage issue with shared libraries - # under Cygwin. Replacing -O0 with -O1 helps. - # - # - Matthias Maier, 2013 - # - REPLACE_FLAG(DEAL_II_CXX_FLAGS_DEBUG "-O0" "-O1") +IF( CMAKE_SYSTEM_NAME MATCHES "CYGWIN" + OR CMAKE_SYSTEM_NAME MATCHES "Windows" ) + # TODO: Bailout if current compiler is not gcc-4.8.1 or newer ENDIF() diff --git a/deal.II/cmake/checks/check_03_compiler_bugs.cmake b/deal.II/cmake/checks/check_03_compiler_bugs.cmake index bb63ba0740..f2cc702cb0 100644 --- a/deal.II/cmake/checks/check_03_compiler_bugs.cmake +++ b/deal.II/cmake/checks/check_03_compiler_bugs.cmake @@ -320,6 +320,21 @@ IF(DEAL_II_ICC_NUMERICLIMITS_BUG) ) STRIP_FLAG(CMAKE_CXX_FLAGS "${DEAL_II_CXX11_FLAG}") SET(DEAL_II_CAN_USE_CXX1X FALSE) - SET(DEAL_II_CAN_USE_CXX11 FALSE) + SET(DEAL_II_USE_CXX11 FALSE) ENDIF() +# +# gcc-4.8.1 has some problems with the constexpr "vertices_per_cell" in the +# definition of alternating_form_at_vertices. +# +# TODO: Write a unit test. +# +# For now, just enable the workaround for Windows targets +# +# - Matthias Maier, 2013 +# + +IF( CMAKE_SYSTEM_NAME MATCHES "CYGWIN" + OR CMAKE_SYSTEM_NAME MATCHES "Windows" ) + SET(DEAL_II_CONSTEXPR_BUG TRUE) +ENDIF() diff --git a/deal.II/cmake/config/CMakeLists.txt b/deal.II/cmake/config/CMakeLists.txt index d640217ef4..a84bc40978 100644 --- a/deal.II/cmake/config/CMakeLists.txt +++ b/deal.II/cmake/config/CMakeLists.txt @@ -22,7 +22,7 @@ # deal.IIVersionConfig.cmake # # and copies it (a) to the build directory and (b) prepares it for later -# installation (the copy in CMAKE_CURRENT_BINARY_DIR). +# installation. 
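The std::is_trivially_copyable probe added to check_01_cxx_features.cmake above lost its angle-bracketed tokens in this plain-text rendering. A self-contained sketch of the intended check, assuming the missing header is <type_traits> and an arbitrary template argument such as int (both assumptions, not spelled out in the hunk), reads:

INCLUDE(CheckCXXSourceCompiles)
# The surrounding file runs this with the C++11 flag already pushed onto the
# test flags; the result is consumed below in configure_1_threads.cmake to
# decide whether TBB_IMPLEMENT_CPP0X has to be defined.
CHECK_CXX_SOURCE_COMPILES(
  "
  #include <type_traits>
  int main(){ std::is_trivially_copyable<int> bob; }
  "
  DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE)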
# # @@ -335,18 +335,23 @@ ENDIF() # # Finally, add a target to create the "binary" file in -# ${DEAL_II_PROJECT_CONFIG_RELDIR} and add it to the library target: +# ${DEAL_II_PROJECT_CONFIG_RELDIR} and add it to the "all" target: # ADD_CUSTOM_TARGET(setup_build_dir ALL COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/binary COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Config.cmake + COMMENT "Update build directory" ) -ADD_DEPENDENCIES(library setup_build_dir) +FOREACH(_build ${DEAL_II_BUILD_TYPES}) + ADD_DEPENDENCIES(setup_build_dir ${DEAL_II_BASE_NAME}${DEAL_II_${_build}_SUFFIX}) +ENDFOREACH() # # And a script to remove it upon installation from the install prefix: +# This is necessary if somebody wants to install into the build directory +# (yes this is a valid use case...). # INSTALL(CODE " diff --git a/deal.II/cmake/config/Config.cmake.in b/deal.II/cmake/config/Config.cmake.in index 53417f2bdf..8d5cd1a74d 100644 --- a/deal.II/cmake/config/Config.cmake.in +++ b/deal.II/cmake/config/Config.cmake.in @@ -173,6 +173,7 @@ SET(DEAL_II_LIBRARIES "@CONFIG_LIBRARIES@") # IF(DEAL_II_BUILD_DIR) + SET(DEAL_II_EXECUTABLE_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake") SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}BuildTargets.cmake") ELSE() SET(DEAL_II_TARGET_CONFIG "${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Targets.cmake") diff --git a/deal.II/cmake/configure/configure_1_mpi.cmake b/deal.II/cmake/configure/configure_1_mpi.cmake index 5140848840..cd92d2707c 100644 --- a/deal.II/cmake/configure/configure_1_mpi.cmake +++ b/deal.II/cmake/configure/configure_1_mpi.cmake @@ -54,23 +54,21 @@ MACRO(FEATURE_MPI_FIND_EXTERNAL var) ENDIF() FIND_PACKAGE(MPI) - IF(NOT MPI_CXX_FOUND) + IF(NOT MPI_CXX_FOUND AND DEAL_II_WITH_MPI) # # CMAKE_CXX_COMPILER is apparently not an mpi wrapper. - # So, let's be a bit more aggressive in finding MPI if DEAL_II_WITH_MPI - # is set. + # So, let's be a bit more aggressive in finding MPI (and if + # DEAL_II_WITH_MPI is set). # - IF(DEAL_II_WITH_MPI) - MESSAGE(STATUS - "MPI not found but DEAL_II_WITH_MPI is set to TRUE." - " Try again with more aggressive search paths:" - ) - SET(MPI_FOUND) # clear this value so that FIND_PACKAGE runs again. - UNSET(MPI_CXX_COMPILER CACHE) - UNSET(MPI_C_COMPILER CACHE) - UNSET(MPI_Fortran_COMPILER CACHE) - FIND_PACKAGE(MPI) - ENDIF() + MESSAGE(STATUS + "MPI not found but DEAL_II_WITH_MPI is set to TRUE." + " Try again with more aggressive search paths:" + ) + SET(MPI_FOUND) # clear this value so that FIND_PACKAGE runs again. 
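The FOREACH loop just added to cmake/config/CMakeLists.txt reverses the earlier dependency: instead of the library depending on setup_build_dir, setup_build_dir now waits for every configured flavor of the library. Assuming the customary deal.II target names deal_II.g and deal_II for a combined DebugRelease build (an assumption here; the hunk itself only uses the base-name/suffix variables), the loop expands to roughly:

ADD_DEPENDENCIES(setup_build_dir deal_II.g)   # ${DEAL_II_BASE_NAME}${DEAL_II_DEBUG_SUFFIX}
ADD_DEPENDENCIES(setup_build_dir deal_II)     # ${DEAL_II_BASE_NAME}${DEAL_II_RELEASE_SUFFIX}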
+ UNSET(MPI_CXX_COMPILER CACHE) + UNSET(MPI_C_COMPILER CACHE) + UNSET(MPI_Fortran_COMPILER CACHE) + FIND_PACKAGE(MPI) ENDIF() # @@ -82,12 +80,65 @@ MACRO(FEATURE_MPI_FIND_EXTERNAL var) ENDIF() ENDFOREACH() - # Hide some variables: - MARK_AS_ADVANCED(MPI_EXTRA_LIBRARY MPI_LIBRARY) - IF(MPI_CXX_FOUND) + # + # Manually assemble some version information: + # + FIND_FILE(MPI_MPI_H NAMES mpi.h + HINTS ${MPI_INCLUDE_PATH} + ) + + IF(NOT MPI_MPI_H MATCHES "-NOTFOUND" AND NOT DEFINED MPI_VERSION) + FILE(STRINGS "${MPI_MPI_H}" MPI_VERSION_MAJOR_STRING + REGEX "#define.*MPI_VERSION") + STRING(REGEX REPLACE "^.*MPI_VERSION.*([0-9]+).*" "\\1" + MPI_VERSION_MAJOR "${MPI_VERSION_MAJOR_STRING}" + ) + FILE(STRINGS ${MPI_MPI_H} MPI_VERSION_MINOR_STRING + REGEX "#define.*MPI_SUBVERSION") + STRING(REGEX REPLACE "^.*MPI_SUBVERSION.*([0-9]+).*" "\\1" + MPI_VERSION_MINOR "${MPI_VERSION_MINOR_STRING}" + ) + SET(MPI_VERSION "${MPI_VERSION_MAJOR}.${MPI_VERSION_MINOR}") + IF("${MPI_VERSION}" STREQUAL ".") + SET(MPI_VERSION) + SET(MPI_VERSION_MAJOR) + SET(MPI_VERSION_MINOR) + ENDIF() + + # OMPI specific version number: + FILE(STRINGS ${MPI_MPI_H} OMPI_VERSION_MAJOR_STRING + REGEX "#define.*OMPI_MAJOR_VERSION") + STRING(REGEX REPLACE "^.*OMPI_MAJOR_VERSION.*([0-9]+).*" "\\1" + OMPI_VERSION_MAJOR "${OMPI_VERSION_MAJOR_STRING}" + ) + FILE(STRINGS ${MPI_MPI_H} OMPI_VERSION_MINOR_STRING + REGEX "#define.*OMPI_MINOR_VERSION") + STRING(REGEX REPLACE "^.*OMPI_MINOR_VERSION.*([0-9]+).*" "\\1" + OMPI_VERSION_MINOR "${OMPI_VERSION_MINOR_STRING}" + ) + FILE(STRINGS ${MPI_MPI_H} OMPI_VERSION_RELEASE_STRING + REGEX "#define.*OMPI_RELEASE_VERSION") + STRING(REGEX REPLACE "^.*OMPI_RELEASE_VERSION.*([0-9]+).*" "\\1" + OMPI_VERSION_SUBMINOR "${OMPI_VERSION_RELEASE_STRING}" + ) + SET(OMPI_VERSION + "${OMPI_VERSION_MAJOR}.${OMPI_VERSION_MINOR}.${OMPI_VERSION_SUBMINOR}" + ) + IF("${OMPI_VERSION}" STREQUAL "..") + SET(OMPI_VERSION) + SET(OMPI_VERSION_MAJOR) + SET(OMPI_VERSION_MINOR) + SET(OMPI_VERSION_SUBMINOR) + ENDIF() + ENDIF() + SET(${var} TRUE) ENDIF() + + # Hide some variables: + MARK_AS_ADVANCED(MPI_EXTRA_LIBRARY MPI_LIBRARY MPI_MPI_H) + ENDMACRO() diff --git a/deal.II/cmake/configure/configure_1_threads.cmake b/deal.II/cmake/configure/configure_1_threads.cmake index e6ddef4076..52e16e0ab8 100644 --- a/deal.II/cmake/configure/configure_1_threads.cmake +++ b/deal.II/cmake/configure/configure_1_threads.cmake @@ -22,7 +22,7 @@ # -# Set up genereal threading: +# Set up general threading: # The macro will be included in CONFIGURE_FEATURE_THREADS_EXTERNAL/BUNDLED. 
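The block above only assembles MPI_VERSION and OMPI_VERSION for later reporting; nothing in this hunk acts on the values yet, and the same FILE(STRINGS)/STRING(REGEX REPLACE) pattern recurs further down for MUMPS, TBB and UMFPACK. A hedged sketch of how such a value could be consumed (the "2.0" threshold is purely illustrative and not part of the diff):

IF(DEFINED MPI_VERSION AND MPI_VERSION VERSION_LESS "2.0")
  MESSAGE(STATUS
    "The MPI installation only announces support for the MPI-${MPI_VERSION} standard"
    )
ENDIF()
IF(DEFINED OMPI_VERSION)
  MESSAGE(STATUS "Open MPI ${OMPI_VERSION} detected")
ENDIF()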
# MACRO(SETUP_THREADING) @@ -164,6 +164,16 @@ MACRO(FEATURE_THREADS_CONFIGURE_EXTERNAL) ENDIF() + # + # Workaround for an issue with C++11 mode, non gcc-compilers and missing + # template std::ist_trivially_copyable + # + IF( DEAL_II_USE_CXX11 AND + NOT DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE AND + NOT CMAKE_CXX_COMPILER_ID MATCHES "GNU" ) + LIST(APPEND DEAL_II_DEFINITIONS "TBB_IMPLEMENT_CPP0X=1") + LIST(APPEND DEAL_II_USER_DEFINITIONS "TBB_IMPLEMENT_CPP0X=1") + ENDIF() SETUP_THREADING() ENDMACRO() @@ -190,6 +200,17 @@ MACRO(FEATURE_THREADS_CONFIGURE_BUNDLED) ) ENDIF() + # + # Workaround for an issue with C++11 mode, non gcc-compilers and missing + # template std::ist_trivially_copyable + # + IF( DEAL_II_USE_CXX11 AND + NOT DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE AND + NOT CMAKE_CXX_COMPILER_ID MATCHES "GNU" ) + LIST(APPEND DEAL_II_DEFINITIONS "TBB_IMPLEMENT_CPP0X=1") + LIST(APPEND DEAL_II_USER_DEFINITIONS "TBB_IMPLEMENT_CPP0X=1") + ENDIF() + # # tbb uses dlopen/dlclose, so link against libdl.so as well: # diff --git a/deal.II/cmake/configure/configure_2_trilinos.cmake b/deal.II/cmake/configure/configure_2_trilinos.cmake index ac15b34051..6693073042 100644 --- a/deal.II/cmake/configure/configure_2_trilinos.cmake +++ b/deal.II/cmake/configure/configure_2_trilinos.cmake @@ -160,7 +160,7 @@ MACRO(FEATURE_TRILINOS_FIND_EXTERNAL var) # with the -std=c++0x flag of GCC, see deal.II FAQ. # Test whether that is indeed the case # - IF(DEAL_II_CAN_USE_CXX11 AND NOT TRILINOS_SUPPORTS_CPP11) + IF(DEAL_II_USE_CXX11 AND NOT TRILINOS_SUPPORTS_CPP11) IF(TRILINOS_HAS_C99_TR1_WORKAROUND) LIST(APPEND DEAL_II_DEFINITIONS "HAS_C99_TR1_CMATH") diff --git a/deal.II/cmake/configure/configure_p4est.cmake b/deal.II/cmake/configure/configure_p4est.cmake index 9780127bc6..de91ef6ea1 100644 --- a/deal.II/cmake/configure/configure_p4est.cmake +++ b/deal.II/cmake/configure/configure_p4est.cmake @@ -24,6 +24,22 @@ MACRO(FEATURE_P4EST_FIND_EXTERNAL var) FIND_PACKAGE(P4EST) IF(P4EST_FOUND) + SET(${var} TRUE) + + # + # We require at least version 0.3.4.1 + # + IF(P4EST_VERSION VERSION_LESS "0.3.4.1") + MESSAGE(STATUS "Insufficient p4est installation found: " + "At least version 0.3.4.1 is required." + ) + SET(P4EST_ADDITIONAL_ERROR_STRING + "Insufficient p4est installation found!\n" + "At least version 0.3.4.1 is required.\n" + ) + SET(${var} FALSE) + ENDIF() + # # Check whether p4est supports mpi: # @@ -32,10 +48,17 @@ MACRO(FEATURE_P4EST_FIND_EXTERNAL var) "p4est has to be configured with MPI enabled." 
) SET(P4EST_ADDITIONAL_ERROR_STRING + ${P4EST_ADDITIONAL_ERROR_STRING} "Insufficient p4est installation found!\n" "p4est has to be configured with MPI enabled.\n" ) + SET(${var} FALSE) + ENDIF() + # + # Reset configuration: + # + IF(NOT ${var}) UNSET(P4EST_LIBRARY_OPTIMIZED CACHE) UNSET(P4EST_LIBRARY_DEBUG CACHE) UNSET(P4EST_INCLUDE_DIR CACHE) @@ -46,12 +69,12 @@ MACRO(FEATURE_P4EST_FIND_EXTERNAL var) "An optional hint to a p4est installation/directory" ) MARK_AS_ADVANCED(CLEAR P4EST_DIR) - ELSE() - SET(${var} TRUE) ENDIF() + ENDIF() ENDMACRO() + MACRO(FEATURE_P4EST_CONFIGURE_EXTERNAL) INCLUDE_DIRECTORIES(${P4EST_INCLUDE_DIRS}) diff --git a/deal.II/cmake/configure/configure_zlib.cmake b/deal.II/cmake/configure/configure_zlib.cmake index 250210beac..772f66acbc 100644 --- a/deal.II/cmake/configure/configure_zlib.cmake +++ b/deal.II/cmake/configure/configure_zlib.cmake @@ -19,3 +19,8 @@ # CONFIGURE_FEATURE(ZLIB) + +# Export ZLIB_VERSION: +IF(DEFINED ZLIB_VERSION_STRING) + SET(ZLIB_VERSION ${ZLIB_VERSION_STRING}) +ENDIF() diff --git a/deal.II/cmake/macros/macro_add_test.cmake b/deal.II/cmake/macros/macro_add_test.cmake new file mode 100644 index 0000000000..0bda009a4c --- /dev/null +++ b/deal.II/cmake/macros/macro_add_test.cmake @@ -0,0 +1,249 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +# +# A Macro to set up tests for the testsuite +# +# The following variables must be set: +# +# TEST_DIFF +# - specifying the executable and command line of the diff command to use +# +# TEST_TIME_LIMIT +# - specifying the maximal wall clock time in seconds a test is allowed +# to run +# +# +# Usage: +# DEAL_II_ADD_TEST(category test_name comparison_file [ARGN]) +# +# This macro assumes that a source file "./tests/category/.cc" +# as well as the comparison file "./tests/category/" is +# available in the testsuite. The output of compiled source file is +# compared against the file comparison file. +# +# [ARGN] is an optional list of additional output lines passed down to the +# run_test.cmake script and printed at the beginning of the test output. +# +# This macro gets the following options from the comparison file name (have +# a look at the testsuite documentation for details): +# - usage of mpirun and number of simultaneous processes +# - valid build configurations +# - expected test stage +# + +MACRO(DEAL_II_ADD_TEST _category _test_name _comparison_file) + + IF(NOT DEAL_II_PROJECT_CONFIG_INCLUDED) + MESSAGE(FATAL_ERROR + "\nDEAL_II_ADD_TEST can only be called in external test subprojects after " + "the inclusion of deal.IIConfig.cmake. 
It is not intended for " + "internal use.\n\n" + ) + ENDIF() + + GET_FILENAME_COMPONENT(_file ${_comparison_file} NAME) + + # + # Determine valid build configurations for this test: + # + + SET(_configuration) + IF(_file MATCHES "debug") + SET(_configuration DEBUG) + ELSEIF(_file MATCHES "release") + SET(_configuration RELEASE) + ENDIF() + + # + # Determine whether the test should be run with mpirun: + # + + STRING(REGEX MATCH "mpirun=([0-9]*)" _n_cpu ${_file}) + IF("${_n_cpu}" STREQUAL "") + SET(_n_cpu 0) # 0 indicates that no mpirun should be used + ELSE() + STRING(REGEX REPLACE "^mpirun=([0-9]*)$" "\\1" _n_cpu ${_n_cpu}) + ENDIF() + + # + # Determine the expected build stage of this test: + # + + STRING(REGEX MATCH "expect=([a-z]*)" _expect ${_file}) + IF("${_expect}" STREQUAL "") + SET(_expect "PASSED") + ELSE() + STRING(REGEX REPLACE "^expect=([a-z]*)$" "\\1" _expect ${_expect}) + STRING(TOUPPER ${_expect} _expect) + ENDIF() + + + FOREACH(_build ${DEAL_II_BUILD_TYPES}) + + ITEM_MATCHES(_match "${_build}" ${_configuration}) + IF(_match OR "${_configuration}" STREQUAL "") + + # + # Setup a bunch of variables describing the test: + # + STRING(TOLOWER ${_build} _build_lowercase) + SET(_target ${_test_name}.${_build_lowercase}) # target name + + # If _n_cpu is equal to "0", a normal, sequental test will be run, + # otherwise run the test with mpirun: + IF("${_n_cpu}" STREQUAL "0") + + SET(_diff_target ${_target}.diff) # diff target name + SET(_test_full ${_category}/${_test_name}.${_build_lowercase}) # full test name + SET(_test_directory ${CMAKE_CURRENT_BINARY_DIR}/${_target}) # directory to run the test in + SET(_run_command ${_target}) # the command to issue + + ELSE() + + SET(_diff_target ${_test_name}.mpirun${_n_cpu}.${_build_lowercase}.diff) # diff target name + SET(_test_full ${_category}/${_test_name}.mpirun=${_n_cpu}.${_build_lowercase}) # full test name + SET(_test_directory ${CMAKE_CURRENT_BINARY_DIR}/${_target}/mpirun=${_n_cpu}) # directory to run the test in + SET(_run_command mpirun -np ${_n_cpu} ${CMAKE_CURRENT_BINARY_DIR}/${_target}/${_target}) # the command to issue + + ENDIF() + + FILE(MAKE_DIRECTORY ${_test_directory}) + + # + # Add an executable for the current test and set up compile + # definitions and the full link interface: + # + IF(NOT TARGET ${_target}) + # only add the target once + + ADD_EXECUTABLE(${_target} EXCLUDE_FROM_ALL ${_test_name}.cc) + + SET_TARGET_PROPERTIES(${_target} PROPERTIES + LINK_FLAGS "${DEAL_II_LINKER_FLAGS} ${DEAL_II_LINKER_FLAGS_${_build}}" + COMPILE_DEFINITIONS "${DEAL_II_USER_DEFINITIONS};${DEAL_II_USER_DEFINITIONS_${_build}}" + COMPILE_FLAGS "${DEAL_II_CXX_FLAGS} ${DEAL_II_CXX_FLAGS_${_build}}" + LINKER_LANGUAGE "CXX" + RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${_target}" + ) + SET_PROPERTY(TARGET ${_target} APPEND PROPERTY + INCLUDE_DIRECTORIES "${DEAL_II_INCLUDE_DIRS}" + ) + SET_PROPERTY(TARGET ${_target} APPEND PROPERTY + COMPILE_DEFINITIONS + SOURCE_DIR="${CMAKE_CURRENT_SOURCE_DIR}" + ) + TARGET_LINK_LIBRARIES(${_target} ${DEAL_II_TARGET_${_build}}) + ENDIF() + + # + # Add a top level target to run and compare the test: + # + + ADD_CUSTOM_COMMAND(OUTPUT ${_test_directory}/output + COMMAND rm -f ${_test_directory}/failing_output + COMMAND touch ${_test_directory}/output + COMMAND + ${_run_command} + || (mv ${_test_directory}/output + ${_test_directory}/failing_output + && echo "${_test_full}: BUILD successful." + && echo "${_test_full}: RUN failed. 
------ Result: ${_test_directory}/failing_output" + && echo "${_test_full}: RUN failed. ------ Partial output:" + && cat ${_test_directory}/failing_output + && exit 1) + COMMAND + ${PERL_EXECUTABLE} -pi ${DEAL_II_SOURCE_DIR}/cmake/scripts/normalize.pl + ${_test_directory}/output + WORKING_DIRECTORY + ${_test_directory} + DEPENDS + ${_target} + ${DEAL_II_SOURCE_DIR}/cmake/scripts/normalize.pl + ) + ADD_CUSTOM_COMMAND(OUTPUT ${_test_directory}/diff + COMMAND rm -f ${_test_directory}/failing_diff + COMMAND touch ${_test_directory}/diff + COMMAND + ${TEST_DIFF} + ${_test_directory}/output + ${_comparison_file} + > ${_test_directory}/diff + || (mv ${_test_directory}/diff + ${_test_directory}/failing_diff + && echo "${_test_full}: BUILD successful." + && echo "${_test_full}: RUN successful." + && echo "${_test_full}: DIFF failed. ------ Source: ${_comparison_file}" + && echo "${_test_full}: DIFF failed. ------ Result: ${_test_directory}/output" + && echo "${_test_full}: DIFF failed. ------ Diff: ${_test_directory}/failing_diff" + && echo "${_test_full}: DIFF failed. ------ Diffs as follows:" + && cat ${_test_directory}/failing_diff + && exit 1) + WORKING_DIRECTORY + ${_test_directory} + DEPENDS + ${_test_directory}/output + ${_comparison_file} + ) + + ADD_CUSTOM_TARGET(${_diff_target} DEPENDS ${_test_directory}/diff + COMMAND + echo "${_test_full}: BUILD successful." + && echo "${_test_full}: RUN successful." + && echo "${_test_full}: DIFF successful." + && echo "${_test_full}: PASSED." + ) + + # + # And finally add the test: + # + + ADD_TEST(NAME ${_test_full} + COMMAND ${CMAKE_COMMAND} + -DTRGT=${_diff_target} + -DTEST=${_test_full} + -DEXPECT=${_expect} + -DADDITIONAL_OUTPUT=${ARGN} + -DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR} + -P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake + WORKING_DIRECTORY ${_test_directory} + ) + SET_TESTS_PROPERTIES(${_test_full} PROPERTIES + LABEL "${_category}" + TIMEOUT ${TEST_TIME_LIMIT} + ) + + # + # We have to be careful not to run different mpirun settings for the + # same executable in parallel because this triggers a race condition + # when compiling the not yet existent executable that is shared + # between the different tests. + # + # Luckily CMake has a mechanism to force a test to be run when + # another has finished (and both are scheduled): + # + IF(NOT "${_n_cpu}" STREQUAL "0") + IF(DEFINED TEST_DEPENDENCIES_${_target}) + SET_TESTS_PROPERTIES(${_test_full} PROPERTIES + DEPENDS ${TEST_DEPENDENCIES_${_target}} + ) + ENDIF() + SET(TEST_DEPENDENCIES_${_target} ${_test_full}) + ENDIF() + + ENDIF() + ENDFOREACH() +ENDMACRO() diff --git a/deal.II/cmake/macros/macro_deal_ii_insource_setup_target.cmake b/deal.II/cmake/macros/macro_deal_ii_insource_setup_target.cmake new file mode 100644 index 0000000000..daa070b2ae --- /dev/null +++ b/deal.II/cmake/macros/macro_deal_ii_insource_setup_target.cmake @@ -0,0 +1,51 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2012 - 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. 
+## +## --------------------------------------------------------------------- + +# +# This file provides an insource version of the DEAL_II_SETUP_TARGET macro. +# +# Usage: +# DEAL_II_INSOURCE_SETUP_TARGET(target build) +# +# This appends necessary include directories, linker flags, compile +# definitions and the deal.II library link interface to the given target. +# +# + +MACRO(DEAL_II_INSOURCE_SETUP_TARGET _target _build) + + SET_TARGET_PROPERTIES(${_target} PROPERTIES + LINK_FLAGS "${DEAL_II_LINKER_FLAGS} ${DEAL_II_LINKER_FLAGS_${_build}}" + COMPILE_DEFINITIONS "${DEAL_II_DEFINITIONS};${DEAL_II_DEFINITIONS_${_build}}" + COMPILE_FLAGS "${DEAL_II_CXX_FLAGS_${_build}}" + LINKER_LANGUAGE "CXX" + RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${_test_short}" + ) + SET_PROPERTY(TARGET ${_target} APPEND PROPERTY + INCLUDE_DIRECTORIES + "${CMAKE_BINARY_DIR}/include" + "${CMAKE_SOURCE_DIR}/include" + "${CMAKE_SOURCE_DIR}/include/deal.II/" + ) + +GET_PROPERTY(_type TARGET ${_target} PROPERTY TYPE) +IF(NOT "${_type}" STREQUAL "OBJECT_LIBRARY") + TARGET_LINK_LIBRARIES(${_target} + ${DEAL_II_BASE_NAME}${DEAL_II_${_build}_SUFFIX} + ) +ENDIF() + +ENDMACRO() diff --git a/deal.II/cmake/macros/macro_deal_ii_setup_target.cmake b/deal.II/cmake/macros/macro_deal_ii_setup_target.cmake index 28b88dd1d0..df0a3a1387 100644 --- a/deal.II/cmake/macros/macro_deal_ii_setup_target.cmake +++ b/deal.II/cmake/macros/macro_deal_ii_setup_target.cmake @@ -121,7 +121,10 @@ MACRO(DEAL_II_SETUP_TARGET _target) # # Set up the link interface: # - TARGET_LINK_LIBRARIES(${_target} ${DEAL_II_TARGET}) + GET_PROPERTY(_type TARGET ${_target} PROPERTY TYPE) + IF(NOT "${_type}" STREQUAL "OBJECT_LIBRARY") + TARGET_LINK_LIBRARIES(${_target} ${DEAL_II_TARGET}) + ENDIF() # # If DEAL_II_STATIC_EXECUTABLE is set, switch the final link type to diff --git a/deal.II/cmake/macros/macro_pickup_tests.cmake b/deal.II/cmake/macros/macro_pickup_tests.cmake new file mode 100644 index 0000000000..954c615d17 --- /dev/null +++ b/deal.II/cmake/macros/macro_pickup_tests.cmake @@ -0,0 +1,94 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +# +# A macro to pick up all tests in a test subdirectory +# +# If TEST_PICKUP_REGEX is set, only tests matching the regex will be +# processed. +# +# If TEST_OVERRIDE_LOCATION is set, a comparison file category/test.output +# will be substituted by ${TEST_OVERRIDE_LOCATION}/category/test.output if +# the latter exists. 
+# +# Usage: +# DEAL_II_PICKUP_TESTS() +# + +MACRO(DEAL_II_PICKUP_TESTS) + SET_IF_EMPTY(TEST_PICKUP_REGEX "$ENV{TEST_PICKUP_REGEX}") + + GET_FILENAME_COMPONENT(_category ${CMAKE_CURRENT_SOURCE_DIR} NAME) + + SET(DEAL_II_SOURCE_DIR) # avoid a bogus warning + + FILE(GLOB _tests "*.output") + FOREACH(_test ${_tests}) + SET(_comparison ${_test}) + GET_FILENAME_COMPONENT(_test ${_test} NAME) + + # + # Respect TEST_PICKUP_REGEX: + # + + IF( "${TEST_PICKUP_REGEX}" STREQUAL "" OR + "${_category}/${_test}" MATCHES "${TEST_PICKUP_REGEX}" ) + SET(_define_test TRUE) + ELSE() + SET(_define_test FALSE) + ENDIF() + + # + # Query configuration and check whether we support it. Otherwise + # set _define_test to FALSE: + # + + STRING(REGEX MATCHALL + "with_([0-9]|[a-z]|_)*=(on|off|yes|no|true|false)" _matches ${_test} + ) + FOREACH(_match ${_matches}) + STRING(REGEX REPLACE + "^(with_([0-9]|[a-z]|_)*)=(on|off|yes|no|true|false)$" "\\1" + _feature ${_match} + ) + STRING(TOUPPER ${_feature} _feature) + STRING(REGEX MATCH "(on|off|yes|no|true|false)$" _boolean ${_match}) + + IF( (DEAL_II_${_feature} AND NOT ${_boolean}) OR + (NOT DEAL_II_${_feature} AND ${_boolean}) ) + SET(_define_test FALSE) + ENDIF() + ENDFOREACH() + + # + # Respect TEST_OVERRIDE_LOCATION: + # + + SET(_add_output) + IF(EXISTS ${TEST_OVERRIDE_LOCATION}/${_category}/${_test}) + SET(_add_output + "!!NOTE!! Comparison file overriden by ${TEST_OVERRIDE_LOCATION}/${_category}/${_test}" + ) + SET(_comparison "${TEST_OVERRIDE_LOCATION}/${_category}/${_test}") + ENDIF() + + IF(_define_test) + STRING(REGEX REPLACE "\\..*" "" _test ${_test}) + DEAL_II_ADD_TEST(${_category} ${_test} ${_comparison} ${_add_output}) + ENDIF() + + ENDFOREACH() +ENDMACRO() diff --git a/deal.II/cmake/macros/macro_set_if_empty.cmake b/deal.II/cmake/macros/macro_set_if_empty.cmake index 43f0125445..35acf55729 100644 --- a/deal.II/cmake/macros/macro_set_if_empty.cmake +++ b/deal.II/cmake/macros/macro_set_if_empty.cmake @@ -17,9 +17,9 @@ # # If 'variable' is empty it will be set to 'value' # -MACRO(SET_IF_EMPTY _variable _value) +MACRO(SET_IF_EMPTY _variable) IF("${${_variable}}" STREQUAL "") - SET(${_variable} ${_value}) + SET(${_variable} ${ARGN}) ENDIF() ENDMACRO() diff --git a/deal.II/cmake/modules/FindDEALII_LAPACK.cmake b/deal.II/cmake/modules/FindDEALII_LAPACK.cmake index b1c7630265..be470cec36 100644 --- a/deal.II/cmake/modules/FindDEALII_LAPACK.cmake +++ b/deal.II/cmake/modules/FindDEALII_LAPACK.cmake @@ -88,18 +88,23 @@ IF(LAPACK_FOUND) # Well, in case of static archives we have to manually pick up the # complete link interface. *sigh* # - # Do this unconditionally for the most common case: - # TODO: Non-GNU setups... + # If CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES is not available, do it + # unconditionally for the most common case (gfortran). # # Switch the library preference back to prefer dynamic libraries if # DEAL_II_PREFER_STATIC_LIBS=TRUE but DEAL_II_STATIC_EXECUTABLE=FALSE. In # this case system libraries should be linked dynamically. 
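Taken together, DEAL_II_ADD_TEST and DEAL_II_PICKUP_TESTS above encode every per-test option in the name of the comparison file. A hedged sketch of the conventions they parse, with category, test names and feature constraints made up for illustration:

#   tests/mpi/step-1.cc                          test source
#   tests/mpi/step-1.output                      plain comparison file
#   tests/mpi/step-1.mpirun=4.output             run under "mpirun -np 4"
#   tests/mpi/step-1.with_p4est=on.debug.output  only if DEAL_II_WITH_P4EST=ON,
#                                                debug configuration only
#   tests/mpi/step-1.expect=build.output         expected to stop after the BUILD stage
#
# A test subdirectory then reduces to something like (names hypothetical):
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8)
FIND_PACKAGE(deal.II 8.0 REQUIRED HINTS ${DEAL_II_DIR})
DEAL_II_INITIALIZE_CACHED_VARIABLES()
PROJECT(mpi_testsuite CXX)
ENABLE_TESTING()
SET(TEST_DIFF "diff")      # required by DEAL_II_ADD_TEST, see the macro header above
SET(TEST_TIME_LIMIT 600)   # wall clock limit in seconds
DEAL_II_PICKUP_TESTS()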
# + SET(_fortran_libs ${CMAKE_Fortran_IMPLICIT_LINK_LIBRARIES}) + SET_IF_EMPTY(_fortran_libs gfortran m quadmath c) + SWITCH_LIBRARY_PREFERENCE() - FOREACH(_lib gfortran m quadmath) + FOREACH(_lib ${_fortran_libs}) FIND_LIBRARY(${_lib}_LIBRARY NAMES ${_lib} - HINTS ${CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES}) + HINTS + ${CMAKE_Fortran_IMPLICIT_LINK_DIRECTORIES} + ${CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES}) MARK_AS_ADVANCED(${_lib}_LIBRARY) IF(NOT ${_lib}_LIBRARY MATCHES "-NOTFOUND") diff --git a/deal.II/cmake/modules/FindMETIS.cmake b/deal.II/cmake/modules/FindMETIS.cmake index 29c9be531f..005905aecc 100644 --- a/deal.II/cmake/modules/FindMETIS.cmake +++ b/deal.II/cmake/modules/FindMETIS.cmake @@ -78,7 +78,14 @@ MARK_AS_ADVANCED( IF(METIS_FOUND) - IF(NOT PARMETIS_LIBRARY MATCHES "-NOTFOUND") + # + # Sanity check: Only include parmetis library if it is in the same + # directory as the metis library... + # + GET_FILENAME_COMPONENT(_path1 "${PARMETIS_LIBRARY}" PATH) + GET_FILENAME_COMPONENT(_path2 "${ETIS_LIBRARY}" PATH) + IF( NOT PARMETIS_LIBRARY MATCHES "-NOTFOUND" + AND "${_path1}" STREQUAL "${_path2}" ) SET(METIS_LIBRARIES ${PARMETIS_LIBRARY}) ENDIF() diff --git a/deal.II/cmake/modules/FindMUMPS.cmake b/deal.II/cmake/modules/FindMUMPS.cmake index 24e48975de..6099238908 100644 --- a/deal.II/cmake/modules/FindMUMPS.cmake +++ b/deal.II/cmake/modules/FindMUMPS.cmake @@ -22,6 +22,10 @@ # MUMPS_INCLUDE_DIRS # MUMPS_LIBRARIES # MUMPS_LINKER_FLAGS +# MUMPS_VERSION +# MUMPS_VERSION_MAJOR +# MUMPS_VERSION_MINOR +# MUMPS_VERSION_SUBMINOR # SET_IF_EMPTY(MUMPS_DIR "$ENV{MUMPS_DIR}") @@ -72,6 +76,23 @@ IF(PORD_LIBRARY MATCHES "-NOTFOUND") UNSET(PORD_LIBRARY CACHE) ENDIF() +IF(EXISTS ${MUMPS_INCLUDE_DIR}/dmumps_c.h) + FILE(STRINGS "${MUMPS_INCLUDE_DIR}/dmumps_c.h" MUMPS_VERSION_STRING + REGEX "#define.*MUMPS_VERSION") + STRING(REGEX REPLACE "^.*MUMPS_VERSION.*\"(.+)\".*" "\\1" + MUMPS_VERSION "${MUMPS_VERSION_STRING}" + ) + STRING(REGEX REPLACE + "([0-9]+)\\..*" "\\1" MUMPS_VERSION_MAJOR "${MUMPS_VERSION}" + ) + STRING(REGEX REPLACE + "^[0-9]+\\.([0-9]+).*" "\\1" MUMPS_VERSION_MINOR "${MUMPS_VERSION}" + ) + STRING(REGEX REPLACE + "^[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1" MUMPS_VERSION_SUBMINOR "${MUMPS_VERSION}" + ) +ENDIF() + SET(_output ${DMUMPS_LIBRARY} ${MUMPS_COMMON_LIBRARY} ${PORD_LIBRARY}) FIND_PACKAGE_HANDLE_STANDARD_ARGS(MUMPS DEFAULT_MSG _output # Cosmetic: Gives nice output diff --git a/deal.II/cmake/modules/FindTBB.cmake b/deal.II/cmake/modules/FindTBB.cmake index 4c45373cec..82f407417e 100644 --- a/deal.II/cmake/modules/FindTBB.cmake +++ b/deal.II/cmake/modules/FindTBB.cmake @@ -22,13 +22,16 @@ # TBB_LIBRARIES # TBB_INCLUDE_DIRS # TBB_WITH_DEBUGLIB +# TBB_VERSION +# TBB_VERSION_MAJOR +# TBB_VERSION_MINOR # INCLUDE(FindPackageHandleStandardArgs) SET_IF_EMPTY(TBB_DIR "$ENV{TBB_DIR}") -FIND_PATH(TBB_INCLUDE_DIR tbb/parallel_reduce.h +FIND_PATH(TBB_INCLUDE_DIR tbb/tbb_stddef.h HINTS ${TBB_DIR} PATH_SUFFIXES include include/tbb tbb @@ -59,6 +62,24 @@ FIND_PACKAGE_HANDLE_STANDARD_ARGS(TBB DEFAULT_MSG TBB_INCLUDE_DIR ) +IF(NOT TBB_INCLUDE_DIR MATCHES "-NOTFOUND") + FILE(STRINGS "${TBB_INCLUDE_DIR}/tbb/tbb_stddef.h" TBB_VERSION_MAJOR_STRING + REGEX "#define.*TBB_VERSION_MAJOR") + STRING(REGEX REPLACE "^.*TBB_VERSION_MAJOR.*([0-9]+).*" "\\1" + TBB_VERSION_MAJOR "${TBB_VERSION_MAJOR_STRING}" + ) + + FILE(STRINGS "${TBB_INCLUDE_DIR}/tbb/tbb_stddef.h" TBB_VERSION_MINOR_STRING + REGEX "#define.*TBB_VERSION_MINOR") + STRING(REGEX REPLACE "^.*TBB_VERSION_MINOR.*([0-9]+).*" "\\1" + TBB_VERSION_MINOR 
"${TBB_VERSION_MINOR_STRING}" + ) + + SET(TBB_VERSION + "${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}" + ) +ENDIF() + MARK_AS_ADVANCED( TBB_LIBRARY TBB_DEBUG_LIBRARY diff --git a/deal.II/cmake/modules/FindUMFPACK.cmake b/deal.II/cmake/modules/FindUMFPACK.cmake index 746fdf0143..8f5ed61440 100644 --- a/deal.II/cmake/modules/FindUMFPACK.cmake +++ b/deal.II/cmake/modules/FindUMFPACK.cmake @@ -22,6 +22,10 @@ # UMFPACK_LIBRARIES # UMFPACK_INCLUDE_DIRS # UMFPACK_LINKER_FLAGS +# UMFPACK_VERSION +# UMFPACK_VERSION_MAJOR +# UMFPACK_VERSION_MINOR +# UMFPACK_VERSION_SUBMINOR # INCLUDE(FindPackageHandleStandardArgs) @@ -110,6 +114,27 @@ FIND_UMFPACK_LIBRARY(CCOLAMD ccolamd) FIND_UMFPACK_LIBRARY(CAMD camd) FIND_UMFPACK_LIBRARY(SuiteSparse_config suitesparseconfig) +IF(EXISTS ${UMFPACK_INCLUDE_DIR}/umfpack.h) + FILE(STRINGS "${UMFPACK_INCLUDE_DIR}/umfpack.h" UMFPACK_VERSION_MAJOR_STRING + REGEX "#define.*UMFPACK_MAIN_VERSION") + STRING(REGEX REPLACE "^.*UMFPACK_MAIN_VERSION.*([0-9]+).*" "\\1" + UMFPACK_VERSION_MAJOR "${UMFPACK_VERSION_MAJOR_STRING}" + ) + FILE(STRINGS "${UMFPACK_INCLUDE_DIR}/umfpack.h" UMFPACK_VERSION_MINOR_STRING + REGEX "#define.*UMFPACK_SUB_VERSION") + STRING(REGEX REPLACE "^.*UMFPACK_SUB_VERSION.*([0-9]+).*" "\\1" + UMFPACK_VERSION_MINOR "${UMFPACK_VERSION_MINOR_STRING}" + ) + FILE(STRINGS "${UMFPACK_INCLUDE_DIR}/umfpack.h" UMFPACK_VERSION_SUBMINOR_STRING + REGEX "#define.*UMFPACK_SUBSUB_VERSION") + STRING(REGEX REPLACE "^.*UMFPACK_SUBSUB_VERSION.*([0-9]+).*" "\\1" + UMFPACK_VERSION_SUBMINOR "${UMFPACK_VERSION_SUBMINOR_STRING}" + ) + SET(UMFPACK_VERSION + "${UMFPACK_VERSION_MAJOR}.${UMFPACK_VERSION_MINOR}.${UMFPACK_VERSION_SUBMINOR}" + ) +ENDIF() + SET(_output ${UMFPACK_LIBRARY} ${CHOLMOD_LIBRARY} ${CCOLAMD_LIBRARY} ${COLAMD_LIBRARY} ${CAMD_LIBRARY} ${AMD_LIBRARY} ${SuiteSparse_config_LIBRARY}) FIND_PACKAGE_HANDLE_STANDARD_ARGS(UMFPACK DEFAULT_MSG _output # Cosmetic: Gives nice output diff --git a/deal.II/cmake/scripts/CMakeLists.txt b/deal.II/cmake/scripts/CMakeLists.txt index 299cc0257c..6e000ece9b 100644 --- a/deal.II/cmake/scripts/CMakeLists.txt +++ b/deal.II/cmake/scripts/CMakeLists.txt @@ -19,17 +19,16 @@ IF(NOT CMAKE_CROSSCOMPILING) IF(DEAL_II_COMPONENT_COMPAT_FILES) SET_TARGET_PROPERTIES(expand_instantiations - PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/common/scripts + PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/scripts ) INSTALL(TARGETS expand_instantiations - EXPORT ${DEAL_II_PROJECT_CONFIG_NAME}Targets DESTINATION ${DEAL_II_COMMON_RELDIR}/scripts COMPONENT compat_files ) ENDIF() EXPORT(TARGETS expand_instantiations - FILE ${CMAKE_BINARY_DIR}/importExecutables.cmake + FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake ) ENDIF() @@ -90,12 +89,15 @@ IF(DEAL_II_COMPONENT_COMPAT_FILES) ENDIF() SET_TARGET_PROPERTIES(make_dependencies report_features - PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/common/scripts + PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/${DEAL_II_COMMON_RELDIR}/scripts ) INSTALL(TARGETS make_dependencies report_features - EXPORT ${DEAL_II_PROJECT_CONFIG_NAME}Targets DESTINATION ${DEAL_II_COMMON_RELDIR}/scripts COMPONENT compat_files ) + EXPORT(TARGETS make_dependencies report_features + FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake + APPEND + ) ENDIF() diff --git a/deal.II/cmake/scripts/normalize.pl b/deal.II/cmake/scripts/normalize.pl new file mode 100644 index 
0000000000..b29534ec1b --- /dev/null +++ b/deal.II/cmake/scripts/normalize.pl @@ -0,0 +1,47 @@ +###################################################################### +# $Id$ +# +# Copyright (C) 2001, 2003, 2005, 2010, 2011, 2012, 2013, the deal.II authors +# +# Remove insignificant volatile data from output files of tests +# +# Data affected: +# JobID line (containing date) +# line number of exceptions +# start and final residual in iterations +# small doubles +###################################################################### + +# Remove JobID + +s/JobId.*//; + +# Remove Input File Name: + +s/# Input file name:.*//; + +# Several date and time strings + +s/%%Creation Date:.*//; +s/\"created\".*//; +s/# Time =.*//; +s/# Date =.*//; +s/^\s+Time =.*//; +s/^\s+Date =.*//; +s/Time tag:.*//g; +s/by the deal.II library on.*//; + +# Exceptions + +s/line <\d+> of file <.*\//file /run_testsuite.cmake +# +# The following configuration variables can be overwritten with +# +# ctest -D= [...] +# +# +# CTEST_SOURCE_DIRECTORY +# - The source directory of deal.II (usually ending in "[...]/deal.II" +# (equivalent to https://svn.dealii.org/trunk/deal.II) +# Note: This is _not_ the test directory ending in "[...]/tests" +# - If unspecified, "../deal.II" relative to the location of this +# script is used. If this is not a source directory, an error is +# thrown. +# +# CTEST_BINARY_DIRECTORY +# - The designated build directory (already configured, empty, or non +# existent - see the information about TRACKs what will happen) +# - If unspecified the current directory is used. If the current +# directory is equal to CTEST_SOURCE_DIRECTORY or the "tests" +# directory, an error is thrown. +# +# CTEST_CMAKE_GENERATOR +# - The CMake Generator to use (e.g. "Unix Makefiles", or "Ninja", see +# $ man cmake) +# - If unspecified the generator of a configured build directory will +# be used, otherwise "Unix Makefiles". +# +# TRACK +# - The track the test should be submitted to. Defaults to +# "Experimental". Possible values are: +# +# "Experimental" - all tests that are not specifically "build" or +# "regression" tests should go into this track +# +# "Build Tests" - Build tests that configure and build in a +# clean directory and run the build tests +# "build_tests/*" +# +# "Nightly" - Reserved for nightly regression tests for +# build bots on various architectures +# +# "Regression Tests" - Reserved for the regression tester +# +# CONFIG_FILE +# - A configuration file (see ../deal.II/docs/development/Config.sample) +# that will be used during the configuration stage (invokes +# # cmake -C ${CONFIG_FILE}). This only has an effect if +# CTEST_BINARY_DIRECTORY is empty. +# +# COVERAGE +# - If set to TRUE deal.II will be configured with +# DEAL_II_SETUP_COVERAGE=TRUE, CMAKE_BUILD_TYPE=Debug and the +# CTEST_COVERAGE() stage will be run. Test results must go into the +# "Experimental" section. +# +# Furthermore, the following variables controlling the testsuite can be set +# and will be automatically handed down to cmake: +# +# TEST_DIFF +# TEST_TIME_LIMIT +# TEST_PICKUP_REGEX +# TEST_OVERRIDE_LOCATION +# NUMDIFF_DIR +# +# For details, consult the ./README file. 
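The angle-bracketed placeholders in the header above did not survive this plain-text rendering; presumably the intended spelling is "ctest -D<variable>=<value> [...] -S <path-to>/run_testsuite.cmake". A hedged invocation sketch with purely illustrative paths:

ctest -DCTEST_SOURCE_DIRECTORY=/path/to/deal.II \
      -DCTEST_BINARY_DIRECTORY=/path/to/build \
      -DTRACK=Experimental \
      -DCONFIG_FILE=/path/to/my_configuration.cmake \
      -S /path/to/deal.II/cmake/scripts/run_testsuite.cmake -V

Any of the TEST_* variables listed above (TEST_DIFF, TEST_TIME_LIMIT, TEST_PICKUP_REGEX, TEST_OVERRIDE_LOCATION, NUMDIFF_DIR) can be appended in the same -D form; the script hands them down to the configure stage.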
+# + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8) +MESSAGE("-- This is CTest ${CMAKE_VERSION}") + +# +# TRACK: Default to Experimental: +# + +IF("${TRACK}" STREQUAL "") + SET(TRACK "Experimental") +ENDIF() + +IF( NOT "${TRACK}" STREQUAL "Experimental" + AND NOT "${TRACK}" STREQUAL "Build Tests" + AND NOT "${TRACK}" STREQUAL "Nightly" + AND NOT "${TRACK}" STREQUAL "Regression Tests" ) + MESSAGE(FATAL_ERROR " +Unknown TRACK \"${TRACK}\" - see the manual for valid values. +" + ) +ENDIF() + +MESSAGE("-- TRACK: ${TRACK}") + +# +# CTEST_SOURCE_DIRECTORY: +# + +IF("${CTEST_SOURCE_DIRECTORY}" STREQUAL "") + # + # If CTEST_SOURCE_DIRECTORY is not set we just assume that this script + # was called residing under cmake/scipts in the source directory + # + GET_FILENAME_COMPONENT(_path "${CMAKE_CURRENT_LIST_DIR}" PATH) + GET_FILENAME_COMPONENT(CTEST_SOURCE_DIRECTORY "${_path}" PATH) + + IF(NOT EXISTS ${CTEST_SOURCE_DIRECTORY}/CMakeLists.txt) + MESSAGE(FATAL_ERROR " +Could not find a suitable source directory. There is no source directory +\"../deal.II\" or \"../../\" relative to the location of this script. +Please, set CTEST_SOURCE_DIRECTORY manually to the appropriate source +directory. +" + ) + ENDIF() +ENDIF() + +MESSAGE("-- CTEST_SOURCE_DIRECTORY: ${CTEST_SOURCE_DIRECTORY}") + +# +# Read in custom config files: +# + +CTEST_READ_CUSTOM_FILES(${CTEST_SOURCE_DIRECTORY}) + +# +# CTEST_BINARY_DIRECTORY: +# + +IF("${CTEST_BINARY_DIRECTORY}" STREQUAL "") + # + # If CTEST_BINARY_DIRECTORY is not set we just use the current directory + # except if it is equal to CTEST_SOURCE_DIRECTORY in which case we fail. + # + SET(CTEST_BINARY_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + + IF( "${CTEST_BINARY_DIRECTORY}" STREQUAL "${CTEST_SOURCE_DIRECTORY}") + MESSAGE(FATAL_ERROR " +ctest was invoked in the source directory (or test source directory) and CTEST_BINARY_DIRECTORY is not set. +Please either call ctest from within a designated build directory, or set CTEST_BINARY_DIRECTORY accordingly. +" + ) + ENDIF() +ENDIF() + +# +# Read in custom config files: +# + +CTEST_READ_CUSTOM_FILES(${CTEST_BINARY_DIRECTORY}) + +# Make sure that for a build test the directory is empty: +FILE(GLOB _test ${CTEST_BINARY_DIRECTORY}/*) +IF( "${TRACK}" STREQUAL "Build Tests" + AND NOT "${_test}" STREQUAL "" ) + MESSAGE(FATAL_ERROR " +TRACK was set to \"Build Tests\" which require an empty build directory. +But files were found in \"${CTEST_BINARY_DIRECTORY}\" +" + ) +ENDIF() + +MESSAGE("-- CTEST_BINARY_DIRECTORY: ${CTEST_BINARY_DIRECTORY}") + +# +# CTEST_CMAKE_GENERATOR: +# + +# Query Generator from build directory (if possible): +IF(EXISTS ${CTEST_BINARY_DIRECTORY}/CMakeCache.txt) + FILE(STRINGS ${CTEST_BINARY_DIRECTORY}/CMakeCache.txt _generator + REGEX "^CMAKE_GENERATOR:" + ) + STRING(REGEX REPLACE "^.*=" "" _generator ${_generator}) +ENDIF() + +IF("${CTEST_CMAKE_GENERATOR}" STREQUAL "") + IF(NOT "${_generator}" STREQUAL "") + SET(CTEST_CMAKE_GENERATOR ${_generator}) + ELSE() + # default to "Unix Makefiles" + SET(CTEST_CMAKE_GENERATOR "Unix Makefiles") + ENDIF() +ELSE() + # ensure that CTEST_CMAKE_GENERATOR (that was apparantly set) is + # compatible with the build directory: + IF( NOT "${CTEST_CMAKE_GENERATOR}" STREQUAL "${_generator}" + AND NOT "${_generator}" STREQUAL "" ) + MESSAGE(FATAL_ERROR " +The build directory is already set up with Generator \"${_generator}\", but +CTEST_CMAKE_GENERATOR was set to a different Generator \"${CTEST_CMAKE_GENERATOR}\". 
+" + ) + ENDIF() +ENDIF() + +MESSAGE("-- CTEST_CMAKE_GENERATOR: ${CTEST_CMAKE_GENERATOR}") + +# +# CTEST_SITE: +# + +FIND_PROGRAM(HOSTNAME_COMMAND NAMES hostname) +EXEC_PROGRAM(${HOSTNAME_COMMAND} OUTPUT_VARIABLE _hostname) +SET(CTEST_SITE "${_hostname}") + +MESSAGE("-- CTEST_SITE: ${CTEST_SITE}") + +IF( "${TRACK}" STREQUAL "Regression Tests" + AND NOT CTEST_SITE MATCHES "c0541" ) + MESSAGE(FATAL_ERROR " +I'm sorry ${CTEST_SITE}, I'm afraid I can't do that. +The TRACK \"Regression Tests\" is not for you. +" + ) +ENDIF() + +# +# Assemble configuration options, we need it now: +# + +IF(NOT "${CONFIG_FILE}" STREQUAL "") + SET(_options "-C${CONFIG_FILE}") +ENDIF() + +IF("${TRACK}" STREQUAL "Build Tests") + SET(TEST_PICKUP_REGEX "^build_tests") +ENDIF() + +# Pass all relevant "TEST_" variables down to configure: +GET_CMAKE_PROPERTY(_variables VARIABLES) +FOREACH(_var ${_variables}) + IF(_var MATCHES + "^(TEST_DIFF|TEST_TIME_LIMIT|TEST_PICKUP_REGEX|TEST_OVERRIDE_LOCATION|NUMDIFF_DIR)$" + ) + LIST(APPEND _options "-D${_var}=${${_var}}") + ENDIF() +ENDFOREACH() + +IF(COVERAGE) + LIST(APPEND _options "-DDEAL_II_SETUP_COVERAGE=TRUE") + LIST(APPEND _options "-DCMAKE_BUILD_TYPE=Debug") +ENDIF() + +# +# CTEST_BUILD_NAME: +# + +# Append compiler information to CTEST_BUILD_NAME: +IF(NOT EXISTS ${CTEST_BINARY_DIRECTORY}/detailed.log) + # Apparently, ${CTEST_BINARY_DIRECTORY} is not a configured build + # directory. In this case we need a trick: set up a dummy project and + # query it for the compiler information. + FILE(WRITE ${CTEST_BINARY_DIRECTORY}/query_for_compiler/CMakeLists.txt " +FILE(WRITE ${CTEST_BINARY_DIRECTORY}/detailed.log + \"# CMAKE_CXX_COMPILER: \${CMAKE_CXX_COMPILER_ID} \${CMAKE_CXX_COMPILER_VERSION} on platform \${CMAKE_SYSTEM_NAME} \${CMAKE_SYSTEM_PROCESSOR}\" + )" + ) + EXECUTE_PROCESS( + COMMAND ${CMAKE_COMMAND} ${_options} "-G${CTEST_CMAKE_GENERATOR}" . 
+ OUTPUT_QUIET ERROR_QUIET + WORKING_DIRECTORY ${CTEST_BINARY_DIRECTORY}/query_for_compiler + ) + FILE(REMOVE_RECURSE ${CTEST_BINARY_DIRECTORY}/query_for_compiler) +ENDIF() + +IF(EXISTS ${CTEST_BINARY_DIRECTORY}/detailed.log) + FILE(STRINGS ${CTEST_BINARY_DIRECTORY}/detailed.log _compiler_id + REGEX "CMAKE_CXX_COMPILER:" + ) + STRING(REGEX REPLACE + "^.*CMAKE_CXX_COMPILER: \(.*\) on platform.*$" "\\1" + _compiler_id ${_compiler_id} + ) + STRING(REGEX REPLACE "^\(.*\) .*$" "\\1" _compiler_name ${_compiler_id}) + STRING(REGEX REPLACE "^.* " "" _compiler_version ${_compiler_id}) + STRING(REGEX REPLACE " " "-" _compiler_id ${_compiler_id}) + IF( NOT "${_compiler_id}" STREQUAL "" OR + _compiler_id MATCHES "CMAKE_CXX_COMPILER" ) + SET(CTEST_BUILD_NAME "${_compiler_id}") + ENDIF() +ENDIF() + +# +# Append subversion branch to CTEST_BUILD_NAME: +# +FIND_PACKAGE(Subversion QUIET) +EXECUTE_PROCESS( + COMMAND ${Subversion_SVN_EXECUTABLE} info ${CTEST_SOURCE_DIRECTORY} + OUTPUT_QUIET ERROR_QUIET + RESULT_VARIABLE _result + ) +IF(${_result} EQUAL 0) + Subversion_WC_INFO(${CTEST_SOURCE_DIRECTORY} _svn) + STRING(REGEX REPLACE "^${_svn_WC_ROOT}/" "" _branch ${_svn_WC_URL}) + STRING(REGEX REPLACE "^branches/" "" _branch ${_branch}) + STRING(REGEX REPLACE "/deal.II$" "" _branch ${_branch}) + SET(CTEST_BUILD_NAME "${CTEST_BUILD_NAME}-${_branch}") +ENDIF() + +# +# Append config file name to CTEST_BUILD_NAME: +# + +IF(NOT "${CONFIG_FILE}" STREQUAL "") + GET_FILENAME_COMPONENT(_conf ${CONFIG_FILE} NAME_WE) + STRING(REGEX REPLACE "#.*$" "" _conf ${_conf}) + SET(CTEST_BUILD_NAME "${CTEST_BUILD_NAME}-${_conf}") +ENDIF() + +MESSAGE("-- CTEST_BUILD_NAME: ${CTEST_BUILD_NAME}") + +# +# We require valid svn information for build tests: +# + +IF( "${TRACK}" STREQUAL "Build Tests" + AND NOT DEFINED _svn_WC_REVISION ) + MESSAGE(FATAL_ERROR " +TRACK was set to \"Build Tests\" which requires the source directory to be +under Subversion version control. +" + ) +ENDIF() + +# +# Write revision log: +# + +IF(DEFINED _svn_WC_REVISION) + FILE(WRITE ${CTEST_BINARY_DIRECTORY}/revision.log +"### +# +# SVN information: +# SVN_WC_URL: ${_svn_WC_URL} +# SVN_WC_REVISION: ${_svn_WC_REVISION} +# SVN_WC_LAST_CHANGED_DATE: ${_svn_WC_LAST_CHANGED_DATE} +# +###" + ) +ELSE() + FILE(WRITE ${CTEST_BINARY_DIRECTORY}/revision.log +"### +# +# No SVN information available. +# +###" + ) +ENDIF() + +# +# Declare files that should be submitted as notes: +# + +SET(CTEST_NOTES_FILES + ${CTEST_BINARY_DIRECTORY}/revision.log + ${CTEST_BINARY_DIRECTORY}/summary.log + ${CTEST_BINARY_DIRECTORY}/detailed.log + ${CTEST_BINARY_DIRECTORY}/include/deal.II/base/config.h + ) + +# +# Setup coverage: +# +IF(COVERAGE) + IF(NOT TRACK MATCHES "Experimental") + MESSAGE(FATAL_ERROR " +TRACK must be set to \"Experimental\" if Coverage is enabled via +COVERAGE=TRUE. +" + ) + ENDIF() + + FIND_PROGRAM(GCOV_COMMAND NAMES gcov) + + IF(GCOV_COMMAND MATCHES "-NOTFOUND") + MESSAGE(FATAL_ERROR " +Coverage enabled but could not find the gcov executable. Please install +gcov, which is part of the GNU Compiler Collection. +" + ) + ENDIF() + + SET(CTEST_COVERAGE_COMMAND "${GCOV_COMMAND}") +ENDIF() + +MESSAGE("-- COVERAGE: ${COVERAGE}") + + +MACRO(CREATE_TARGETDIRECTORIES_TXT) + # + # It gets tricky: Fake a TargetDirectories.txt containing _all_ target + # directories (of the main project and all subprojects) so that the + # CTEST_COVERAGE() actually picks everything up... 
+ # + EXECUTE_PROCESS(COMMAND ${CMAKE_COMMAND} -E copy + ${CTEST_BINARY_DIRECTORY}/CMakeFiles/TargetDirectories.txt + ${CTEST_BINARY_DIRECTORY}/CMakeFiles/TargetDirectories.txt.bck + ) + FILE(GLOB _subprojects ${CTEST_BINARY_DIRECTORY}/tests/*) + FOREACH(_subproject ${_subprojects}) + IF(EXISTS ${_subproject}/CMakeFiles/TargetDirectories.txt) + FILE(READ ${_subproject}/CMakeFiles/TargetDirectories.txt _var) + FILE(APPEND ${CTEST_BINARY_DIRECTORY}/CMakeFiles/TargetDirectories.txt ${_var}) + ENDIF() + ENDFOREACH() +ENDMACRO() + +MACRO(CLEAR_TARGETDIRECTORIES_TXT) + EXECUTE_PROCESS(COMMAND ${CMAKE_COMMAND} -E rename + ${CTEST_BINARY_DIRECTORY}/CMakeFiles/TargetDirectories.txt.bck + ${CTEST_BINARY_DIRECTORY}/CMakeFiles/TargetDirectories.txt + ) +ENDMACRO() + + +######################################################################## +# # +# Run the testsuite: # +# # +######################################################################## + +IF(NOT "${_branch}" STREQUAL "") + SET_PROPERTY(GLOBAL PROPERTY SubProject ${_branch}) +ENDIF() + +CTEST_START(Experimental TRACK ${TRACK}) + +MESSAGE("-- Running CTEST_CONFIGURE()") +CTEST_CONFIGURE(OPTIONS "${_options}" RETURN_VALUE _res) + +IF("${_res}" STREQUAL "0") + # Only run the build stage if configure was successful: + + MESSAGE("-- Running CTEST_BUILD()") + CTEST_BUILD(TARGET NUMBER_ERRORS _res) + + IF("${_res}" STREQUAL "0") + # Only run tests if the build was successful: + + MESSAGE("-- Running make setup_tests") + EXECUTE_PROCESS(COMMAND ${CMAKE_COMMAND} + --build ${CTEST_BINARY_DIRECTORY} --target setup_tests + OUTPUT_QUIET RESULT_VARIABLE _res + ) + IF(NOT "${_res}" STREQUAL "0") + MESSAGE(FATAL_ERROR " +\"setup_tests\" target exited with an error. Bailing out. +" + ) + ENDIF() + + MESSAGE("-- Running CTEST_TESTS()") + CTEST_TEST() + + IF(COVERAGE) + CREATE_TARGETDIRECTORIES_TXT() + MESSAGE("-- Running CTEST_COVERAGE()") + CTEST_COVERAGE() + CLEAR_TARGETDIRECTORIES_TXT() + ENDIF(COVERAGE) + + ENDIF() +ENDIF() + +# +# Inject compiler information and svn revision into xml files: +# + +FILE(STRINGS ${CTEST_BINARY_DIRECTORY}/Testing/TAG _tag LIMIT_COUNT 1) +SET(_path "${CTEST_BINARY_DIRECTORY}/Testing/${_tag}") +IF(NOT EXISTS ${_path}) + MESSAGE(FATAL_ERROR " +Unable to determine test submission files from TAG. Bailing out. +" + ) +ENDIF() +FILE(GLOB _xml_files ${_path}/*.xml) +EXECUTE_PROCESS(COMMAND sed -i -e + s/CompilerName=\"\"/CompilerName=\"${_compiler_name}\"\\n\\tCompilerVersion=\"${_compiler_version}\"/g + ${_xml_files} + OUTPUT_QUIET RESULT_VARIABLE _res + ) +IF(NOT "${_res}" STREQUAL "0") + MESSAGE(FATAL_ERROR " +\"sed\" failed. Bailing out. +" + ) +ENDIF() + +IF(NOT "${_svn_WC_REVISION}" STREQUAL "") + FILE(WRITE ${_path}/Update.xml +" + + ${CTEST_SITE} + ${CTEST_BUILD_NAME} + ${_tag}-${TRACK} + SVN + ${_svn_WC_REVISION} + ${_branch} +" + ) +ENDIF() + +# +# And finally submit: +# + +MESSAGE("-- Running CTEST_SUBMIT()") +CTEST_SUBMIT(RETURN_VALUE _res) + +IF("${_res}" STREQUAL "0") + MESSAGE("-- Submission successful. Goodbye!") +ENDIF() + +# .oO( This script is freaky 541 lines long... 
) diff --git a/deal.II/cmake/setup_cached_variables.cmake b/deal.II/cmake/setup_cached_variables.cmake index c2e04fe20d..8d476fb437 100644 --- a/deal.II/cmake/setup_cached_variables.cmake +++ b/deal.II/cmake/setup_cached_variables.cmake @@ -35,6 +35,7 @@ # CMAKE_BUILD_TYPE # DEAL_II_ALLOW_PLATFORM_INTROSPECTION # DEAL_II_SETUP_DEFAULT_COMPILER_FLAGS +# DEAL_II_SETUP_COVERAGE # BUILD_SHARED_LIBS # DEAL_II_PREFER_STATIC_LIBS # DEAL_II_STATIC_EXECUTABLE @@ -152,6 +153,12 @@ OPTION(DEAL_II_SETUP_DEFAULT_COMPILER_FLAGS ) MARK_AS_ADVANCED(DEAL_II_SETUP_DEFAULT_COMPILER_FLAGS) +OPTION(DEAL_II_SETUP_COVERAGE + "Setup debug compiler flags to provide additional test coverage information. Currently only gprof is supported." + OFF + ) +MARK_AS_ADVANCED(DEAL_II_SETUP_DEFAULT_COMPILER_FLAGS) + SET(BUILD_SHARED_LIBS "ON" CACHE BOOL "Build a shared library" ) diff --git a/deal.II/cmake/setup_compiler_flags_gnu.cmake b/deal.II/cmake/setup_compiler_flags_gnu.cmake index 14884e265d..177473121c 100644 --- a/deal.II/cmake/setup_compiler_flags_gnu.cmake +++ b/deal.II/cmake/setup_compiler_flags_gnu.cmake @@ -133,7 +133,6 @@ IF (CMAKE_BUILD_TYPE MATCHES "Release") ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-funroll-loops") ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-funroll-all-loops") ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-fstrict-aliasing") - ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-felide-constructors") ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-Wno-unused") ENDIF() @@ -167,5 +166,15 @@ IF (CMAKE_BUILD_TYPE MATCHES "Debug") ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_DEBUG "-g") ENABLE_IF_SUPPORTED(DEAL_II_LINKER_FLAGS_DEBUG "-g") ENDIF() + + IF(DEAL_II_SETUP_COVERAGE) + # + # Enable test coverage + # + ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-fno-elide-constructors") + ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_DEBUG "-ftest-coverage -fprofile-arcs") + ENABLE_IF_SUPPORTED(DEAL_II_LINKER_FLAGS_DEBUG "-ftest-coverage -fprofile-arcs") + ENDIF() + ENDIF() diff --git a/deal.II/cmake/setup_compiler_flags_intel.cmake b/deal.II/cmake/setup_compiler_flags_intel.cmake index 98afb1943b..44a3188c7e 100644 --- a/deal.II/cmake/setup_compiler_flags_intel.cmake +++ b/deal.II/cmake/setup_compiler_flags_intel.cmake @@ -108,6 +108,11 @@ IF(DEAL_II_STATIC_EXECUTABLE) ENABLE_IF_SUPPORTED(DEAL_II_LINKER_FLAGS "-static-intel") ENABLE_IF_SUPPORTED(DEAL_II_LINKER_FLAGS "-static-gcc") ENABLE_IF_SUPPORTED(DEAL_II_LINKER_FLAGS "-pthread") +ELSE() + # + # Explicitly link intel support libraries dynamically: + # + ENABLE_IF_SUPPORTED(DEAL_II_LINKER_FLAGS "-shared-intel") ENDIF() @@ -122,9 +127,7 @@ IF (CMAKE_BUILD_TYPE MATCHES "Release") # General optimization flags: # - IF(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "13.0" ) - ADD_FLAGS(DEAL_II_CXX_FLAGS_RELEASE "-O2") - ELSE() + IF(CMAKE_CXX_COMPILER_VERSION MATCHES "^13\\.") # # Disable aggressive optimization for intel-13* compilers, until we # resolve a lot of funny miscompilations... 
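DEAL_II_SETUP_COVERAGE, introduced in setup_cached_variables.cmake above and acted on in the GNU compiler flag setup, is meant to be switched on in one of two ways; a hedged usage sketch with illustrative paths, matching the COVERAGE handling of run_testsuite.cmake above:

# Directly, when configuring a build directory by hand:
cmake -DCMAKE_BUILD_TYPE=Debug -DDEAL_II_SETUP_COVERAGE=TRUE ../deal.II
# Or through the dashboard driver, which sets both options and runs CTEST_COVERAGE():
ctest -DCOVERAGE=TRUE -S ../deal.II/cmake/scripts/run_testsuite.cmake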
@@ -132,13 +135,16 @@ IF (CMAKE_BUILD_TYPE MATCHES "Release") # - Maier, 2013 # ADD_FLAGS(DEAL_II_CXX_FLAGS_RELEASE "-O1") + ELSE() + ADD_FLAGS(DEAL_II_CXX_FLAGS_RELEASE "-O2") ENDIF() + # equivalent to -fno-strict-aliasing: + ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-no-ansi-alias") + ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-ip") ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-funroll-loops") - # equivalent to -fno-strict-aliasing: - ENABLE_IF_SUPPORTED(DEAL_II_CXX_FLAGS_RELEASE "-no-ansi-alias") ENDIF() diff --git a/deal.II/cmake/setup_custom_targets.cmake b/deal.II/cmake/setup_custom_targets.cmake index 864ebc955b..65399c645b 100644 --- a/deal.II/cmake/setup_custom_targets.cmake +++ b/deal.II/cmake/setup_custom_targets.cmake @@ -17,29 +17,32 @@ # # Add convenience targets that build and install only a specific component: # +# library +# compat_files +# documentation +# examples +# mesh_converter +# parameter_gui +# -FOREACH(_component library) - ADD_CUSTOM_TARGET(${_component} - COMMAND ${CMAKE_COMMAND} - -DCOMPONENT="${_component}" -P cmake_install.cmake - COMMENT "Build and install component \"${_component}\"." - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - ) -ENDFOREACH() +# The library can always be installed ;-) +ADD_CUSTOM_TARGET(library + COMMAND ${CMAKE_COMMAND} + -DCOMPONENT="library" -P cmake_install.cmake + COMMENT "Build and install component \"library\"." + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) FOREACH(_component compat_files documentation examples mesh_converter parameter_gui) STRING(TOUPPER "${_component}" _component_uppercase) IF(DEAL_II_COMPONENT_${_component_uppercase}) - ADD_CUSTOM_TARGET(${_component} COMMAND ${CMAKE_COMMAND} -DCOMPONENT="${_component}" -P cmake_install.cmake COMMENT "Build and install component \"${_component}\"." 
WORKING_DIRECTORY ${CMAKE_BINARY_DIR} ) - ELSE() - STRING(TOUPPER ${_component} _componentuppercase) ADD_CUSTOM_TARGET(${_component} COMMAND @@ -53,7 +56,44 @@ FOREACH(_component compat_files documentation examples mesh_converter parameter_ && ${CMAKE_COMMAND} -E echo '' && false ) - ENDIF() ENDFOREACH() +# +# Provide an "info" target to print a help message: +# + +FILE(WRITE ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/print_info.cmake +"MESSAGE( +\"### +# +# The following targets are available (invoke by $ make ): +# +# all - compiles the library and all enabled components +# clean - removes all generated files +# install - installs into CMAKE_INSTALL_PREFIX +# help - prints a list of valid top level targets +# info - prints this help message +# +# edit_cache - runs ccmake for changing (cached) configuration variables +# and reruns the configure and generate phases of CMake +# rebuild_cache - reruns the configure and generate phases of CMake +# +# compat_files - builds and installs the 'compat_files' component +# documentation - builds and installs the 'documentation' component +# examples - builds and installs the 'examples' component +# library - builds and installs the 'library' component +# mesh_converter - builds and installs the 'mesh_converter' component +# parameter_gui - builds and installs the 'parameter_gui' component +# +# test - runs a minimal set of tests +# +# setup_tests - sets up the testsuite subprojects +# clean_tests - runs the 'clean' target in every testsuite subproject +# prune_tests - removes all testsuite subprojects +# +###\")" + ) +ADD_CUSTOM_TARGET(info + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/print_info.cmake + ) diff --git a/deal.II/cmake/setup_deal_ii.cmake b/deal.II/cmake/setup_deal_ii.cmake index a07c54f465..58fbd0e16e 100644 --- a/deal.II/cmake/setup_deal_ii.cmake +++ b/deal.II/cmake/setup_deal_ii.cmake @@ -18,7 +18,8 @@ # Set up deal.II specific definitions # # This file defines a long list of uncached variables, used throughout the -# configuration to determine paths, locations and names. +# configuration to determine paths, locations and names. Some linkage and +# crosscompilation setup happens also in here. # # Definitions marked with *) can be overriden by defining them to cache # prior to the call of this file. This is done with the help of the @@ -135,7 +136,6 @@ ELSE() SET_IF_EMPTY(DEAL_II_PROJECT_CONFIG_RELDIR "${DEAL_II_LIBRARY_RELDIR}/cmake/${DEAL_II_PROJECT_CONFIG_NAME}") ENDIF() - IF(CMAKE_BUILD_TYPE MATCHES "Debug") LIST(APPEND DEAL_II_BUILD_TYPES "DEBUG") ENDIF() @@ -144,3 +144,37 @@ IF(CMAKE_BUILD_TYPE MATCHES "Release") LIST(APPEND DEAL_II_BUILD_TYPES "RELEASE") ENDIF() + +######################################################################## +# # +# Setup static linkage and crosscompilation: # +# # +######################################################################## + +# +# Library search order: +# +IF(DEAL_II_PREFER_STATIC_LIBS) + # Invert the search order for libraries when DEAL_II_PREFER_STATIC_LIBS + # is set. 
This will prefer static archives instead of shared libraries: + LIST(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES) +ENDIF() + +# +# Cross compilation stuff: +# +IF(CMAKE_CROSSCOMPILING) + # Disable platform introspection when cross compiling + SET(DEAL_II_ALLOW_PLATFORM_INTROSPECTION OFF CACHE BOOL "" FORCE) + + # Import native expand_instantiations for use in cross compilation: + SET(DEAL_II_NATIVE "DEAL_II_NATIVE-NOTFOUND" CACHE FILEPATH + "A pointer to a native deal.Ii build directory" + ) + IF(DEAL_II_NATIVE MATCHES "-NOTFOUND") + MESSAGE(FATAL_ERROR + "Please set the CMake variable DEAL_II_NATIVE to a valid path that points to a native deal.II build directory" + ) + ENDIF() + INCLUDE(${DEAL_II_NATIVE}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake) +ENDIF() diff --git a/deal.II/cmake/setup_finalize.cmake b/deal.II/cmake/setup_finalize.cmake index bcb40185ee..4c4fc377ff 100644 --- a/deal.II/cmake/setup_finalize.cmake +++ b/deal.II/cmake/setup_finalize.cmake @@ -121,6 +121,10 @@ _both( # CMAKE_BINARY_DIR: ${CMAKE_BINARY_DIR} # CMAKE_CXX_COMPILER: ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION} on platform ${CMAKE_SYSTEM_NAME} ${CMAKE_SYSTEM_PROCESSOR} # ${CMAKE_CXX_COMPILER} +" + ) +_detailed( +"# CMAKE_GENERATOR: ${CMAKE_GENERATOR} " ) IF(CMAKE_CROSSCOMPILING) @@ -200,6 +204,12 @@ FOREACH(_var ${_features}) # IF(DEFINED ${_feature}_VERSION) _detailed("# ${_feature}_VERSION = ${${_feature}_VERSION}\n") + ELSEIF(_feature MATCHES "THREADS" AND DEFINED TBB_VERSION) + _detailed("# TBB_VERSION = ${TBB_VERSION}\n") + ENDIF() + + IF(_feature MATCHES "MPI" AND DEFINED OMPI_VERSION) + _detailed("# OMPI_VERSION = ${OMPI_VERSION}\n") ENDIF() # @@ -216,9 +226,9 @@ FOREACH(_var ${_features}) IF( # MPI: _var2 MATCHES "^${_feature}_CXX_(COMPILER|COMPILE_FLAGS|LINK_FLAGS|LIBRARIES|INCLUDE_PATH)$" OR # Boost: - ( _feature MATCHES "BOOST" AND _var2 MATCHES "^Boost(_LIBRARIES|_INCLUDE_DIRS)$" ) OR + ( _feature MATCHES "BOOST" AND _var2 MATCHES "^Boost_(LIBRARIES|INCLUDE_DIRS)$" ) OR # TBB: - ( _feature MATCHES "THREADS" AND _var2 MATCHES "^TBB(_LIBRARIES|_INCLUDE_DIRS)$" ) OR + ( _feature MATCHES "THREADS" AND _var2 MATCHES "^TBB_(LIBRARIES|INCLUDE_DIRS)$" ) OR # Generic: ( (NOT _var2 MATCHES "^(MPI|Boost)") AND _var2 MATCHES "^${_feature}_(INCLUDE_DIRS|LIBRARIES|LINKER_FLAGS)$" ) @@ -258,7 +268,8 @@ FOREACH(_var ${_components}) ENDFOREACH() _summary( - "#\n# Detailed information (compiler flags, feature configuration) can be found in detailed.log\n" +"#\n# Detailed information (compiler flags, feature configuration) can be found in detailed.log +#\n# Run $ make info to print a help message with a list of top level targets\n" ) _both("#\n###") diff --git a/deal.II/cmake/setup_post_project_call.cmake b/deal.II/cmake/setup_post_project_call.cmake deleted file mode 100644 index 60aad894df..0000000000 --- a/deal.II/cmake/setup_post_project_call.cmake +++ /dev/null @@ -1,58 +0,0 @@ -## --------------------------------------------------------------------- -## $Id$ -## -## Copyright (C) 2013 by the deal.II authors -## -## This file is part of the deal.II library. -## -## The deal.II library is free software; you can use it, redistribute -## it, and/or modify it under the terms of the GNU Lesser General -## Public License as published by the Free Software Foundation; either -## version 2.1 of the License, or (at your option) any later version. -## The full text of the license can be found in the file LICENSE at -## the top level of the deal.II distribution. 
-## -## --------------------------------------------------------------------- - - -######################################################################## -# # -# Setup that has to happen after the call to PROJECT(): # -# # -######################################################################## - -# -# Library search order: -# -IF(DEAL_II_PREFER_STATIC_LIBS) - # - # Invert the search order for libraries when DEAL_II_PREFER_STATIC_LIBS - # is set. This will prefer static archives instead of shared libraries: - # - # TODO: Does this work on a Windows or CYGWIN target? - LIST(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES) -ENDIF() - - -# -# Cross compilation stuff: -# -IF(CMAKE_CROSSCOMPILING) - # - # Disable platform introspection when cross compiling - # - SET(DEAL_II_ALLOW_PLATFORM_INTROSPECTION OFF CACHE BOOL "" FORCE) - - # - # Import native expand_instantiations for use in cross compilation: - # - SET(DEAL_II_NATIVE "DEAL_II_NATIVE-NOTFOUND" CACHE FILEPATH - "A pointer to a native deal.Ii build directory" - ) - IF(DEAL_II_NATIVE MATCHES "-NOTFOUND") - MESSAGE(FATAL_ERROR - "Please set the CMake variable DEAL_II_NATIVE to a valid path that points to a native deal.II build directory" - ) - ENDIF() - INCLUDE(${DEAL_II_NATIVE}/importExecutables.cmake) -ENDIF() diff --git a/deal.II/cmake/setup_testsuite.cmake b/deal.II/cmake/setup_testsuite.cmake new file mode 100644 index 0000000000..cd334b751e --- /dev/null +++ b/deal.II/cmake/setup_testsuite.cmake @@ -0,0 +1,108 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +# +# Setup necessary configuration in the testsuite subprojects. +# This file is directly included by the test subprojects and not by the +# main project. 
+# +# It is assumed that the following variables are set: +# +# DEAL_II_BINARY_DIR +# DEAL_II_SOURCE_DIR +# - pointing to a source and binary directory of a deal.II build +# +# This file sets up the following options, that can be overwritten by +# environment or command line: +# +# TEST_DIFF +# TEST_OVERRIDE_LOCATION +# TEST_PICKUP_REGEX +# TEST_TIME_LIMIT +# + +# +# Load all macros: +# +FILE(GLOB _macro_files ${DEAL_II_SOURCE_DIR}/cmake/macros/*.cmake) +FOREACH(_file ${_macro_files}) + INCLUDE(${_file}) +ENDFOREACH() + +# +# Pick up values from environment: +# +SET_IF_EMPTY(DEAL_II_BINARY_DIR $ENV{DEAL_II_BINARY_DIR}) +SET_IF_EMPTY(DEAL_II_BINARY_DIR $ENV{DEAL_II_DIR}) +SET_IF_EMPTY(DEAL_II_SOURCE_DIR $ENV{DEAL_II_SOURCE_DIR}) +SET_IF_EMPTY(TEST_DIFF $ENV{TEST_DIFF}) +SET_IF_EMPTY(TEST_TIME_LIMIT $ENV{TEST_TIME_LIMIT}) +SET_IF_EMPTY(TEST_PICKUP_REGEX $ENV{TEST_PICKUP_REGEX}) +SET_IF_EMPTY(TEST_OVERRIDE_LOCATION $ENV{TEST_OVERRIDE_LOCATION}) + +# +# We need deal.II and Perl as external packages: +# +FIND_PACKAGE(deal.II 8.0 REQUIRED + HINTS ${DEAL_II_BINARY_DIR} ${DEAL_II_DIR} + ) +SET(CMAKE_CXX_COMPILER ${DEAL_II_CXX_COMPILER} CACHE STRING "CXX Compiler.") + +FIND_PACKAGE(Perl REQUIRED) + +# +# We need a diff tool, preferably numdiff: +# +FIND_PROGRAM(DIFF_EXECUTABLE + NAMES diff + ) + +FIND_PROGRAM(NUMDIFF_EXECUTABLE + NAMES numdiff + HINTS ${NUMDIFF_DIR} + PATH_SUFFIXES bin + ) + +MARK_AS_ADVANCED(DIFF_EXECUTABLE NUMDIFF_EXECUTABLE) + +IF( NUMDIFF_EXECUTABLE MATCHES "-NOTFOUND" + AND DIFF_EXECUTABLE MATCHES "-NOTFOUND" ) + MESSAGE(FATAL_ERROR + "Could not find diff or numdiff. One of those are required for running the testsuite." + ) +ENDIF() + +IF("${TEST_DIFF}" STREQUAL "") + IF(NOT NUMDIFF_EXECUTABLE MATCHES "-NOTFOUND") + SET(TEST_DIFF ${NUMDIFF_EXECUTABLE} -a 1e-6 -s ' \\t\\n:') + ELSE() + SET(TEST_DIFF ${DIFF_EXECUTABLE}) + ENDIF() +ELSE() + # TODO: I have no idea how to prepare a custom string comming possibly + # through two layers of command line into a list... 
+ SEPARATE_ARGUMENTS(TEST_DIFF ${TEST_DIFF}) +ENDIF() + +# +# Set a default time limit of 600 seconds: +# +SET_IF_EMPTY(TEST_TIME_LIMIT 600) + +# +# And finally, enable testing: +# +ENABLE_TESTING() diff --git a/deal.II/contrib/mesh_conversion/CMakeLists.txt b/deal.II/contrib/mesh_conversion/CMakeLists.txt index 72e26ef2c4..f17569af7d 100644 --- a/deal.II/contrib/mesh_conversion/CMakeLists.txt +++ b/deal.II/contrib/mesh_conversion/CMakeLists.txt @@ -32,7 +32,11 @@ IF("${DEAL_II_EXECUTABLE_RELDIR}" STREQUAL "") ENDIF() INSTALL(TARGETS mesh_converter_exe - RUNTIME - DESTINATION ${DEAL_II_EXECUTABLE_RELDIR} + RUNTIME DESTINATION ${DEAL_II_EXECUTABLE_RELDIR} COMPONENT mesh_converter ) +EXPORT(TARGETS mesh_converter_exe + FILE + ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake + APPEND + ) diff --git a/deal.II/contrib/parameter_gui/CMakeLists.txt b/deal.II/contrib/parameter_gui/CMakeLists.txt index 7ff3f24f7e..c06c0823dc 100644 --- a/deal.II/contrib/parameter_gui/CMakeLists.txt +++ b/deal.II/contrib/parameter_gui/CMakeLists.txt @@ -59,4 +59,8 @@ INSTALL(TARGETS parameter_gui_exe RUNTIME DESTINATION ${DEAL_II_EXECUTABLE_RELDIR} COMPONENT parameter_gui ) +EXPORT(TARGETS parameter_gui_exe + FILE ${CMAKE_BINARY_DIR}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PROJECT_CONFIG_NAME}Executables.cmake + APPEND + ) diff --git a/deal.II/contrib/test_affinity/CMakeLists.txt b/deal.II/contrib/test_affinity/CMakeLists.txt deleted file mode 100644 index 83075153c0..0000000000 --- a/deal.II/contrib/test_affinity/CMakeLists.txt +++ /dev/null @@ -1,46 +0,0 @@ -## --------------------------------------------------------------------- -## $Id$ -## -## Copyright (C) 2013 by the deal.II authors -## -## This file is part of the deal.II library. -## -## The deal.II library is free software; you can use it, redistribute -## it, and/or modify it under the terms of the GNU Lesser General -## Public License as published by the Free Software Foundation; either -## version 2.1 of the License, or (at your option) any later version. -## The full text of the license can be found in the file LICENSE at -## the top level of the deal.II distribution. 
-## -## --------------------------------------------------------------------- - -CMAKE_MINIMUM_REQUIRED(VERSION 2.8) - -INCLUDE_DIRECTORIES( - BEFORE - ${CMAKE_SOURCE_DIR}/include/ - ${CMAKE_BINARY_DIR}/include/ - ) - -ADD_EXECUTABLE(test_affinity - test_affinity.cc - ) - -IF(CMAKE_BUILD_TYPE MATCHES "Debug") - SET(_build "DEBUG") -ELSE() - SET(_build "RELEASE") -ENDIF() - -SET_TARGET_PROPERTIES(test_affinity PROPERTIES - LINK_FLAGS "${DEAL_II_LINKER_FLAGS} ${DEAL_II_LINKER_FLAGS_${_build}}" - COMPILE_DEFINITIONS "${DEAL_II_DEFINITIONS};${DEAL_II_DEFINITIONS_${_build}}" - COMPILE_FLAGS "${DEAL_II_CXX_FLAGS_${_build}}" - LINKER_LANGUAGE "CXX" - ) - -TARGET_LINK_LIBRARIES(test_affinity ${DEAL_II_BASE_NAME}${DEAL_II_${_build}_SUFFIX}) - -IF(NOT CMAKE_CROSSCOMPILING) - ADD_CUSTOM_TARGET(run_test_affinity COMMAND test_affinity) -ENDIF() diff --git a/deal.II/contrib/utilities/build_test b/deal.II/contrib/utilities/build_test index b2c7487e43..ef00405480 100755 --- a/deal.II/contrib/utilities/build_test +++ b/deal.II/contrib/utilities/build_test @@ -27,7 +27,7 @@ # SOURCEDIR=$(CURDIR) LOGDIR=$(PWD) -TMPDIR=/tmp +TMPDIR?=/tmp CMAKE=cmake SVN=svn info $(SOURCEDIR) diff --git a/deal.II/doc/developers/cmake-internals.html b/deal.II/doc/developers/cmake-internals.html index b568a0527f..ea9c6a97b7 100644 --- a/deal.II/doc/developers/cmake-internals.html +++ b/deal.II/doc/developers/cmake-internals.html @@ -40,6 +40,7 @@
  • ./cmake/config/CMakeLists.txt
  • + diff --git a/deal.II/doc/developers/testsuite.html b/deal.II/doc/developers/testsuite.html index 54229442f2..9eb8dd343b 100644 --- a/deal.II/doc/developers/testsuite.html +++ b/deal.II/doc/developers/testsuite.html @@ -17,347 +17,537 @@

    The deal.II Testsuite

    -

    The deal.II testsuite consists of two parts, the - build tests and the - regression tests. While the build tests - just check if the - library can be compiled on different systems and with different (versions - of) compilers, the regression tests are actually run and their output - compared with previously stored. These two testsuites are - described below.

    - - -

    The build tests

    +

    + The deal.II testsuite consists of two parts: + build tests and the + regression testsuite. While the build tests + are used to check that the + library can be compiled on different systems and with different (versions + of) compilers, the regression tests are actually run and their output + compared with previously stored output files to verify that what + worked yesterday still works today. These two testsuites are + described below. +

    - With our build tests, we check if deal.II can be compiled on - different systems and with different compilers as well as - different configuration options. Results are collected in a - database and can be accessed online.

    + deal.II has a testsuite that has, at the time this article is written + (mid-2013), some 2,900 small programs (growing by roughly one per day) + that we run every time we make a change to make sure that no existing + functionality is broken. The expected output for every test is stored in + our subversion archive, and when you run a test you are notified if a + test produces different output. These days, every time we add a + significant piece of functionality, we add at least one new test to the + testsuite, and we also do so if we fix a bug, in both cases to make sure + that future changes do not break what we have just checked in. Machines + running tests send results + back home and these are then converted into + a webpage showing the status of our regression tests. +

    -

    Running the build test suite is simple and we encourage deal.II - users with configurations not found on the test suite page to - participate. Assuming you checked out deal.II into the directory - dealtest, running it is as simple as: +

    + + +

    Quick instructions

    + +

    + If you're impatient, use the following commands:

     
    -    cd dealtest
    -    svn update
    -    ./contrib/utilities/build_test
    -    mail build-tests@dealii.org < *.log
    -  ( rm *.log )
    +    $ mkdir new_directory
    +    $ cd new_directory
    +    $ svn checkout https://svn.dealii.org/trunk .
    +    $ mkdir build
    +    $ cd build
    +    $ cmake ../deal.II
    +    $ make -j16
    +    $ make -j16 setup_tests
    +    $ ctest -j16
           
    + The exact meaning of all of these commands will be explained in much + greater detail below. +

    + + +

    Setting up the testsuite

    + +

    In order to run it, you need to download and set up the testsuite + first. The following paragraphs detail how to do that.

    + +

    Downloading the testsuite

    +

    - The build_test script supports the following options: + To download the testsuite, check it out from the subversion repository, + along with deal.II. To this end, go to an empty directory where you + want to test deal.II and do this:

     
    -    SOURCEDIR     - the source directory to use (otherwise the current directory is used)
    -    CONFIGFILE    - A cmake configuration file for the build test
    -    LOGDIR        - directory for the log file
    -    LOGFILE       - the logfile to use, defaults to
    -                        $LOGDIR/$BRANCH.$CONFIGFILE..log
    -
    -    CMAKE         - the cmake executable to use
    -    SVN           - svn info command to use, defaults to
    -                        svn info $(SOURCEDIR)
    -    TMPDIR        - defaults to "/tmp"
    -    CLEAN_TMPDIR  - defaults to "true"
    -    RUN_EXAMPLES  - defaults to "true"
    +    $ svn checkout https://svn.dealii.org/trunk .
           
    - An example configuration file can be found here. Options can be passed either via - environment + (The period at the end puts everything from under trunk/ + into the current directory, rather than creating a + new trunk/ directory.) You will then have + two folders:
     
    -    export CONFIGFILE=MyConfiguration.conf
    -    ./contrib/utilities/build_test
    +    ./deal.II
    +    ./tests
           
    - or directly on the command line: +

    + +

    + Note: CMake will pick up any testsuite that is located in a + tests folder next to the source directory + (../tests). If your test directory is at a different + location you have to hint during configuration by specifying + TEST_DIR:

     
    -    ./contrib/utilities/build_test CONFIGFILE=myConfiguration.conf
    +    $ cmake -DTEST_DIR=<...>
           

    + +

    Preparing the testsuite

    +

    - A status indicator should appear on the build test website after some - time (results are collected and processed by a program that is run - periodically, but not immediately after a mail has been received). -

    + To enable the testsuite, configure and build deal.II in a build + directory as normal (installation is not necessary). After that you + can setup the testsuite via the "setup_tests" target:s +
     
    +    $ make setup_tests
    +      
    + This will set up all tests supported by the current configuration. + The testsuite can now be run in the current build directory as + described below. +

    -

    Dedicated build tests

    +

    + Setup can be fine-tuned using the following commands: +

     
    -    There is a detailed example for dedicated build tests on the wiki.
    +    $ make clean_tests - runs the 'clean' target in every testsuite subproject
     
    -    
    -    

    The regression tests

    + $ make prune_tests - removes all testsuite subprojects +

    - deal.II has a testsuite that, at the time this article is written (mid-2013), - has some 2,900 small programs (growing by roughly one per day) that we run - every time we make a - change to make sure that no existing functionality is broken. The - expected output is also stored in our subversion archive, and when you - run a test you are notified if a test fails. These days, every - time we add a significant piece of functionality, we add at least - one new test to the testsuite, and we also do so if we fix a bug, - in both cases to make sure that future changes do not break what - we have just checked in. In addition, some machines run the tests - every night and send the results back home; this is then converted - into a webpage showing the status of our regression - tests. + In addition, when setting up the testsuite, the following environment + variables can be used to override default behavior when + calling make setup_tests: +

    +
    +    TEST_DIFF
    +      - The diff tool and command line to use for comparison. If numdiff is
    +        available it defaults to "numdiff -a 1e-6 -q", otherwise plain diff
    +        is used.
    +
    +    TEST_TIME_LIMIT
    +      - The time limit (in seconds) a single test is allowed to take. Defaults
    +        to 600 seconds
    +
    +    TEST_PICKUP_REGEX
    +      - A regular expression to select only a subset of tests during setup.
    +        An empty string is interpreted as a catchall (this is the default).
    +
    +    TEST_OVERRIDE_LOCATION
    +      - If TEST_OVERRIDE_LOCATION is set, a comparison file category/test.output
    +        will be substituted by ${TEST_OVERRIDE_LOCATION}/category/test.output if
    +        the latter exists.
    +      
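    For example (the values here are purely illustrative; any of the variables
    listed above can be combined the same way), the test selection and the time
    limit can be overridden for a single call to make setup_tests by prefixing
    it with environment assignments:

        $ TEST_PICKUP_REGEX="^base/" TEST_TIME_LIMIT=300 make setup_tests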

    - If you develop parts of deal.II, want to add something, or fix a - bug in it, we encourage you to use our testsuite. This page - documents some aspects of it. + Note: Specifying these options via environment variables is + volatile, i.e. if make setup_tests is invoked a second + time without the variables set in environment, the option will be + reset to the default value. If you want to set these options + permanently, set them via cmake as CMake variable in the build + directory: +

    +
    +    $ cmake -DTEST_PICKUP_REGEX="<regular expression>" .
    +      
    + A variable set via cmake always overrides one + set via environment.

    + +

    Running the testsuite

    + +

    + The testsuite can now be run in the build directory via +

     
    -    

    Running it

    + $ ctest [-j N] +
    + Here, N is the number of concurrent tests that should be + run, in the same way as you can say make -jN. The testsuite + is huge and will need around 12h on current computers + running single threaded. +

    - To run the testsuite, go to the directory where you want to test deal.II - and do this: -

    +      If you only want to run a subset of tests
    +      matching a regular expression, or if you want to exclude tests matching
    +      a regular expression, you can use
    +      
     
    -       svn checkout https://svn.dealii.org/trunk/tests
    -       cd tests
    -       DEAL_II_DIR=/a/b/c ./configure
    -    
    - where /a/b/c is the installation directory you have told - CMake to install deal.II into previously. + $ ctest [-j N] -R '<positive regular expression>' + $ ctest [-j N] -E '<negative regular expression>' +
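    For instance (the regular expressions are only illustrative), the following
    runs all tests of the base category in four parallel jobs while excluding
    everything that matches mpi/:

        $ ctest -j4 -R "^base/" -E "mpi/"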
    +

    + +

    + Note: + Not all tests succeed on every machine even if all computations are + correct, because your machine generates slightly different floating + point outputs. To increase the number of tests that work correctly, + install the + numdiff tool that compares + stored and newly created output files based on floating point + tolerances. To use it, simply export where the numdiff + executable can be found via the PATH + environment variable so that it can be found during + make setup_tests.

    + +

    How to interpret the output

    +

    - Not all tests succeed on every machine even if all computations are - correct, because you might not have configured with all the required - packages (for example PETSc or Trilinos), or because your machine - generates slightly different floating point outputs. To increase the - number of tests that work correctly, install the - numdiff toold that compares - stored and newly created output files based on floating point - tolerances. To use it, simply export the environment variable - export DEAL_II_DIFF="numdiff -a 1e-6 -q" - before running the testsuite. + A typical output of a ctest invocation looks like: +

    +
    +    $ ctest -j4 -R "base/thread_validity"
    +    Test project /tmp/trunk/build
    +          Start 747: base/thread_validity_01.debug
    +          Start 748: base/thread_validity_01.release
    +          Start 775: base/thread_validity_05.debug
    +          Start 776: base/thread_validity_05.release
    +     1/24 Test #776: base/thread_validity_05.release ...   Passed    1.89 sec
    +     2/24 Test #748: base/thread_validity_01.release ...   Passed    1.89 sec
    +          Start 839: base/thread_validity_03.debug
    +          Start 840: base/thread_validity_03.release
    +     3/24 Test #747: base/thread_validity_01.debug .....   Passed    2.68 sec
    +    [...]
    +          Start 1077: base/thread_validity_08.debug
    +          Start 1078: base/thread_validity_08.release
    +    16/24 Test #1078: base/thread_validity_08.release ...***Failed    2.86 sec
    +    18/24 Test #1077: base/thread_validity_08.debug .....***Failed    3.97 sec
    +    [...]
    +
    +    92% tests passed, 2 tests failed out of 24
    +
    +    Total Test time (real) =  20.43 sec
    +
    +    The following tests FAILED:
    +            1077 - base/thread_validity_08.debug (Failed)
    +            1078 - base/thread_validity_08.release (Failed)
    +    Errors while running CTest
    +      
    + If a test failed (like base/thread_validity_08.debug in above + example output), you might want to find out what exactly went wrong. To + this end, you can search + through Testing/Temporary/LastTest.log for the exact output + of the test, or you can rerun this one test, specifying -V + to select verbose output of tests: +
    +
    +    $ ctest -V -R "base/thread_validity_08.debug"
    +    [...]
    +    test 1077
    +        Start 1077: base/thread_validity_08.debug
    +
    +    1077: Test command: [...]
    +    1077: Test timeout computed to be: 600
    +    1077: Test base/thread_validity_08.debug: RUN
    +    1077: ===============================   OUTPUT BEGIN  ===============================
    +    1077: Built target thread_validity_08.debug
    +    1077: Generating thread_validity_08.debug/output
    +    1077: terminate called without an active exception
    +    1077: /bin/sh: line 1: 18030 Aborted [...]/thread_validity_08.debug
    +    1077: base/thread_validity_08.debug: BUILD successful.
    +    1077: base/thread_validity_08.debug: RUN failed. Output:
    +    1077: DEAL::OK.
    +    1077: gmake[3]: *** [thread_validity_08.debug/output] Error 1
    +    1077: gmake[2]: *** [CMakeFiles/thread_validity_08.debug.diff.dir/all] Error 2
    +    1077: gmake[1]: *** [CMakeFiles/thread_validity_08.debug.diff.dir/rule] Error 2
    +    1077: gmake: *** [thread_validity_08.debug.diff] Error 2
    +    1077:
    +    1077:
    +    1077: base/thread_validity_08.debug: ******    RUN failed    *******
    +    1077:
    +    1077: ===============================    OUTPUT END   ===============================
    +      
    + So this specific test aborted in the RUN stage.
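    To inspect such a failure without rerunning anything, you can also search the
    log that ctest keeps in the build directory; a sketch, using the test name
    from the example above and an arbitrary amount of context:

        $ grep -A 40 "base/thread_validity_08" Testing/Temporary/LastTest.log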

    - Once you have done this, you may simply type - make. This runs all the tests there are, but stops at - the first one that fails to either execute properly or for which - the output does not match the expected output found in the subversion - archive. This is helpful if you want to figure out if any test is - failing at all. Typical output looks like this: -

    +      The general output for a successful test <test> in
    +      category <category> for build type
    +      <build> is
    +      
     
    -      deal.II/tests> make
    -      cd base ; make
    -      make[1]: Entering directory `/ices/bangerth/p/deal.II/1/deal.II/tests/base'
    -      =====linking======= logtest.exe
    -      =====Running======= logtest.exe
    -      =====Checking====== logtest.output
    -      =====OK============ logtest.OK
    -      =====linking======= reference.exe
    -      =====Running======= reference.exe
    -      =====Checking====== reference.output
    -      =====OK============ reference.OK
    -      =====linking======= quadrature_test.exe
    -      ...
    -    
    - Be aware that because of the number of tests we have, running the entire - testsuite takes approximately 10 hours (as of early 2013), even on a fast - system. (On the other hand, of couse only a large testsuite can offer - comprehensive coverage of a software as big as deal.II.) This time can be - reduced, however, on multicore machines if you use the command - make -jN where N is an integer equal to or - slightly larger than the number of processor cores you have, as this - instructs make to run several tests at the same time. -

    - -

    - Sometimes, you know that for whatever reason one test - always fails on your system, or has already failed before you made - any changes to the library that could have caused tests to - fail. We also sometimes check in tests that we know presently - fail, just to remind us that we need to work on a fix, if we don't - have the time to debug the problem properly right away. In this - case, you will not want the testsuite to stop at the first test - that fails, but will want to run all tests first and then inspect - the output to find any fails. There are make targets for this - as well. The usual way we use the testsuite is to run all tests - like this - (the same applies as above: make -jN can be used on multicore - machines): -

    +    xx: Test <category>/<test>.<build>: PASSED
    +    xx: ===============================   OUTPUT BEGIN  ===============================
    +    xx: [...]
    +    xx: <category>/<test>.<build>: PASSED.
    +    xx: ===============================    OUTPUT END   ===============================
    +      
    + And for a test that fails in stage <stage>: +
     
    -      deal.II/tests> make report | tee report
    -    
    - which produces the file report ( here in the test directory a-framework) -
    +    xx: Test <category>/<test>.<build>: <stage>
    +    xx: ===============================   OUTPUT BEGIN  ===============================
    +    xx: [...]
    +    xx: <category>/<test>.<build>: <stage> failed. [...]
    +    xx:
    +    xx: <category>/<test>.<build>: ******    <stage> failed    *******
    +    xx: ===============================    OUTPUT END   ===============================
    +      
    + Hereby, <stage> indicates the stage in which the + test failed: +
      +
    • + CONFIGURE: only for tests in the "build_tests" + category: The test project failed in the configuration stage +
    • +
    • + BUILD: compilation error occurred +
    • +
    • + RUN: the test executable could not be run / aborted +
    • +
    • + DIFF: the test output differs from the reference output +
    • +
    • + PASSED: the test ran successfully +
    • +
    + Typically, tests fail because the output has changed, and you will see + this in the DIFF phase of the test. +
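    In that case it can be helpful to redo the comparison by hand with the same
    tolerance-based tool the testsuite uses. A sketch, assuming numdiff is
    installed and with purely illustrative test name and paths (the driver added
    in this patch additionally passes a separator list via -s):

        $ numdiff -a 1e-6 tests/base/my_test.debug/output ../tests/base/my_test.output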

    - =====Checking====== miscompare/output - +++++Error+++++++++ miscompare/OK (miscompare/cmp/generic) Use make verbose=on for the diffs - =====linking======= compile/exe - =====Running======= link/exe - =====debug========= fail.cc - make[1]: Leaving directory `/home/kanschat/deal/tests/a-framework' - Revision: 21455 - Date: 2010 187 2010-07-06 27-2 - Id: kanschat@odin - 2010-07-06 16:39 1 a-framework/compile - 2010-07-06 16:39 0 a-framework/fail - 2010-07-06 16:39 2 a-framework/link - 2010-07-06 16:39 3 a-framework/miscompare - 2010-07-06 16:39 + a-framework/run -
    - The last lines are the ones we are looking for: they show the time at which - the tests was run, an indicator of success, and the name of a - test. The indicator is either a plus, which means that the test - compiled and linked successfully and that the output compared - successfully against the stored results. Otherwise, it is any of the - numbers 0 to 3, indicating failure at different levels: -
      -
    • 0: compiling failed -
    • 1: linking failed -
    • 2: the program crashed -
    • 3: output differs from stored result -
    • +: test succeeded -
    - If you only want to see the tests that failed, after the previous command, - issue -
     
    -      grep -v + report
    -    
    + +

    Testsuite development

    + +

    + The following outlines what you need to know if you want to understand + how the testsuite actually works, for example because you may want to + add tests along with the functionality you are currently developing.

    + + + +

    General layout

    +

    - If you want to do a little more than just that, you should - consider running -

    +      A test usually consists of a source file and an output file for
    +      comparison (under the testsuite directory tests):
    +      
     
    -      make report+mail | tee report
    -    
    - instead. This does all the same stuff, but also mails the test - result to our central mail result server which will in regular - intervals (at least once a day) munge these mails and present them - on our test site. This way, people can - get an overview of what tests fail. You may even consider running - tests nightly through a cron-job with this command, to have - regular test runs. + category/test.cc + category/test.output +
    + category will be one of the existing subdirectory + under tests/, e.g., lac/, base/, + or mpi/. Historically, we have grouped tests into the + directories base/, lac/, deal.II/ depending on their + functionality, and bits/ if they were small unit tests, but + in practice we have not always followed this rigidly. There are also + more specialized directories trilinos/, petsc/, + serialization/, mpi/ etc, whose meaning is more obvious. + test.cc must be a regular executable (i.e. having an + int main() routine). It will be compiled, linked and + run. The executable should not output anything to cout + (at least under normal circumstances, i.e. no error condition), + instead the executable should output to a file output + in the current working directory. In practice, we rarely write the + source files completely from scratch, but we find an existing test that + already does something similar and copy/modify it to fit our needs.

    +

    + For a normal test, ctest will typically run the following 3 + stages: +

      +
    • + BUILD: The build stage generates an executable in + BUILD_DIR/tests/<category>/<test>. +
    • +
    • + RUN: The run stage then invokes the executable in the + directory where it is located. By convention, each test puts its + output into a file simply called output, which will + then be located in + BUILD_DIR/tests/<category>/<test>/output. + If the run fails (e.g. because the program aborts with an error + code) the file output is renamed to + failing_output. +
    • +
    • + DIFF: As a last stage the generated output file will + be compared to + SOURCE_DIR/tests/<category>/<test>[...].output. + and stored in + BUILD_DIR/tests/<category>/<test>/diff. + If the diff fails the file diff is renamed to + failing_diff. +
    • +
    +
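    Putting these conventions together, the leftovers of a failing run can be
    inspected directly in the build directory; for the test from the earlier
    example output this would be something like (which of the two files exists
    depends on the stage that failed):

        $ cat tests/base/thread_validity_08.debug/failing_output
        $ cat tests/base/thread_validity_08.debug/failing_diff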

    + + + +

    Restricting tests for build configurations

    - To get a quick overview you can run -

    +      The comparison file can actually be named in a more complex way than
    +      just category/test.output:
    +      
     
    -      make report+summary
    -    
    - instead. This runs all the tests and outputs a table in the following format - at the end: -
    +    category/test.[with_<feature>=<on|off>.]*[mpirun=<x>.][expect=<y>.][<debug|release>.]output
    +      
    + Normally, a test will be set up so that it runs twice, once in debug and + once in release configuration. + If a specific test can only be run in debug or release configurations but + not in both it is possible to restrict the setup by prepending + .debug or .release directly before + .output, e.g.: +
     
    -                Compiling Linking Running   Check      OK     all
    -         a-framework	1	1	1	1	1	5
    -                base	0	0	0	2	185	187
    -                 lac	0	0	0	0	117	117
    -                  fe	0	0	0	4	114	118
    -             deal.II	0	0	0	2	291	293
    -         integrators	0	0	0	0	15	15
    -           multigrid	0	0	0	0	35	35
    -		 ...
    -    
    + category/test.debug.output +
    + This way, the test will only be set up to build and run against the debug + library. If a test should run in both configurations but, for some + reason, produces different output (e.g., because it triggers an + assertion in debug mode), then you can just provide two different output + files: +
    +
    +    category/test.debug.output
    +    category/test.release.output
    +      

    + + +

    Restricting tests for feature configurations

    - If a test failed, you have to find out what exactly went - wrong. For this, you will want to go into the directory of that - test, and figure out in more detail what went wrong. For example, - if above test hierarchical would have failed, you - would want to go into the base directory (this is - given in the line with the equals signs; there are tests in other - directories as well) and then type -

    +      In a similar vein as for build configurations, it is possible to restrict
    +      tests to specific feature configurations, e.g.:
    +      
     
    -      make hierarchical/exe
    -    
    - to compile and link the executable. (For each test there is a not - only a file with suffic .cc but also a subdirectory with the - same name, in which we store among other things the executable for that - test, under the name exe.) If this fails, i.e. if - you can't compile or link, then you probably already know where - the problem is, and how to fix it. If you could compile and link - the test, you will want to make sure that it executes correctly - and produces an output file: -
    +    category/test.with_umfpack=on.output, or
    +    category/test.with_zlib=off.output
    +      
    + These tests will only be set up if the specified feature was configured. + It is possible to provide different output files for disabled/enabled + features, e.g. +
     
    -      make hierarchical/output
    -    
    - (As you see, the output file is also stored in the subdirectory with the - test's name.) If this produces errors or triggers assertions, then you will - want to use a debugger on the executable to figure out what happens. On - the other hand, if you are sure that this also worked, you will - want to compare the output with the stored output from subversion: -
    +    category/test.with_64bit_indices=on.output
    +    category/test.with_64bit_indices=off.output
    +      
    + It is also possible to declare multiple constraints subsequently, e.g. +
     
    -      make hierarchical/OK
    -    
    - If the output isn't equal, then you'll see something like - this: -
    +    category/test.with_umfpack=on.with_zlib=on.output
    +      
    +

    +

    + Note: The tests in some subdirectories of tests/ are + automatically run only if some feature is enabled. In this case a + feature constraint encoded in the output file name is + redundant and should be avoided. In particular, this holds for + subdirectories + distributed_grids, lapack, + metis, petsc, slepc, + trilinos, umfpack, gla, and + mpi +

    - =====Checking====== hierarchical/output - +++++Error+++++++++ hierarchical/OK. Use make verbose=on for the diffs -
    - Because the diffs between the output we get and the output we - expected can sometimes be very large, you don't get to see it by - default. However, following the suggestion printed, if you type -
     
    -      make hierarchical/OK verbose=on
    -    
    - you get to see it all: -
    +    
    +    

    Running tests with MPI

    +

    + If a test should be run with MPI in parallel, the number of MPI + processes N with which a program needs to be run for + comparison with a given output file is specified as follows: +

     
    -      =====Checking====== hierarchical/output
    -      12c12
    -      < DEAL::0.333 1.667 0.333 -0.889 0.296 -0.988 0.329 -0.999 0.333 -1.000 0.333 -1.000
    -      ---
    -      > DEAL::0.333 0.667 0.333 -0.889 0.296 -0.988 0.329 -0.999 0.333 -1.000 0.333 -1.000
    -      +++++Error+++++++++ hierarchical/OK
    -    
    - In this case, the second number on line 12 is off by one. To find - the reason for this, you again should use a debugger or other - suitable means, but that of course depends on what changes you - have made last and that could have caused this discrepancy. + category/test.mpirun=N.output +
    + It is quite typical for an MPI-enabled test to have multiple output + files for different numbers of MPI processes. +
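    A hypothetical MPI test would therefore typically be accompanied by a small
    family of comparison files, e.g.:

        tests/mpi/my_mpi_test.cc
        tests/mpi/my_mpi_test.mpirun=1.output
        tests/mpi/my_mpi_test.mpirun=4.output
        tests/mpi/my_mpi_test.mpirun=10.output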

    + + +

    Changing condition for success

    +

    + Normally a test is considered to be successful if all test stages + could be run and the test reached the PASSED stage (see + the output description section for details). + If (for some reason) the test should succeed ending at a specific + test stage different than PASSED you can specify it via + expect=<stage>, e.g.: +

    +
    +    category/test.expect=run.output
    +      

    +

    Adding new tests

    - As mentioned above, we add a new test every - time we add new functionality to the library or fix a bug. If you - want to contribute code to the library, you should do this - as well. Here's how: you need a testcase, - a subdirectory with the same name as the test, and a file with the - expected output. + We typically add one or more new tests every + time we add new functionality to the library or fix a bug. If you + want to contribute code to the library, you should do this + as well. Here's how: you need a testcase and a file with the + expected output.

    The testcase

    - For the testcase, we usually start from a template like this: + For the testcase, we usually start from one of the existing tests, copy + and modify it to where it does what we'd like to test. Alternatively, + you can also start from a template like this:

     
     // ---------------------------------------------------------------------
    @@ -380,15 +570,15 @@
     // a short (a few lines) description of what the program does
     
     #include "../tests.h"
    -#include 
    -#include 
    +#include <iostream>
    +#include <fstream>
     
     // all include files you need here
     
     
     int main ()
     {
    -  std::ofstream logfile("my_new_test/output");
    +  std::ofstream logfile("output");
       deallog.attach(logfile);
       deallog.depth_console(0);
     
    @@ -400,17 +590,14 @@ int main ()
     }
         
    -

    You open an output file in a directory with the same - name as your test, and then write - all output you generate to it, - through the deallog stream. The deallog - stream works like any - other std::ostream except that it does a few more - things behind the scenes that are helpful in this context. In - above case, we only write a zero to the output - file. Most tests actually write computed data to the output file - to make sure that whatever we compute is what we got when the - test was first written. +

    This code opens an output file output in the current working + directory and then writes all output you generate to it, through the + deallog stream. The deallog stream works like + any other std::ostream except that it does a few more + things behind the scenes that are helpful in this context. In above + case, we only write a zero to the output file. Most tests of course + write computed data to the output file to make sure that whatever we + compute is what we got when the test was first written.

    @@ -419,181 +606,220 @@ int main () have traditionally been into the base/, lac/, deal.II/, fe/, hp/, or multigrid/ directories, depending on - where the classes that are tested are located. + where the classes that are tested are located. More atomic tests often go + into bits/. There are also + directories for PETSc and Trilinos wrapper functionality.

    +

    An expected output

    +

    - We have started to create more atomic tests which - are usually very small and test only a single aspect of the - library, often only a single function. These tests go into the - bits/ directory and often have names that are - composed of the name of the class being tested and a two-digit - number, e.g., dof_tools_11. There are - directories for PETSc and Trilinos wrapper functionality. -

    + In order to run your new test, copy it to an appropriate category and + create an empty comparison file for it: +
     
    -    

    A directory with the same name as the test

    + category/my_new_test.cc + category/my_new_test.output +
    + Now, rerun +
     
    -    

    You have to create a subdirectory - with the same name as your test to hold the output from the test. + $ make setup_tests +

    + so that your new test is picked up. After that it is possible to + invoke it with +
     
    -    

    One convenient way to create this subdirectory with the correct - properties is to use svn copy. -

    +    $ ctest -V -R "category/my_new_test"
    +      
    +

    - svn copy existing_test_directory my_new_test -
    +

    + If you run your new test executable this way, the test should compile + and run successfully but fail in the diff stage (because of the empty + comparison file). You will get an output file + BUILD_DIR/category/my_new_test/output. Take a look at it to + make sure that the output is what you had expected. (For complex tests, + it may sometimes be impossible to say whether the output is correct, and + in this case we sometimes just take it to make + sure that future invokations of the test yield the same results.) +

    - Once you have done this, you can try to run -

    +      The next step is to copy and rename this output file to the source
    +      directory and replace the original comparison file with it:
    +      
     
    -      make my_new_test/output
    -    
    - This should compile, link, and run your test. Running your test - should generate the desired output file. -

    + category/my_new_test.output +
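    Using the placeholders from above, that copy step might look like this
    (the concrete paths depend on where your build and source trees live):

        $ cp BUILD_DIR/category/my_new_test/output SOURCE_DIR/tests/category/my_new_test.output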
    + At this point running the test again should be successful: +
     
    +    $ ctest -V -R "category/my_new_test"
    +      
    +

    -

    An expected output

    +

    Checking in

    - If you run your new test executable, you will get an output file - mytestname/output that should be used to compare all future - runs with. If the test - is relatively simple, it is often a good idea to look at the - output and make sure that the output is actually what you had - expected. However, if you do complex operations, this may - sometimes be impossible, and in this case we are quite happy with - any reasonable output file just to make sure that future - invokations of the test yield the same results. -

    + Tests are a way to make sure everything keeps working. If they + aren't automated, they are no good. We are therefore very + interested in getting new tests. If you have subversion write access + already, you can add the new test and the expected output + file: +
     
    -    

    - The next step is to copy this output file to the place where the - scripts can find it when they compare with newer runs. For this, you first - have to understand how correct results are verified. It works in the - following way: for each test, we have subdirectories - testname/cmp where we store the expected results in a file - testname/cmp/generic. If you create a new test, you should - therefore create this directory, and copy the output of your program, - testname/output to testname/cmp/generic. + svn add category/my_new_test.cc + svn add category/my_new_test.output + svn commit -m "New test" +

    + If you don't have subversion write access, talk to us in the + discussion group; writing testcases is a worthy and laudable task, + and we would like to encourage it by giving people the opportunity to + contribute!

    -

    - Why generic? The reason is that sometimes test results - differ slightly from platform to platform, for example because numerical - roundoff is different due to different floating point implementations on - different CPUs. What this means is that sometimes a single stored output is - not enough to verify that a test functioned properly: if you happen to be - on a platform different from the one on which the generic output was - created, your test will always fail even though it produces almost exactly - the same output. -

    + + + +

    Submitting test results

    - To avoid this, what the makefiles do is to first check whether an output - file is stored for this test and your particular configuration (platform - and compiler). If this isn't the case, it goes through a hierarchy of files - with related configurations, and only if none of them does it take the - generic output file. It then compares the output of your test run with the - first file it found in this process. To make things a bit clearer, if you - are, for example, on a i686-pc-linux-gnu box and use - gcc4.0 as your compiler, then the following files will be - sought (in this order): -

    +      To submit test results to our CDash
    +      instance just invoke ctest within a build directory (or designated
    +      build directory) with the -S option pointing to the
    +      run_testsuite.cmake script: 
     
    -testname/cmp/i686-pc-linux-gnu+gcc4.0
    -testname/cmp/i686-pc-linux-gnu+gcc3.4
    -testname/cmp/i686-pc-linux-gnu+gcc3.3
    -testname/cmp/generic
    -    
    - (This list is generated by the tests/hierarchy.pl script.) - Your output will then be compared with the first one that is actually - found. The virtue of this is that we don't have to store the output files - from all possible platforms (this would amount to gigabytes of data), but - that we only have store an output file for gcc4.0 if it differs from that - of gcc3.4, and for gcc3.4 if it differs from gcc3.3. If all of them are the - same, we would only have the generic output file. + $ ctest [...] -V -S ../tests/run_testsuite.cmake +
    + The script will run configure, build and ctest and submit the results + to the CDash server. It does not matter whether the configure, build + or ctest stages were run before that. Also in script mode, you can + specify the same options for ctest as explained above.

    - Most of the time, you will be able to generate output files only - for your own platform and compiler, and that's alright: someone - else will create the output files for other platforms - eventually. You only have to copy your output file to - testname/cmp/generic. + Note: The default output in script mode is very minimal. + Therefore, it is recommended to specify -V which will + give the same level of verbosity as the non-script mode.

    - At this point you can run -

    +      Note: The following variables can be set to via
    +      
     
    -      make my_new_test/OK
    -    
    - which should compare the present output with what you have just - copied into the compare directory. This should, of course, - succeed, since the two files should be identical. + ctest -D<variable>=<value> [...] +
    + to control the behaviour of the run_testsuite.cmake + script: +
    +
    +    CTEST_SOURCE_DIRECTORY
    +      - The source directory of deal.II (usually ending in "[...]/deal.II"
    +        (equivalent to https://svn.dealii.org/trunk/deal.II)
    +        Note: This is _not_ the test directory ending in "[...]/tests"
    +      - If unspecified, "../deal.II" and "../../$ relative to the location
    +        of this script is used. If this is not a source directory, an error
    +        is thrown.
    +
    +    CTEST_BINARY_DIRECTORY
    +      - The designated build directory (already configured, empty, or non
    +        existent - see the information about TRACKs what will happen)
    +      - If unspecified the current directory is used. If the current
    +        directory is equal to CTEST_SOURCE_DIRECTORY or the "tests"
    +        directory, an error is thrown.
    +
    +    CTEST_CMAKE_GENERATOR
    +      - The CMake Generator to use (e.g. "Unix Makefiles", or "Ninja", see
    +        $ man cmake)
    +      - If unspecified the current generator of a configured build directory
    +        will be used, otherwise "Unix Makefiles".
    +
    +    TRACK
    +      - The track the test should be submitted to. Defaults to "Experimental".
    +        Possible values are:
    +
    +        "Experimental"     - all tests that are not specifically "build" or
    +                             "regression" tests should go into this track
    +
    +        "Build Tests"      - Build tests that configure and build in a
    +                             clean directory and run the build tests
    +                             "build_tests/*"
    +
    +        "Nightly"          - Reserved for nightly regression tests for
    +                             build bots on various architectures
    +
    +        "Regression Tests" - Reserved for the regression tester
    +
    +    CONFIG_FILE
    +      - A configuration file (see docs/development/Config.sample)
    +        that will be used during the configuration stage (invokes
    +        $ cmake -C ${CONFIG_FILE}). This only has an effect if
    +        CTEST_BINARY_DIRECTORY is empty.
    +      
    + Furthermore, the variables described above can also be + set and will be handed automatically down to cmake.
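    As a concrete (purely illustrative) combination, a submission to the
    Experimental track using the Ninja generator could be started with:

        $ ctest -DTRACK="Experimental" -DCTEST_CMAKE_GENERATOR="Ninja" -V -S ../tests/run_testsuite.cmake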

    -

    - On the other hand, if you realize that an existing test fails on your - system, but that the differences (as shown when running with - verbose=on, see above) are only marginal and around the 6th or - 8th digit, then you should check in your output file for the platform you - work on. For this, you could copy testname/output to - testname/cmp/myplatform+compiler, but your life can be easier - if you simply type -

     
    -      make my_new_test/ref
    -    
    - which takes your output and copies it to the right place automatically. -

    + +

    Build tests

    +

    + Build tests are used to check that deal.II can be compiled on + different systems and with different compilers as well as + different configuration options. Results are collected in the "Build + Tests" track in CDash.

    +

    Running the build test suite is simple and we encourage deal.II + users with configurations not found on the test suite page to + participate. Assuming you checked out deal.II into the directory + deal.II, running it is as simple as: +

     
    -    

    Checking in

    + cd deal.II + mkdir build + cd build + ctest -j4 -S ../cmake/scripts/run_buildtest.cmake +
    +

    - Tests are a way to make sure everything keeps working. If they - aren't automated, they are no good. We are therefore very - interested in getting new tests. If you have subversion write access - already, you can add the new test and the expected output - file: -

    +      What this does is to compile and build deal.II in the directory
    +      build, try to configure, build (and run a subset of) all
    +      tutorial programs supported by the current configuration, and send the
    +      results to the CDash instance.
    +    

    - svn add bits/my_new_test.cc - svn add bits/my_new_test - svn add bits/my_new_test/cmp - svn add bits/my_new_test/cmp/generic - svn commit -m "New test" bits/my_new_test* -
    - In addition, you should do the following in order to avoid that the files - generated while running the testsuite show up in the output of svn - status commands: -
    +    

    + Note: Build tests require the designated build directory to be + completely empty and the source directory to be under subversion + version control. If you want to specify a build configuration for + cmake use a configuration file + to preseed the cache as explained above: +

     
    -      svn propset svn:ignore "obj.*
    -        exe
    -        output
    -        status
    -        OK" bits/my_new_test
    -      svn commit -m "Ignore generated files." bits/my_new_test
    -    
    - Note that the list of files given in quotes to the propset command extends - over several lines. + $ ctest -DCONFIG_FILE="[...]/Config.sample" [...] +

    + + + +

    Dedicated build tests

    +

    - If you don't have subversion write access, talk to us in the discussion group; - writing testcases is a worthy and laudable task, and we would - like to encourage it by giving people the opportunity to - contribute! + Build tests work best if they run automatically and periodically. + There is a detailed example for such dedicated build tests on the wiki.

    +
    The deal.II Authors diff --git a/deal.II/doc/license.html b/deal.II/doc/license.html index dac21d73d7..96d18b2cfc 100644 --- a/deal.II/doc/license.html +++ b/deal.II/doc/license.html @@ -18,7 +18,7 @@

    The deal.II library has been placed under an Open Source license, in the sense advocated by the Open Source + href="http://opensource.org" target="_top">Open Source Initiative. You are thus free to copy and use it, and you have free access to all source code.

    diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h index 54d0a2412b..42be735793 100644 --- a/deal.II/doc/news/changes.h +++ b/deal.II/doc/news/changes.h @@ -24,6 +24,23 @@ inconvenience this causes.

      +
    1. + Changed: The ghost handling of the parallel::distributed::Vector class has + been reworked: The vector now carries a global state that stores whether + ghost elements have been updated or not. If a vector has ghost elements, it + does not allow calls to compress() any more. Instead, a compress operation + can now only be done when the ghost entries have been cleared before by + calling zero_out_ghosts() or operator=0. The state can be queried by the new + method has_ghost_elements(). This change avoids spurious entries being + inserted by compress(), but requires some changes in user codes. The + behavior of a ghosted vector is now very similar to ghosted PETSc and + Trilinos vectors. The only difference is that the same vector can + also be used as a non-ghosted vector which is designed for use in assembly + routines. +
      + (Martin Kronbichler, 2013/10/18) +
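      A minimal sketch (not part of the patch) of the intended two-state protocol for user code; the index sets, the communicator argument, and the reinit overload taking owned and ghost IndexSets are assumptions made for illustration:

        #include <deal.II/base/index_set.h>
        #include <deal.II/lac/parallel_vector.h>

        // Write into (possibly ghost) entries, compress(), then switch to
        // read mode with update_ghost_values(); zero_out_ghosts() returns
        // the vector to write mode. Illustrative only.
        void assemble_then_read (const dealii::IndexSet &locally_owned,
                                 const dealii::IndexSet &ghost_entries,
                                 MPI_Comm                communicator)
        {
          dealii::parallel::distributed::Vector<double> v;
          v.reinit (locally_owned, ghost_entries, communicator);

          // Write mode: ghost entries may receive contributions during assembly.
          v.compress (dealii::VectorOperation::add);

          // Read mode: ghost entries can now be read, but compress() is no
          // longer allowed until the ghosts have been cleared again.
          v.update_ghost_values ();
          if (v.has_ghost_elements ())
            {
              // ... read ghost entries of v here ...
            }

          // Back to write mode for the next assembly loop.
          v.zero_out_ghosts ();
        }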
    2. +
    3. Removed: GridTools::collect_periodic_face_pairs. This function is superseded by GridTools::collect_periodic_faces which exports an @@ -61,6 +78,12 @@ inconvenience this causes.
        +
      1. New: The testsuite is now ported to + CMake and uses CTest as test driver. +
        + (Wolfgang Bangerth, Timo Heister, Matthias Maier, Bruno Turcksin, 2013/10/20) +
      2. +
      3. Changed: multithreadinfo::n_default_threads is now deprecated. Use the new n_threads() function instead, which works correctly with TBB. @@ -145,6 +168,30 @@ inconvenience this causes.
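      As a quick illustration (not taken from the patch), querying the thread count through the non-deprecated interface could look as follows; the free-standing helper function is invented:

        #include <deal.II/base/multithread_info.h>

        // multithread_info is deal.II's global MultithreadInfo object;
        // n_threads() replaces the deprecated n_default_threads member.
        unsigned int available_threads ()
        {
          return dealii::multithread_info.n_threads ();
        }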

        Specific improvements

          +
        1. + ~Subscriptor and ~GrowingVectorMemory no longer throw an exception (the + former if disable_abort_on_exception was called) to be compatible with the + C++11 standard which otherwise requires the program to immediately call + std::terminate. This was done with a new macro "AssertNothrow". +
          + (Wolfgang Bangerth, Matthias Maier, Bruno Turcksin 2013/10/22) +
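        A minimal sketch (not part of the patch) of where the new AssertNothrow macro is meant to be used, namely in a destructor that must not throw; the class and the checked condition are invented for illustration:

          #include <deal.II/base/exceptions.h>

          class HandleCache
          {
          public:
            HandleCache () : n_open_handles (0) {}

            ~HandleCache ()
            {
              // Throwing from a destructor would call std::terminate under
              // C++11 if disable_abort_on_exception() is active; AssertNothrow
              // merely reports the problem to deallog in that case.
              AssertNothrow (n_open_handles == 0,
                             dealii::ExcMessage ("Not all handles were released."));
            }

          private:
            unsigned int n_open_handles;
          };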
        2. + +
        3. + dealii::SolverControl::NoConvergence now inherits dealii::ExceptionBase and + is thrown via AssertThrow(false, ... ). +
          + (Matthias Maier, 2013/10/20) +
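        A hedged sketch (not from the patch) of how user code can react to the exception now that it derives from ExceptionBase; the solver setup below is schematic:

          #include <deal.II/lac/solver_control.h>
          #include <deal.II/lac/solver_cg.h>
          #include <deal.II/lac/sparse_matrix.h>
          #include <deal.II/lac/vector.h>
          #include <deal.II/lac/precondition.h>
          #include <iostream>

          void solve_or_report (const dealii::SparseMatrix<double> &A,
                                dealii::Vector<double>             &x,
                                const dealii::Vector<double>       &b)
          {
            dealii::SolverControl control (1000, 1e-12);
            dealii::SolverCG<>    cg (control);
            try
              {
                cg.solve (A, x, b, dealii::PreconditionIdentity ());
              }
            catch (const dealii::SolverControl::NoConvergence &exc)
              {
                // Since NoConvergence is now an ExceptionBase, what() carries
                // the usual detailed deal.II error message.
                std::cerr << exc.what () << std::endl;
              }
          }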
        4. + +
      5. + New: parallel::distributed::BlockVector now has the methods update_ghost_values, + compress, zero_out_ghosts, and has_ghost_elements that do the respective + operation on each block of parallel::distributed::Vector. +
          + (Martin Kronbichler, 2013/10/18) +
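        A short sketch (not from the patch) of the same protocol on a block vector; each call simply forwards to every block. The function and its context are invented:

          #include <deal.II/lac/parallel_block_vector.h>

          void finish_assembly (dealii::parallel::distributed::BlockVector<double> &rhs)
          {
            rhs.compress (dealii::VectorOperation::add); // accumulate off-processor writes
            rhs.update_ghost_values ();                  // all blocks switch to read mode
            // ... read ghost entries of rhs here ...
            rhs.zero_out_ghosts ();                      // back to write mode
          }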
        6. +
        7. Fixed: When deriving from DataOut to filter the cells where output is generated, there were two different bugs that result in segmentation faults or wrong cells written (example, step-18).
          diff --git a/deal.II/doc/news/news.html b/deal.II/doc/news/news.html index 9fa47a1f14..cfdbd129bc 100644 --- a/deal.II/doc/news/news.html +++ b/deal.II/doc/news/news.html @@ -557,7 +557,7 @@
          The principal authors of deal.II—Wolfgang Bangerth, Ralf Hartmann and Guido Kanscht—have received the J. H. Wilkinson + href="http://www.mcs.anl.gov/WilkinsonPrize/">J. H. Wilkinson Prize for Numerical Software for their creation of and work on the deal.II library. The prize committee particularly noted the reliability and usability of the software and the quality of the @@ -905,7 +905,7 @@ Library (HSL)
          - The Harwell Subroutine Library (HSL) is a library that, among much other functionality, offers some sparse direct solvers. We have added support for an diff --git a/deal.II/doc/publications/index.html b/deal.II/doc/publications/index.html index 73bc2acfd7..e7aa5b43a4 100644 --- a/deal.II/doc/publications/index.html +++ b/deal.II/doc/publications/index.html @@ -107,7 +107,7 @@
        8. W. Bangerth, - T. Heister and A finite basis grid analysis of the Hartree-Fock wavefunction method for one- and two-electron atoms
          - AIP Conf. Proc., Accepted, 2013. + AIP Conf. Proc., vol. 1558, pp. 1524, 2013.
        9. M. Anderson, W. Bangerth, G. Carey + target="_top">W. Bangerth, G. Carey
          Analysis of parameter sensitivity and experimental design for a class of nonlinear partial differential @@ -4183,7 +4181,7 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with J. Sci. Comput., vol. 24, pp. 321-341, 2005
          (This paper uses AFEPack which in turn uses deal.II's linear algebra library.)
        10. @@ -4233,8 +4231,7 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with Ph.D. thesis, University of Twente, The Netherlands, 2005 -
        11. R. B. Schulz, G. Echner, H. Ruehle, +
        12. R. B. Schulz, G. Echner, H. Ruehle, W. Stroh, J. Vierling, T. Vogt, J. Peter, W. Semmler
          Development of a fully rotational non-contact fluorescence tomographer @@ -4246,7 +4243,7 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with
        13. -
        14. R. B. Schulz, J. Peter, W. Semmler, C. D'andrea, G. Valentini, R. Cubeddu +
        15. R. B. Schulz, J. Peter, W. Semmler, C. D'andrea, G. Valentini, R. Cubeddu
          Quantifiability and Image Quality in Noncontact Fluorescence Tomography @@ -4298,8 +4295,7 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with
        16. -
        17. G. Carey, M. Anderson, B. Carnes, B. Kirk +
        18. G. Carey, M. Anderson, B. Carnes, B. Kirk
          Some aspects of adaptive grid technology related to boundary and interior layers @@ -4309,8 +4305,7 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with pp. 55-86, 2004
        19. -
        20. G. Carey, W. Barth, J. A. Woods, B. Kirk, +
        21. G. Carey, W. Barth, J. A. Woods, B. Kirk, M. Anderson, S. Chow, W. Bangerth
          @@ -4416,10 +4411,8 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with Modelling Simul. Mater. Sci. Eng., vol. 12, pp. 1293-1306, 2004
        22. -
        23. R. B. - Schulz, R. B. + Schulz, W. Bangerth, J. Peter, W. Semmler
          Independent modeling of @@ -5028,7 +5021,7 @@ A Goal Oriented Software Library for Solving PDEs and Optimization Problems with
          - The deal.II Authors + The deal.II Authors
          diff --git a/deal.II/doc/readme.html b/deal.II/doc/readme.html index 15ff24a35d..3a10fc35e4 100644 --- a/deal.II/doc/readme.html +++ b/deal.II/doc/readme.html @@ -78,22 +78,17 @@ platforms:

          +

          Most other combinations of POSIX-style operating systems and C++ Standard compliant compilers should also work. If they don't, @@ -147,11 +142,11 @@

        24. The library generates output in formats readable by - GNUPLOT, - GMV + GNUPLOT, + GMV (general mesh viewer), - Tecplot (ASCII and binary), - Tecplot (ASCII and binary), + Visualization Toolkit (Vtk), AVS Explorer, Open DX, @@ -168,7 +163,7 @@ of excellent programs that can read vtk and vtu, such as Visit, - ParaView, + ParaView, as well as others. Povray is freely available for almost all platforms. AVS is a commercial program available for most Unix flavors. Tecplot is a commercial program available for Windows @@ -282,7 +277,7 @@ possibly with different arguments. However, this sometimes leads to surprising results and you may not get exactly what you were hoping for. For more information, - see here. + see here.

        25. diff --git a/deal.II/doc/screen.css b/deal.II/doc/screen.css index 51bfd69bac..e840e05ddd 100644 --- a/deal.II/doc/screen.css +++ b/deal.II/doc/screen.css @@ -118,30 +118,27 @@ h1, h2, h3, h4, h5, h6 { border-bottom: 1px solid #aaaaaa; } -h1 { - font-size: 175%; - counter-reset: section; -} +h1 { font-size: 175%; + counter-reset: section; } -h2 { - font-size: 150%; - counter-reset: subsection; -} +h2 { font-size: 150%; + padding-top: 1.5em; + counter-reset: subsection; } h3 { font-size: 140%; - padding-top: 1.5em; + padding-top: 0.5em; padding-bottom: 0.17em; border-bottom: 1px dashed #aaaaaa; } -h4 { font-size: 110%; +h4 { font-size: 110%; border-bottom: none; font-weight: bold; } -h5 { font-size: 100%; +h5 { font-size: 100%; border-bottom: none; font-weight: bold; } -h6 { font-size: 80%; +h6 { font-size: 80%; border-bottom: none; font-weight: bold; } diff --git a/deal.II/doc/users/Config.sample b/deal.II/doc/users/Config.sample index df14dc07d3..06b915700b 100644 --- a/deal.II/doc/users/Config.sample +++ b/deal.II/doc/users/Config.sample @@ -1,8 +1,8 @@ ## ## -# Example configuration file # +# Example configuration file # # # -# See doc/readme.html and doc/development/cmake.html for further # -# details on how to use the cmake build system of deal.II. # +# See doc/readme.html and doc/users/cmake.html for further # +# details on how to use the cmake build system of deal.II. # ## ## @@ -75,6 +75,10 @@ # "Configure sensible default CFLAGS and CXXFLAGS depending on platform, compiler and build target." # ) # +# SET(DEAL_II_SETUP_COVERAGE OFF CACHE BOOL +# "Setup debug compiler flags to provide additional test coverage information. Currently only gprof is supported." +# ) +# # SET(CMAKE_CXX_COMPILER "" CACHE STRING # "CXX Compiler." # ) @@ -334,6 +338,9 @@ # SET(MPI_CXX_INCLUDE_PATH "include directory for mpi" CACHE STRING "") # SET(MPI_CXX_COMPILE_FLAGS "..." CACHE STRING "") # SET(MPI_CXX_LINK_FLAGS "..." CACHE STRING "") +# SET(MPI_VERSION "x.y" CACHE STRING "") +# SET(MPI_VERSION_MAJOR "x" CACHE STRING "") +# SET(MPI_VERSION_MINOR "y" CACHE STRING "") # # Additionally: # @@ -362,6 +369,10 @@ # SET(MUMPS_LIBRARIES "library;and;semicolon;separated;list;of;link;interface" CACHE STRING "") # SET(MUMPS_INCLUDE_DIRS "semicolon;separated;list;of;include;dirs" CACHE STRING "") # SET(MUMPS_LINKER_FLAGS "..." CACHE STRING "") +# SET(MUMPS_VERSION "x.y.z" CACHE STRING "") +# SET(MUMPS_VERSION_MAJOR "x" CACHE STRING "") +# SET(MUMPS_VERSION_MINOR "y" CACHE STRING "") +# SET(MUMPS_VERSION_SUBMINOR "z" CACHE STRING "") # @@ -430,11 +441,11 @@ # SET(PETSC_FOUND TRUE CACHE BOOL "") # SET(PETSC_LIBRARIES "library;and;semicolon;separated;list;of;link;interface" CACHE STRING "") # SET(PETSC_INCLUDE_DIRS "semicolon;separated;list;of;include;dirs" CACHE STRING "") -# SET(PETSC_VERSION "x.y.z." CACHE STRING "") +# SET(PETSC_VERSION "x.y.z.p" CACHE STRING "") # SET(PETSC_VERSION_MAJOR "x" CACHE STRING "") # SET(PETSC_VERSION_MINOR "y" CACHE STRING "") # SET(PETSC_VERSION_SUBMINOR "z" CACHE STRING "") -# SET(PETSC_VERSION_PATCH "" CACHE STRING "") +# SET(PETSC_VERSION_PATCH "p" CACHE STRING "") # SET(PETSC_WITH_MPI_UNI FALSE CACHE BOOL "") # @@ -456,11 +467,11 @@ # SET(SLEPC_FOUND TRUE CACHE BOOL "") # SET(SLEPC_LIBRARIES "library;and;semicolon;separated;list;of;link;interface" CACHE STRING "") # SET(SLEPC_INCLUDE_DIRS "semicolon;separated;list;of;include;dirs" CACHE STRING "") -# SET(SLEPC_VERSION "x.y.z." 
CACHE STRING "") +# SET(SLEPC_VERSION "x.y.z.p" CACHE STRING "") # SET(SLEPC_VERSION_MAJOR "x" CACHE STRING "") # SET(SLEPC_VERSION_MINOR "y" CACHE STRING "") # SET(SLEPC_VERSION_SUBMINOR "z" CACHE STRING "") -# SET(SLEPC_VERSION_PATCH "" CACHE STRING "") +# SET(SLEPC_VERSION_PATCH "p" CACHE STRING "") # SET(SLEPC_WITH_MPI_UNI FALSE CACHE BOOL "") # @@ -487,6 +498,9 @@ # SET(TBB_LIBRARIES "library;and;semicolon;separated;list;of;link;interface" CACHE STRING "") # SET(TBB_INCLUDE_DIRS "semicolon;separated;list;of;include;dirs" CACHE STRING "") # SET(TBB_WITH_DEBUGLIB TRUE CACHE BOOL "") +# SET(TBB_VERSION "x.y" CACHE STRING "") +# SET(TBB_VERSION_MAJOR "x" CACHE STRING "") +# SET(TBB_VERSION_MINOR "y" CACHE STRING "") # @@ -507,6 +521,7 @@ # SET(TRILINOS_FOUND TRUE CACHE BOOL "") # SET(TRILINOS_LIBRARIES "library;and;semicolon;separated;list;of;link;interface" CACHE STRING "") # SET(TRILINOS_INCLUDE_DIRS "semicolon;separated;list;of;include;dirs" CACHE STRING "") +# SET(TRILINOS_VERSION "x.y.z" CACHE STRING "") # SET(TRILINOS_VERSION_MAJOR "x" CACHE STRING "") # SET(TRILINOS_VERSION_MINOR "y" CACHE STRING "") # SET(TRILINOS_VERSION_SUBMINOR "z" CACHE STRING "") @@ -537,6 +552,10 @@ # SET(UMFPACK_LIBRARIES "library;and;semicolon;separated;list;of;link;interface" CACHE STRING "") # SET(UMFPACK_INCLUDE_DIRS "semicolon;separated;list;of;include;dirs" CACHE STRING "") # SET(UMFPACK_LINKER_FLAGS "..." CACHE STRING "") +# SET(UMFPACK_VERSION "x.y.z" CACHE STRING "") +# SET(UMFPACK_VERSION_MAJOR "x" CACHE STRING "") +# SET(UMFPACK_VERSION_MINOR "y" CACHE STRING "") +# SET(UMFPACK_VERSION_SUBMINOR "z" CACHE STRING "") # @@ -597,7 +616,7 @@ # -# Obnoxiously adavanced configuration about paths, install locations and +# Obnoxiously advanced configuration about paths, install locations and # names: # # SET(DEAL_II_PACKAGE_NAME "deal.II" CACHE STRING diff --git a/deal.II/doc/users/cmake.html b/deal.II/doc/users/cmake.html index 5524e0eda8..c4df8f5d04 100644 --- a/deal.II/doc/users/cmake.html +++ b/deal.II/doc/users/cmake.html @@ -33,6 +33,15 @@
        26. Shortcuts
      4. +
      5. Configuring and Building deal.II +
          +
        1. Primary build targets
        2. +
        3. Information about current configuration
        4. +
        5. Out-of-source versus in-source builds
        6. +
        7. CMake Generators
        8. +
        9. Installation
        10. +
        +
      6. Configuration options
        1. Feature configuration
        2. @@ -49,7 +58,7 @@
        3. Compiling only certain parts
        - +

        Operating cmake

        @@ -194,6 +203,235 @@ will be expanded to the full DEAL_II_COMPONENT_* variable name).

        + + + +

        Configuring and Building deal.II

        + +

        + This section provides some further details and advanced topics with + respect to configuration and building that are not covered in the README. +

        + + +

        Primary build targets

        + +

        The current list of primary build targets can be queried via + make info: +

        +
        +    ###
        +    #
        +    #  The following targets are available (invoke via $ make <target>):
        +    #
        +    #    all            - compiles the library and all enabled components
        +    #    clean          - removes all generated files
        +    #    install        - installs into CMAKE_INSTALL_PREFIX
        +    #    help           - prints a list of valid top level targets
        +    #    info           - prints this help message
        +    #
        +    #    edit_cache     - runs ccmake for changing (cached) configuration variables
        +    #                     and reruns the configure and generate phases of CMake
        +    #    rebuild_cache  - reruns the configure and generate phases of CMake
        +    #
        +    #    compat_files   - builds and installs the 'compat_files' component
        +    #    documentation  - builds and installs the 'documentation' component
        +    #    examples       - builds and installs the 'examples' component
        +    #    library        - builds and installs the 'library' component
        +    #    mesh_converter - builds and installs the 'mesh_converter' component
        +    #    parameter_gui  - builds and installs the 'parameter_gui' component
        +    #
        +    #    test           - runs a minimal set of tests
        +    #
        +    #    setup_test     - sets up the testsuite subprojects
        +    #    clean_test     - runs the 'clean' target in every testsuite subproject
        +    #    prune_test     - removes all testsuite subprojects
        +    #
        +    ###
        +      
        + + + +

        Information about current configuration

        + +

        + A configuration run of cmake (or ccmake) + writes a short summary of the current configuration into + CMAKE_BUILD_DIR/summary.log: +

        +
        +    ###
        +    #
        +    #  deal.II configuration:
        +    #        CMAKE_BUILD_TYPE:       DebugRelease
        +    #        BUILD_SHARED_LIBS:      ON
        +    #        CMAKE_INSTALL_PREFIX:   /tmp/deal.II/install
        +    #        CMAKE_SOURCE_DIR:       /tmp/deal.II/deal.II (Version 8.1.pre)
        +    #        CMAKE_BINARY_DIR:       /tmp/deal.II/build
        +    #        CMAKE_CXX_COMPILER:     GNU 4.7.3 on platform Linux x86_64
        +    #                                /usr/bin/c++
        +    #
        +    #  Configured Features (DEAL_II_ALLOW_BUNDLED = ON, DEAL_II_ALLOW_AUTODETECTION = ON):
        +    #      ( DEAL_II_WITH_64BIT_INDICES = OFF )
        +    #        DEAL_II_WITH_ARPACK set up with external dependencies
        +    #        DEAL_II_WITH_BOOST set up with external dependencies
        +    #        [...]
        +    #
        +    #  Component configuration:
        +    #        DEAL_II_COMPONENT_COMPAT_FILES
        +    #      ( DEAL_II_COMPONENT_DOCUMENTATION = OFF )
        +    #        DEAL_II_COMPONENT_EXAMPLES
        +    #        DEAL_II_COMPONENT_MESH_CONVERTER
        +    #      ( DEAL_II_COMPONENT_PARAMETER_GUI = OFF )
        +    #
        +    #  Detailed information (compiler flags, feature configuration) can be found in detailed.log
        +    #
        +    #  Run  $ make info  to print a help message with a list of top level targets
        +    #
        +    ###
        +      
        + This summary is also printed at the end of the configuration phase. + It tells you about build and install directory locations, feature + configuration (whether a feature is enabled with external/internal + dependencies, disabled or forced) and component configuration. + A more detailed version can be found in + CMAKE_BUILD_DIR/detailed.log that also includes detailed + information about feature configuration, e.g. +
        +
        +    #        DEAL_II_WITH_BOOST set up with external dependencies
        +    #            BOOST_VERSION = 1.52.0
        +    #            BOOST_DIR = 
        +    #            Boost_INCLUDE_DIRS = /usr/include
        +    #            Boost_LIBRARIES = /usr/lib64/libboost_serialization-mt.so;/usr/lib64/libboost_system-mt.so;
        +    # [...]
        +      
        + If this information is not sufficient, you might want to have a look + at the following files in CMAKE_BUILD_DIR +
          +
        • CMakeCache.txt: The file CMake stores its cached + variables in. Do not change directly. +
        • include/deal.II/base/config.h: The generated + config.h file +
        • lib/cmake/deal.II/deal.IIConfig.cmake: The project + configuration file for external projects. +
        +

        + + + + +

        Out-of-source versus in-source builds

        + +

        + A so-called out-of-source build is a setup where the build directory + (the directory containing intermediate and generated files) is + different from the source directory (the directory containing the + source code). With CMake, an out-of-source build is set up by invoking + cmake (or ccmake) from the designated build + directory, so for example (a build directory under the source + directory): +

        +
        +      $ mkdir build
        +      $ cd build
        +      $ cmake ..
        +      
        + The big advantage is that source files and intermediate files are + strictly separated (highly desired for version control) and that you + can have multiple build directories (with different configurations) at + the same time. +

        + +

        + Note: However, on rare occasions an in-source build might be + useful or needed, so it is supported +

        +
        +      $ cmake .
        +      
        + But we highly discourage it! +

        + + +

        CMake Generators

        + +

        + CMake is a Makefile generator. This allows you to switch the + generator that is used to something different. If you, for example, want + to automatically generate an Eclipse project of deal.II, you can run +

        +
        +      $ cmake -G"Eclipse CDT4 - Unix Makefiles" [...]
        +      
        + and load up the build directory as a project directly into Eclipse. + Have a look at the Wiki for more information. +

        + +

        + An interesting alternative to (GNU) Make might also be Ninja. Configure via +

        +
        +      $ cmake -GNinja [...]
        +      
        + and run ninja instead of make. +

        + + + +

        Installation

        + +

        + It is not necessary to install the library in order to use deal.II. + Invoking the all or library target will + compile the library and set up all necessary configuration in the + build directory so that external projects can directly use it. + However, we strongly recommend proceeding in the way explained in the + README and installing the + library to a designated install directory (different from the source and + build directories). +

        + +

        + To install the library, it is necessary to set the CMake variable + CMAKE_INSTALL_PREFIX to the designated install + directory. You can do this by invoking cmake together + with -DCMAKE_INSTALL_PREFIX=<...> or by invoking + ccmake. +

        + +

        + Note: When you don't install deal.II to an install directory + and use it directly from a build directory, + both the build and source directories have to be kept. +

        + +

        + Note: It is not necessary for the source, build, and install + directories to be different. All combinations are supported. +

        + + +

        Install a single component

        + +

        + If you want to only generate, compile and install a specific + component (most notably the documentation) you can use one of the + following top level targets: +

        +
        +    compat_files   - builds and installs the 'compat_files' component
        +    documentation  - builds and installs the 'documentation' component
        +    examples       - builds and installs the 'examples' component
        +    library        - builds and installs the 'library' component
        +    mesh_converter - builds and installs the 'mesh_converter' component
        +    parameter_gui  - builds and installs the 'parameter_gui' component
        +      
        + +

        Configuration options

        @@ -207,6 +445,7 @@

        +

        Feature configuration

        @@ -679,6 +918,11 @@ DEAL_II_SETUP_DEFAULT_COMPILER_FLAGS to OFF. Beware of the fact that certain features may still pull in necessary compiler flags. + +
      7. + You can set up additional debug compiler flags to provide test + coverage information by setting + DEAL_II_SETUP_COVERAGE to ON.

      @@ -687,9 +931,9 @@

      Installation

      - the location, - where the deal.II library will be - installed to is set with the help of + the location where the deal.II library will be + installed when invoking make install is set with the + help of

       
           CMAKE_INSTALL_PREFIX
      @@ -697,7 +941,7 @@
       
             Please note that depending on whether
             DEAL_II_COMPONENT_COMPAT_FILES is set, there will be
      -      different directory structures:
      +      a different default directory structure:
             
      • With DEAL_II_COMPONENT_COMPAT_FILES=ON: @@ -733,7 +977,7 @@

        - The individual target directories can be overwritten by setting the + The default directory structure can be changed by setting the following variables:

         
        @@ -816,7 +1060,8 @@
               library. Rather, make install will still want to have both
               libraries up to date and will therefore invoke make all
               automatically. To restrict builds in such a way that only one library
        -      will be installed, see this section.
        +      will be installed, see the configuration
        +      and installation sections.
             


        diff --git a/deal.II/examples/step-13/step-13.cc b/deal.II/examples/step-13/step-13.cc index fbef4d1660..17abbb2d79 100644 --- a/deal.II/examples/step-13/step-13.cc +++ b/deal.II/examples/step-13/step-13.cc @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -56,11 +56,34 @@ #include #include +#ifdef DEAL_II_WITH_THREADS +# include +# include +#endif + // The last step is as in all previous programs: namespace Step13 { using namespace dealii; + namespace Assembler + { + // Dummy structure + struct Scratch + { + Scratch() {} + }; + + struct CopyData + { + CopyData() {} + + unsigned int dofs_per_cell; + FullMatrix cell_matrix; + std::vector local_dof_indices; + }; + } + // @sect3{Evaluation of the solution} // As for the program itself, we first define classes that evaluate the @@ -625,7 +648,7 @@ namespace Step13 // various subobjects, and there is a function that implements a // conjugate gradient method as solver. private: - struct LinearSystem + struct LinearSystem { LinearSystem (const DoFHandler &dof_handler); @@ -637,6 +660,53 @@ namespace Step13 Vector rhs; }; +#ifdef DEAL_II_WITH_THREADS + + // Tasks in TBB must be derived from tbb::task and override tbb::task* + // execute. + // The purpose of HangingNodeTask is to apply execute DoFTools::make_hanging_node_constraints. + struct HangingNodeTask : public tbb::task + { + HangingNodeTask (const DoFHandler &dof_handler,ConstraintMatrix &hanging_node_constraints) : + dof_handler(&dof_handler), + hanging_node_constraints(& hanging_node_constraints) {} + + tbb::task* execute() + { + DoFTools::make_hanging_node_constraints(*dof_handler,*hanging_node_constraints); + + return NULL; + } + + const DoFHandler* dof_handler; + ConstraintMatrix* hanging_node_constraints; + }; + + + + // The purpose of SparsityPatternTask is to create the sparsity pattern. + struct SparsityPatternTask : public tbb::task + { + SparsityPatternTask (const DoFHandler &dof_handler,SparsityPattern &sparsity_pattern) : + dof_handler(&dof_handler), + sparsity_pattern(&sparsity_pattern) {} + + tbb::task* execute() + { + sparsity_pattern->reinit (dof_handler->n_dofs(), + dof_handler->n_dofs(), + dof_handler->max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (*dof_handler, *sparsity_pattern); + + return NULL; + } + + const DoFHandler* dof_handler; + SparsityPattern* sparsity_pattern; + }; + +#endif + // Finally, there is a pair of functions which will be used to assemble // the actual system matrix. It calls the virtual function assembling // the right hand side, and installs a number threads each running the @@ -646,10 +716,14 @@ namespace Step13 assemble_linear_system (LinearSystem &linear_system); void - assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::Mutex &mutex) const; + assemble_matrix (const typename DoFHandler::active_cell_iterator &cell, + Assembler::Scratch &scratch, + Assembler::CopyData ©_data) const; + + + void + copy_local_to_global(Assembler::CopyData const ©_data, + LinearSystem &linear_system) const; }; @@ -743,27 +817,19 @@ namespace Step13 // of equal size. 
The number of blocks is set to the default number of // threads to be used, which by default is set to the number of // processors found in your computer at startup of the program: - const unsigned int n_threads = multithread_info.n_threads(); - std::vector > - thread_ranges - = Threads::split_range (dof_handler.begin_active (), - dof_handler.end (), - n_threads); // These ranges are then assigned to a number of threads which we create // next. Each will assemble the local cell matrices on the assigned // cells, and fill the matrix object with it. Since there is need for // synchronization when filling the same matrix from different threads, // we need a mutex here: - Threads::Mutex mutex; - Threads::ThreadGroup<> threads; - for (unsigned int thread=0; thread::assemble_matrix, - *this, - linear_system, - thread_ranges[thread].first, - thread_ranges[thread].second, - mutex); + + Assembler::Scratch scratch; + Assembler::CopyData copy_data; + WorkStream::run(dof_handler.begin_active(),dof_handler.end(), + std_cxx1x::bind(&Solver::assemble_matrix,this,std_cxx1x::_1,std_cxx1x::_2,std_cxx1x::_3), + std_cxx1x::bind(&Solver::copy_local_to_global,this,std_cxx1x::_1,std_cxx1x::ref(linear_system)), + scratch,copy_data); // While the new threads assemble the system matrix, we can already // compute the right hand side vector in the main thread, and condense @@ -783,7 +849,6 @@ namespace Step13 // If this is done, wait for the matrix assembling threads, and condense // the constraints in the matrix as well: - threads.join_all (); linear_system.hanging_node_constraints.condense (linear_system.matrix); // Now that we have the linear system, we can also treat boundary @@ -803,38 +868,39 @@ namespace Step13 // on it any more, except for one point below. template void - Solver::assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::Mutex &mutex) const + Solver::assemble_matrix (const typename DoFHandler::active_cell_iterator &cell, + Assembler::Scratch &scratch, + Assembler::CopyData ©_data) const { FEValues fe_values (*fe, *quadrature, update_gradients | update_JxW_values); - const unsigned int dofs_per_cell = fe->dofs_per_cell; + copy_data.dofs_per_cell = fe->dofs_per_cell; const unsigned int n_q_points = quadrature->size(); - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + copy_data.cell_matrix = FullMatrix (copy_data.dofs_per_cell, copy_data.dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); + copy_data.local_dof_indices.resize(copy_data.dofs_per_cell); - for (typename DoFHandler::active_cell_iterator cell=begin_cell; - cell!=end_cell; ++cell) - { - cell_matrix = 0; + fe_values.reinit (cell); - fe_values.reinit (cell); + for (unsigned int q_point=0; q_pointget_dof_indices (copy_data.local_dof_indices); + } - cell->get_dof_indices (local_dof_indices); + template + void + Solver::copy_local_to_global(Assembler::CopyData const ©_data, + LinearSystem &linear_system) const + { // In the step-9 program, we have shown that you have to use the // mutex to lock the matrix when copying the elements from the local // to the global matrix. This was necessary to avoid that two @@ -862,21 +928,11 @@ namespace Step13 // whether the operation completed successfully or not, whether the // exit path was something we implemented willfully or whether the // function was exited by an exception that we did not foresee. 
- // - // deal.II implements the scoped locking pattern in the - // Treads::Mutex::ScopedLock class: it takes the mutex in the - // constructor and locks it; in its destructor, it unlocks it - // again. So here is how it is used: - Threads::Mutex::ScopedLock lock (mutex); - for (unsigned int i=0; ilock variable goes out of existence and its - // destructor the mutex is unlocked. - }; + for (unsigned int i=0; i &, - ConstraintMatrix &) - = &DoFTools::make_hanging_node_constraints; - - Threads::Thread<> - mhnc_thread = Threads::new_thread (mhnc_p, - dof_handler, - hanging_node_constraints); +#ifdef DEAL_II_WITH_THREADS + tbb::task_scheduler_init init; + // Create an empty task to be the parent of the two tasks that we need. + tbb::empty_task* empty_task = new (tbb::task::allocate_root()) tbb::empty_task; + // Set the reference count to 3 (number of children+1 because + // wati_for_all returns when ref_count is one). + empty_task->set_ref_count(3); + + HangingNodeTask* hanging_node_task = + new (empty_task->allocate_child()) HangingNodeTask(dof_handler,hanging_node_constraints); + SparsityPatternTask* sparsity_pattern_task = + new (empty_task->allocate_child()) SparsityPatternTask(dof_handler,sparsity_pattern); + + // Spawn the two tasks + empty_task->spawn(*hanging_node_task); + empty_task->spawn(*sparsity_pattern_task); + + // Wait for children to finish + empty_task->wait_for_all(); + // empty_task must be destroy manually because it does not return. + empty_task->destroy(*empty_task); +#else + DoFTools::make_hanging_node_constraints(dof_handler,hanging_node_constraints); sparsity_pattern.reinit (dof_handler.n_dofs(), dof_handler.n_dofs(), dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); +#endif - // Wait until the hanging_node_constraints object is fully - // set up, then close it and use it to condense the sparsity pattern: - mhnc_thread.join (); hanging_node_constraints.close (); hanging_node_constraints.condense (sparsity_pattern); diff --git a/deal.II/examples/step-14/step-14.cc b/deal.II/examples/step-14/step-14.cc index 58359c9e97..ec777aa8ce 100644 --- a/deal.II/examples/step-14/step-14.cc +++ b/deal.II/examples/step-14/step-14.cc @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -54,11 +54,33 @@ #include #include +#ifdef DEAL_II_WITH_THREADS +# include +# include +#endif + // The last step is as in all previous programs: namespace Step14 { using namespace dealii; + namespace Assembler + { + struct Scratch + { + Scratch() {} + }; + + struct CopyData + { + CopyData() {} + + unsigned int dofs_per_cell; + FullMatrix cell_matrix; + std::vector local_dof_indices; + }; + } + // @sect3{Evaluating the solution} // As mentioned in the introduction, significant parts of the program have @@ -470,14 +492,59 @@ namespace Step14 Vector rhs; }; +#ifdef DEAL_II_WITH_THREADS + + struct HangingNodeTask : public tbb::task + { + HangingNodeTask (const DoFHandler &dof_handler,ConstraintMatrix &hanging_node_constraints) : + dof_handler(&dof_handler), + hanging_node_constraints(& hanging_node_constraints) {} + + tbb::task* execute() + { + DoFTools::make_hanging_node_constraints(*dof_handler,*hanging_node_constraints); + + return NULL; + } + + const DoFHandler* dof_handler; + ConstraintMatrix* hanging_node_constraints; + }; + + struct SparsityPatternTask : public tbb::task + { + SparsityPatternTask (const DoFHandler &dof_handler,SparsityPattern &sparsity_pattern) : + dof_handler(&dof_handler), + 
sparsity_pattern(&sparsity_pattern) {} + + tbb::task* execute() + { + sparsity_pattern->reinit (dof_handler->n_dofs(), + dof_handler->n_dofs(), + dof_handler->max_couplings_between_dofs()); + DoFTools::make_sparsity_pattern (*dof_handler, *sparsity_pattern); + + return NULL; + } + + const DoFHandler* dof_handler; + SparsityPattern* sparsity_pattern; + }; + +#endif + void assemble_linear_system (LinearSystem &linear_system); void - assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::Mutex &mutex) const; + assemble_matrix (const typename DoFHandler::active_cell_iterator &cell, + Assembler::Scratch &scratch, + Assembler::CopyData ©_data) const; + + + void + copy_local_to_global(Assembler::CopyData const ©_data, + LinearSystem &linear_system) const; }; @@ -550,15 +617,13 @@ namespace Step14 dof_handler.end (), n_threads); - Threads::Mutex mutex; - Threads::ThreadGroup<> threads; - for (unsigned int thread=0; thread::assemble_matrix, - *this, - linear_system, - thread_ranges[thread].first, - thread_ranges[thread].second, - mutex); + Assembler::Scratch scratch; + Assembler::CopyData copy_data; + WorkStream::run(dof_handler.begin_active(),dof_handler.end(), + std_cxx1x::bind(&Solver::assemble_matrix,this,std_cxx1x::_1,std_cxx1x::_2,std_cxx1x::_3), + std_cxx1x::bind(&Solver::copy_local_to_global,this,std_cxx1x::_1,std_cxx1x::ref(linear_system)), + scratch,copy_data); + assemble_rhs (linear_system.rhs); linear_system.hanging_node_constraints.condense (linear_system.rhs); @@ -569,7 +634,6 @@ namespace Step14 *boundary_values, boundary_value_map); - threads.join_all (); linear_system.hanging_node_constraints.condense (linear_system.matrix); MatrixTools::apply_boundary_values (boundary_value_map, @@ -581,68 +645,110 @@ namespace Step14 template void - Solver::assemble_matrix (LinearSystem &linear_system, - const typename DoFHandler::active_cell_iterator &begin_cell, - const typename DoFHandler::active_cell_iterator &end_cell, - Threads::Mutex &mutex) const + Solver::assemble_matrix (const typename DoFHandler::active_cell_iterator &cell, + Assembler::Scratch &scratch, + Assembler::CopyData ©_data) const { FEValues fe_values (*fe, *quadrature, update_gradients | update_JxW_values); - const unsigned int dofs_per_cell = fe->dofs_per_cell; + copy_data.dofs_per_cell = fe->dofs_per_cell; const unsigned int n_q_points = quadrature->size(); - FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + copy_data.cell_matrix = FullMatrix (copy_data.dofs_per_cell, copy_data.dofs_per_cell); - std::vector local_dof_indices (dofs_per_cell); + copy_data.local_dof_indices.resize(copy_data.dofs_per_cell); - for (typename DoFHandler::active_cell_iterator cell=begin_cell; - cell!=end_cell; ++cell) - { - cell_matrix = 0; + fe_values.reinit (cell); - fe_values.reinit (cell); + for (unsigned int q_point=0; q_pointget_dof_indices (copy_data.local_dof_indices); + } - cell->get_dof_indices (local_dof_indices); - Threads::Mutex::ScopedLock lock (mutex); - for (unsigned int i=0; i + void + Solver::copy_local_to_global(Assembler::CopyData const ©_data, + LinearSystem &linear_system) const + { + for (unsigned int i=0; iThreads::ThreadGroup class here, but rather use + // the one created thread object directly to wait for this particular + // thread's exit. 
+ // + // Note that taking up the address of the + // DoFTools::make_hanging_node_constraints function is a + // little tricky, since there are actually three of them, one for each + // supported space dimension. Taking addresses of overloaded functions is + // somewhat complicated in C++, since the address-of operator + // & in that case returns more like a set of values (the + // addresses of all functions with that name), and selecting the right one + // is then the next step. If the context dictates which one to take (for + // example by assigning to a function pointer of known type), then the + // compiler can do that by itself, but if this set of pointers shall be + // given as the argument to a function that takes a template, the compiler + // could choose all without having a preference for one. We therefore have + // to make it clear to the compiler which one we would like to have; for + // this, we could use a cast, but for more clarity, we assign it to a + // temporary mhnc_p (short for pointer to + // make_hanging_node_constraints) with the right type, and using + // this pointer instead. template Solver::LinearSystem:: LinearSystem (const DoFHandler &dof_handler) { hanging_node_constraints.clear (); - void (*mhnc_p) (const DoFHandler &, - ConstraintMatrix &) - = &DoFTools::make_hanging_node_constraints; - - Threads::Thread<> - mhnc_thread = Threads::new_thread (mhnc_p, - dof_handler, - hanging_node_constraints); +#ifdef DEAL_II_WITH_THREADS + tbb::task_scheduler_init init; + // Create an empty task to be the parent of the two tasks that we need. + tbb::empty_task* empty_task = new (tbb::task::allocate_root()) tbb::empty_task; + // Set the reference count to 3 (number of children+1) + empty_task->set_ref_count(3); + + HangingNodeTask* hanging_node_task = + new (empty_task->allocate_child()) HangingNodeTask(dof_handler,hanging_node_constraints); + SparsityPatternTask* sparsity_pattern_task = + new (empty_task->allocate_child()) SparsityPatternTask(dof_handler,sparsity_pattern); + + empty_task->spawn(*hanging_node_task); + empty_task->spawn(*sparsity_pattern_task); + + // Wait for children to finish + empty_task->wait_for_all(); + empty_task->destroy(*empty_task); +#else + DoFTools::make_hanging_node_constraints(dof_handler,hanging_node_constraints); sparsity_pattern.reinit (dof_handler.n_dofs(), dof_handler.n_dofs(), dof_handler.max_couplings_between_dofs()); DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern); +#endif - mhnc_thread.join (); hanging_node_constraints.close (); hanging_node_constraints.condense (sparsity_pattern); diff --git a/deal.II/include/deal.II/algorithms/newton.templates.h b/deal.II/include/deal.II/algorithms/newton.templates.h index a57c34d233..b1380d6e4e 100644 --- a/deal.II/include/deal.II/algorithms/newton.templates.h +++ b/deal.II/include/deal.II/algorithms/newton.templates.h @@ -174,11 +174,10 @@ namespace Algorithms } deallog.pop(); - // in case of failure: throw - // exception + // in case of failure: throw exception if (control.last_check() != SolverControl::success) - throw SolverControl::NoConvergence (control.last_step(), - control.last_value()); + AssertThrow(false, SolverControl::NoConvergence (control.last_step(), + control.last_value())); // otherwise exit as normal } } diff --git a/deal.II/include/deal.II/base/config.h.in b/deal.II/include/deal.II/base/config.h.in index ad1681b62e..571abc27f3 100644 --- a/deal.II/include/deal.II/base/config.h.in +++ b/deal.II/include/deal.II/base/config.h.in @@ -115,11 +115,17 @@ * to allow 
using the standard library classes instead of the corresponding * BOOST classes. */ -#cmakedefine DEAL_II_CAN_USE_CXX11 -#ifdef DEAL_II_CAN_USE_CXX11 +#cmakedefine DEAL_II_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 +# define DEAL_II_CAN_USE_CXX11 # define DEAL_II_CAN_USE_CXX1X #endif +/* Defined if C++11 is enabled and the standard library supports + * template std::is_trivially_copyable + */ +#cmakedefine DEAL_II_HAVE_CXX11_IS_TRIVIALLY_COPYABLE + /* Defined if isnan is available */ #cmakedefine HAVE_ISNAN @@ -185,7 +191,7 @@ /**************************************** - * Configured in check_2_compiler_bugs: * + * Configured in check_3_compiler_bugs: * ****************************************/ /* Defined if we have to work around a bug with some compilers that will not @@ -221,6 +227,12 @@ */ #cmakedefine DEAL_II_BOOST_BIND_COMPILER_BUG +/* Defined if the compiler incorrectly deduces a constexpr as not being a + * constant integral expression under certain optimization (notably + * gcc-4.8.1 on Windows and Mac) + */ +#cmakedefine DEAL_II_CONSTEXPR_BUG + /***************************************** * Configured in configure_arpack.cmake: * diff --git a/deal.II/include/deal.II/base/exceptions.h b/deal.II/include/deal.II/base/exceptions.h index ffea72d31a..3ac055a0ac 100644 --- a/deal.II/include/deal.II/base/exceptions.h +++ b/deal.II/include/deal.II/base/exceptions.h @@ -198,41 +198,40 @@ namespace deal_II_exceptions { /** - * Conditionally abort the program. Depending on whether - * disable_abort_on_exception was called, this function either aborts - * the program flow by printing the error message provided by @p exc - * and calling std::abort(), or throws @p exc instead. + * Conditionally abort the program. + * + * Depending on whether disable_abort_on_exception was called, this + * function either aborts the program flow by printing the error + * message provided by @p exc and calling std::abort(), or + * throws @p exc instead (if @p nothrow is set to false). + * + * If the boolean @p nothrow is set to true and + * disable_abort_on_exception was called, the exception type is just + * printed to deallog and program flow continues. This is useful if + * throwing an exception is prohibited (e.g. in a destructor with + * noexcept(true) or throw()). */ - void abort (const ExceptionBase &exc); + void abort (const ExceptionBase &exc, bool nothrow = false); /** - * This routine does the main work for the exception generation - * mechanism used in the Assert macro. - * - * @ref ExceptionBase + * An enum describing how to treat an exception in issue_error */ - template - void issue_error_abort (const char *file, - int line, - const char *function, - const char *cond, - const char *exc_name, - exc e) + enum ExceptionHandling { - // Fill the fields of the exception object - e.set_fields (file, line, function, cond, exc_name); - - dealii::deal_II_exceptions::internals::abort(e); - } + abort_on_exception, + throw_on_exception, + abort_nothrow_on_exception + }; /** * This routine does the main work for the exception generation - * mechanism used in the AssertThrow macro. + * mechanism used in the Assert macro. 
* * @ref ExceptionBase */ template - void issue_error (const char *file, + void issue_error (ExceptionHandling handling, + const char *file, int line, const char *function, const char *cond, @@ -241,7 +240,18 @@ namespace deal_II_exceptions { // Fill the fields of the exception object e.set_fields (file, line, function, cond, exc_name); - throw e; + + switch(handling) + { + case abort_on_exception: + dealii::deal_II_exceptions::internals::abort(e); + break; + case abort_nothrow_on_exception: + dealii::deal_II_exceptions::internals::abort(e, /*nothrow =*/ true); + break; + case throw_on_exception: + throw e; + } } } /*namespace internals*/ @@ -261,18 +271,45 @@ namespace deal_II_exceptions * @author Wolfgang Bangerth, 1997, 1998, Matthias Maier, 2013 */ #ifdef DEBUG -#define Assert(cond, exc) \ - { \ - if (!(cond)) \ - ::dealii::deal_II_exceptions::internals:: \ - issue_error_abort (__FILE__, \ - __LINE__, \ - __PRETTY_FUNCTION__, #cond, #exc, exc); \ - } +#define Assert(cond, exc) \ +{ \ + if (!(cond)) \ + ::dealii::deal_II_exceptions::internals:: \ + issue_error(::dealii::deal_II_exceptions::internals::abort_on_exception,\ + __FILE__, __LINE__, __PRETTY_FUNCTION__, #cond, #exc, exc); \ +} +#else +#define Assert(cond, exc) \ +{} +#endif + + +/** + * A variant of the Assert macro above that exhibits the same + * runtime behaviour as long as disable_abort_on_exception was not called. + * + * However, if disable_abort_on_exception was called, this macro merely + * prints the exception that would be thrown to deallog and continues + * normally without throwing an exception. + * + * See the ExceptionBase class for more information. + * + * @ingroup Exceptions + * @author Wolfgang Bangerth, 1997, 1998, Matthias Maier, 2013 + */ +#ifdef DEBUG +#define AssertNothrow(cond, exc) \ +{ \ + if (!(cond)) \ + ::dealii::deal_II_exceptions::internals:: \ + issue_error( \ + ::dealii::deal_II_exceptions::internals::abort_nothrow_on_exception, \ + __FILE__, __LINE__, __PRETTY_FUNCTION__, #cond, #exc, exc); \ +} #else -#define Assert(cond, exc) \ - { } +#define AssertNothrow(cond, exc) \ +{} #endif @@ -289,24 +326,21 @@ namespace deal_II_exceptions * @author Wolfgang Bangerth, 1997, 1998, Matthias Maier, 2013 */ #ifdef HAVE_BUILTIN_EXPECT -#define AssertThrow(cond, exc) \ - { \ - if (__builtin_expect(!(cond), false)) \ - ::dealii::deal_II_exceptions::internals:: \ - issue_error (__FILE__, \ - __LINE__, \ - __PRETTY_FUNCTION__, #cond, #exc, exc); \ - } - +#define AssertThrow(cond, exc) \ +{ \ + if (__builtin_expect(!(cond), false)) \ + ::dealii::deal_II_exceptions::internals:: \ + issue_error(::dealii::deal_II_exceptions::internals::throw_on_exception,\ + __FILE__, __LINE__, __PRETTY_FUNCTION__, #cond, #exc, exc); \ +} #else /*ifdef HAVE_BUILTIN_EXPECT*/ -#define AssertThrow(cond, exc) \ - { \ - if (!(cond)) \ - ::dealii::deal_II_exceptions::internals:: \ - issue_error (__FILE__, \ - __LINE__, \ - __PRETTY_FUNCTION__, #cond, #exc, exc); \ - } +#define AssertThrow(cond, exc) \ +{ \ + if (!(cond)) \ + ::dealii::deal_II_exceptions::internals:: \ + issue_error(::dealii::deal_II_exceptions::internals::throw_on_exception,\ + __FILE__, __LINE__, __PRETTY_FUNCTION__, #cond, #exc, exc); \ +} #endif /*ifdef HAVE_BUILTIN_EXPECT*/ diff --git a/deal.II/include/deal.II/base/geometry_info.h b/deal.II/include/deal.II/base/geometry_info.h index ced3b187de..2b8b62331f 100644 --- a/deal.II/include/deal.II/base/geometry_info.h +++ b/deal.II/include/deal.II/base/geometry_info.h @@ -2169,11 +2169,15 @@ struct GeometryInfo * in the 
glossary). */ template - static - void + static void alternating_form_at_vertices - (const Point (&vertices)[vertices_per_cell], - Tensor (&forms)[vertices_per_cell]); +#ifndef DEAL_II_CONSTEXPR_BUG + (const Point (&vertices)[vertices_per_cell], + Tensor (&forms)[vertices_per_cell]); +#else + (const Point *vertices, + Tensor *forms); +#endif /** * For each face of the reference diff --git a/deal.II/include/deal.II/base/logstream.h b/deal.II/include/deal.II/base/logstream.h index 8afb3ea2cb..fe03109f66 100644 --- a/deal.II/include/deal.II/base/logstream.h +++ b/deal.II/include/deal.II/base/logstream.h @@ -361,13 +361,6 @@ public: std::ios::fmtflags flags(const std::ios::fmtflags f); - /** - * Output a constant something through this stream. - */ - template - LogStream &operator << (const T &t); - - /** * Output double precision numbers through this stream. * @@ -557,12 +550,31 @@ private: * for every thread that sends log messages. */ Threads::ThreadLocalStorage > outstreams; + + template friend LogStream &operator << (LogStream & log, const T &t); }; /* ----------------------------- Inline functions and templates ---------------- */ +/** + * Output a constant something through LogStream: + * + * @note We declare this operator as a non-member function so that it is + * possible to overload it with more specialized templated versions under + * C++11 overload resolution rules + */ +template +inline +LogStream & operator<< (LogStream &log, const T &t) +{ + // print to the internal stringstream + log.get_stream() << t; + return log; +} + + inline std::ostringstream & LogStream::get_stream() @@ -586,17 +598,6 @@ LogStream::get_stream() } -template -inline -LogStream & -LogStream::operator<< (const T &t) -{ - // print to the internal stringstream - get_stream() << t; - - return *this; -} - inline diff --git a/deal.II/include/deal.II/base/std_cxx1x/array.h b/deal.II/include/deal.II/base/std_cxx1x/array.h index c660d09b54..68b8060321 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/array.h +++ b/deal.II/include/deal.II/base/std_cxx1x/array.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/bind.h b/deal.II/include/deal.II/base/std_cxx1x/bind.h index 0c14d2ad0a..9cb2192988 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/bind.h +++ b/deal.II/include/deal.II/base/std_cxx1x/bind.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include diff --git a/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h b/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h index 89429ef2fc..4fd712c36c 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h +++ b/deal.II/include/deal.II/base/std_cxx1x/condition_variable.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/function.h b/deal.II/include/deal.II/base/std_cxx1x/function.h index d3e902ff89..86aea59ed1 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/function.h +++ b/deal.II/include/deal.II/base/std_cxx1x/function.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/mutex.h b/deal.II/include/deal.II/base/std_cxx1x/mutex.h index 817ebda660..7c195fffc9 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/mutex.h +++ 
b/deal.II/include/deal.II/base/std_cxx1x/mutex.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h b/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h index f8a6744ce5..172218deda 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h +++ b/deal.II/include/deal.II/base/std_cxx1x/shared_ptr.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/thread.h b/deal.II/include/deal.II/base/std_cxx1x/thread.h index 6b43b2525a..ac3ed95d4d 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/thread.h +++ b/deal.II/include/deal.II/base/std_cxx1x/thread.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/tuple.h b/deal.II/include/deal.II/base/std_cxx1x/tuple.h index 0282c112da..b3d846da6e 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/tuple.h +++ b/deal.II/include/deal.II/base/std_cxx1x/tuple.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/base/std_cxx1x/type_traits.h b/deal.II/include/deal.II/base/std_cxx1x/type_traits.h index f64cf36d60..d0e34b3280 100644 --- a/deal.II/include/deal.II/base/std_cxx1x/type_traits.h +++ b/deal.II/include/deal.II/base/std_cxx1x/type_traits.h @@ -20,7 +20,7 @@ #include -#ifdef DEAL_II_CAN_USE_CXX11 +#ifdef DEAL_II_USE_CXX11 # include DEAL_II_NAMESPACE_OPEN diff --git a/deal.II/include/deal.II/fe/fe_bdm.h b/deal.II/include/deal.II/fe/fe_bdm.h index c3e1d675b5..25c9e9e3d0 100644 --- a/deal.II/include/deal.II/fe/fe_bdm.h +++ b/deal.II/include/deal.II/fe/fe_bdm.h @@ -28,7 +28,6 @@ #include - DEAL_II_NAMESPACE_OPEN /** diff --git a/deal.II/include/deal.II/grid/tria_objects.h b/deal.II/include/deal.II/grid/tria_objects.h index 22965b0918..542baab55f 100644 --- a/deal.II/include/deal.II/grid/tria_objects.h +++ b/deal.II/include/deal.II/grid/tria_objects.h @@ -779,11 +779,10 @@ namespace internal void *& TriaObjects::user_pointer (const unsigned int i) { -#ifdef DEBUG Assert(user_data_type == data_unknown || user_data_type == data_pointer, ExcPointerIndexClash()); user_data_type = data_pointer; -#endif + Assert(i::user_pointer (const unsigned int i) const { -#ifdef DEBUG Assert(user_data_type == data_unknown || user_data_type == data_pointer, ExcPointerIndexClash()); user_data_type = data_pointer; -#endif + Assert(i::user_index (const unsigned int i) { -#ifdef DEBUG Assert(user_data_type == data_unknown || user_data_type == data_index, ExcPointerIndexClash()); user_data_type = data_index; -#endif + Assert(i::user_index (const unsigned int i) const { -#ifdef DEBUG Assert(user_data_type == data_unknown || user_data_type == data_index, ExcPointerIndexClash()); user_data_type = data_index; -#endif + Assert(i::solve (double &value, deallog.pop(); - // in case of failure: throw - // exception + // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } diff --git a/deal.II/include/deal.II/lac/parallel_block_vector.h 
b/deal.II/include/deal.II/lac/parallel_block_vector.h index 3be362a635..7e3f1bb190 100644 --- a/deal.II/include/deal.II/lac/parallel_block_vector.h +++ b/deal.II/include/deal.II/lac/parallel_block_vector.h @@ -261,6 +261,21 @@ namespace parallel */ void update_ghost_values () const; + /** + * This method zeros the entries on ghost dofs, but does not touch + * locally owned DoFs. + * + * After calling this method, read access to ghost elements of the + * vector is forbidden and an exception is thrown. Only write access to + * ghost elements is allowed in this state. + */ + void zero_out_ghosts (); + + /** + * Returns whether this Vector contains ghost elements. + */ + bool has_ghost_elements() const; + /** + * Return whether the vector contains only elements with value + * zero. This function is mainly for internal consistency checks and + * @@ -584,8 +599,12 @@ namespace parallel void BlockVector<Number>::compress (::dealii::VectorOperation::values operation) { + // start all requests for all blocks before finishing the transfers as + // this saves repeated synchronizations for (unsigned int block=0; block<this->n_blocks(); ++block) - this->block(block).compress(operation); + this->block(block).compress_start(block*10 + 8273, operation); + for (unsigned int block=0; block<this->n_blocks(); ++block) + this->block(block).compress_finish(operation); } @@ -596,7 +615,34 @@ namespace parallel BlockVector<Number>::update_ghost_values () const { for (unsigned int block=0; block<this->n_blocks(); ++block) - this->block(block).update_ghost_values(); + this->block(block).update_ghost_values_start(block*10 + 9923); + for (unsigned int block=0; block<this->n_blocks(); ++block) + this->block(block).update_ghost_values_finish(); } + + + + template <typename Number> + inline + void + BlockVector<Number>::zero_out_ghosts () + { + for (unsigned int block=0; block<this->n_blocks(); ++block) + this->block(block).zero_out_ghosts(); + } + + + + template <typename Number> + inline + bool + BlockVector<Number>::has_ghost_elements () const + { + bool has_ghost_elements = false; + for (unsigned int block=0; block<this->n_blocks(); ++block) + if (this->block(block).has_ghost_elements() == true) + has_ghost_elements = true; + return has_ghost_elements; } diff --git a/deal.II/include/deal.II/lac/parallel_vector.h b/deal.II/include/deal.II/lac/parallel_vector.h index 7e1d853e52..c568a4aba7 100644 --- a/deal.II/include/deal.II/lac/parallel_vector.h +++ b/deal.II/include/deal.II/lac/parallel_vector.h @@ -83,6 +83,22 @@ namespace parallel * - Of course, reduction operations (like norms) make use of collective * all-to-all MPI communications. * + * This vector can take two different states with respect to ghost + * elements: + * - After creation and whenever zero_out_ghosts() is called (or + * operator = (0.)), the vector only allows writing + * into ghost elements but not reading from them. + * - After a call to update_ghost_values(), the vector does not allow + * writing into ghost elements but only reading from them. This is in + * order to avoid undesired ghost data artifacts when calling compress() + * after modifying some vector entries. + * The current status of the ghost entries (read mode or write mode) can + * be queried by the method has_ghost_elements(), which returns + * true exactly when ghost elements have been updated and + * false otherwise, irrespective of the actual number of + * ghost entries in the vector layout (for that information, use + * n_ghost_entries() instead).
+ * * @author Katharina Kormann, Martin Kronbichler, 2010, 2011 */ template <typename Number> @@ -307,6 +323,16 @@ namespace parallel * ghost data is changed. This is needed to allow functions with a @p * const vector to perform the data exchange without creating * temporaries. + * + * After calling this method, write access to ghost elements of the + * vector is forbidden and an exception is thrown. Only read access to + * ghost elements is allowed in this state. Note that all subsequent + * operations on this vector, like global vector addition, etc., will + * also update the ghost values by a call to this method after the + * operation. However, global reduction operations like norms or the + * inner product will always ignore ghost elements in order to avoid + * counting the ghost data more than once. To allow writing to ghost + * elements again, call zero_out_ghosts(). */ void update_ghost_values () const; @@ -332,7 +358,14 @@ namespace parallel * the communication to finish. Once it is finished, add or set the data * (depending on the flag operation) to the respective positions in the * owning processor, and clear the contents in the ghost data - * fields. The meaning of this argument is the same as in compress(). + * fields. The meaning of this argument is the same as in + * compress(). + * + * This function should be called exactly once per vector after calling + * compress_start(), otherwise the result is undefined. In particular, it + * is not well-defined to call compress_start() on the same vector again + * before compress_finish() has been called. However, there is no + * warning to prevent this situation. * * Must follow a call to the @p compress_start function. */ @@ -372,9 +405,23 @@ namespace parallel /** * This method zeros the entries on ghost dofs, but does not touch * locally owned DoFs. + * + * After calling this method, read access to ghost elements of the + * vector is forbidden and an exception is thrown. Only write access to + * ghost elements is allowed in this state. */ void zero_out_ghosts (); + /** + * Returns whether the vector is currently in a state where ghost values + * can be read or not. This is the same functionality as other parallel + * vectors have. If this method returns false, this only means that + * read-access to ghost elements is prohibited whereas write access is + * still possible (to those entries specified as ghosts during + * initialization), not that there are no ghost elements at all. + */ + bool has_ghost_elements() const; + /** * Return whether the vector contains only elements with value * zero. This function is mainly for internal consistency checks and @@ -909,6 +956,15 @@ */ mutable Number *import_data; + /** + * Stores whether the vector currently allows for reading ghost elements + * or not. Note that this is to ensure consistent ghost data and does + * not indicate whether the vector actually can store ghost elements. In + * particular, when assembling a vector we do not allow reading + * elements, only writing them. + */ + mutable bool vector_is_ghosted; + /** + * Provide this class with all functionality of ::dealii::Vector by + * creating a VectorView object.
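
For illustration, a minimal usage sketch of the two ghost states described above; the class is assumed to be parallel::distributed::Vector as on this branch, and the index sets, communicator, and helper name are placeholders:

#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>

using namespace dealii;

// Hypothetical helper: 'owned' and 'ghosts' describe this rank's locally
// owned and ghost index ranges.
void ghost_state_sketch (const IndexSet &owned,
                         const IndexSet &ghosts,
                         const MPI_Comm  communicator)
{
  parallel::distributed::Vector<double> v (owned, ghosts, communicator);

  // After creation (and after zero_out_ghosts() or v = 0.) the vector is in
  // "write" mode: ghost entries may be written to, e.g. during assembly ...
  for (unsigned int i=0; i<ghosts.n_elements(); ++i)
    v (ghosts.nth_index_in_set(i)) += 1.;

  // ... and compress() accumulates these contributions on the owners.
  v.compress (VectorOperation::add);

  // update_ghost_values() switches to "read" mode: ghost entries may now be
  // read and has_ghost_elements() returns true.
  v.update_ghost_values ();
  const bool ghosts_readable = v.has_ghost_elements ();
  (void) ghosts_readable;

  // zero_out_ghosts() returns the vector to "write" mode for the next round.
  v.zero_out_ghosts ();
}
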
@@ -977,6 +1033,7 @@ namespace parallel allocated_size (0), val (0), import_data (0), + vector_is_ghosted (false), vector_view (0, static_cast(0)) {} @@ -990,6 +1047,7 @@ namespace parallel allocated_size (0), val (0), import_data (0), + vector_is_ghosted (false), vector_view (0, static_cast(0)) { reinit (v, true); @@ -1007,6 +1065,7 @@ namespace parallel allocated_size (0), val (0), import_data (0), + vector_is_ghosted (false), vector_view (0, static_cast(0)) { reinit (local_range, ghost_indices, communicator); @@ -1017,11 +1076,12 @@ namespace parallel template inline Vector::Vector (const IndexSet &local_range, - const MPI_Comm communicator) + const MPI_Comm communicator) : allocated_size (0), val (0), import_data (0), + vector_is_ghosted (false), vector_view (0, static_cast(0)) { IndexSet ghost_indices(local_range.size()); @@ -1037,6 +1097,7 @@ namespace parallel allocated_size (0), val (0), import_data (0), + vector_is_ghosted (false), vector_view (0, static_cast(0)) { reinit (size, false); @@ -1052,6 +1113,7 @@ namespace parallel allocated_size (0), val (0), import_data (0), + vector_is_ghosted (false), vector_view (0, static_cast(0)) { reinit (partitioner); @@ -1083,11 +1145,14 @@ namespace parallel { Assert (c.partitioner.get() != 0, ExcNotInitialized()); - // check whether the two vectors use the same - // parallel partitioner. if not, check if all - // local ranges are the same (that way, we can - // exchange data between different parallel - // layouts) + // we update ghost values whenever one of the input or output vector + // already held ghost values or when we import data from a vector with + // the same local range but different ghost layout + bool must_update_ghost_values = true; + + // check whether the two vectors use the same parallel partitioner. if + // not, check if all local ranges are the same (that way, we can + // exchange data between different parallel layouts) if (partitioner.get() == 0) reinit (c, true); else if (partitioner.get() != c.partitioner.get()) @@ -1101,8 +1166,12 @@ namespace parallel local_ranges_different_loc) reinit (c, true); } + else + must_update_ghost_values = vector_is_ghosted || c.vector_is_ghosted; + vector_view = c.vector_view; - update_ghost_values(); + if (must_update_ghost_values) + update_ghost_values(); return *this; } @@ -1135,8 +1204,12 @@ namespace parallel reinit (c, true); } vector_view.reinit (partitioner->local_size(), val); - if (partitioner->local_size() > 0) + + if (partitioner->local_size()) vector_view.equ (1., c.vector_view); + + if (vector_is_ghosted || c.vector_is_ghosted) + update_ghost_values(); return *this; } @@ -1195,6 +1268,17 @@ namespace parallel std::fill_n (&val[partitioner->local_size()], partitioner->n_ghost_indices(), Number()); + vector_is_ghosted = false; + } + + + + template + inline + bool + Vector::has_ghost_elements () const + { + return vector_is_ghosted; } @@ -1360,7 +1444,7 @@ namespace parallel Vector::mean_value_local () const { Assert (partitioner->size()!=0, ExcEmptyObject()); - return (partitioner->local_size()>0 ? + return (partitioner->local_size() ? vector_view.mean_value() : Number()); } @@ -1389,7 +1473,7 @@ namespace parallel typename Vector::real_type Vector::l1_norm_local () const { - return partitioner->local_size()>0 ? vector_view.l1_norm() : real_type(); + return partitioner->local_size() ? 
vector_view.l1_norm() : real_type(); } @@ -1424,7 +1508,7 @@ namespace parallel typename Vector::real_type Vector::lp_norm_local (const real_type p) const { - return partitioner->local_size()>0 ? vector_view.lp_norm(p) : real_type(); + return partitioner->local_size() ? vector_view.lp_norm(p) : real_type(); } @@ -1450,7 +1534,7 @@ namespace parallel typename Vector::real_type Vector::linfty_norm_local () const { - return partitioner->local_size()>0 ? vector_view.linfty_norm() : real_type(); + return partitioner->local_size() ? vector_view.linfty_norm() : real_type(); } @@ -1519,8 +1603,7 @@ namespace parallel { IndexSet is (size()); - const std::pair x = local_range(); - is.add_range (x.first, x.second); + is.add_range (local_range().first, local_range().second); return is; } @@ -1602,6 +1685,10 @@ namespace parallel Number Vector::operator() (const size_type global_index) const { + // do not allow reading a vector which is not in ghost mode + Assert (in_local_range (global_index) || vector_is_ghosted == true, + ExcMessage("You tried to read a ghost element of this vector, " + "but it has not imported its ghost values.")); return val[partitioner->global_to_local(global_index)]; } @@ -1612,6 +1699,12 @@ namespace parallel Number & Vector::operator() (const size_type global_index) { + // we would like to prevent reading ghosts from a vector that does not + // have them imported, but this is not possible because we might be in a + // part of the code where the vector has enabled ghosts but is non-const + // (then, the compiler picks this method according to the C++ rule book + // even if a human would pick the const method when this subsequent use + // is just a read) return val[partitioner->global_to_local (global_index)]; } @@ -1656,10 +1749,11 @@ namespace parallel const ForwardIterator indices_end, OutputIterator values_begin) const { - while (indices_begin != indices_end) { - *values_begin = operator()(*indices_begin); - indices_begin++; values_begin++; - } + while (indices_begin != indices_end) + { + *values_begin = operator()(*indices_begin); + indices_begin++; values_begin++; + } } @@ -1672,6 +1766,10 @@ namespace parallel AssertIndexRange (local_index, partitioner->local_size()+ partitioner->n_ghost_indices()); + // do not allow reading a vector which is not in ghost mode + Assert (local_index < local_size() || vector_is_ghosted == true, + ExcMessage("You tried to read a ghost element of this vector, " + "but it has not imported its ghost values.")); return val[local_index]; } @@ -1695,8 +1793,8 @@ namespace parallel Vector & Vector::operator = (const Number s) { - // if we call Vector::operator=0, we want to - // zero out all the entries plus ghosts. + // if we call Vector::operator=0, we want to zero out all the entries + // plus ghosts. 
if (partitioner->local_size() > 0) vector_view.dealii::template Vector::operator= (s); if (s==Number()) @@ -1713,11 +1811,15 @@ namespace parallel Vector::operator += (const Vector &v) { AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation + + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation if (local_size()>0) vector_view += v.vector_view; + + if (vector_is_ghosted) + update_ghost_values(); + return *this; } @@ -1729,11 +1831,15 @@ namespace parallel Vector::operator -= (const Vector &v) { AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation + + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation if (local_size()>0) vector_view -= v.vector_view; + + if (vector_is_ghosted) + update_ghost_values(); + return *this; } @@ -1771,7 +1877,7 @@ namespace parallel void Vector::add (const size_type n_indices, const size_type *indices, - const OtherNumber *values) + const OtherNumber *values) { for (size_type i=0; i::add (const Number a) { - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.add (a); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1802,12 +1910,13 @@ namespace parallel void Vector::add (const Vector &v) { - AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.add (v.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1818,12 +1927,13 @@ namespace parallel Vector::add (const Number a, const Vector &v) { - AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.add (a, v.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1836,13 +1946,13 @@ namespace parallel const Number b, const Vector &w) { - AssertDimension (local_size(), v.local_size()); - AssertDimension (local_size(), w.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.add (a, v.vector_view, b, w.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1853,12 +1963,13 @@ namespace parallel Vector::sadd (const Number x, const Vector &v) { - AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel 
implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.sadd (x, v.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1870,12 +1981,13 @@ namespace parallel const Number a, const Vector &v) { - AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.sadd (x, a, v.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1889,13 +2001,13 @@ namespace parallel const Number b, const Vector &w) { - AssertDimension (local_size(), v.local_size()); - AssertDimension (local_size(), w.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.sadd (x, a, v.vector_view, b, w.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1911,15 +2023,14 @@ namespace parallel const Number c, const Vector &x) { - AssertDimension (local_size(), v.local_size()); - AssertDimension (local_size(), w.local_size()); - AssertDimension (local_size(), x.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.sadd (s, a, v.vector_view, b, w.vector_view, c, x.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1929,11 +2040,7 @@ namespace parallel void Vector::scale (const Number factor) { - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) - vector_view *= factor; + operator *=(factor); } @@ -1943,11 +2050,14 @@ namespace parallel Vector & Vector::operator *= (const Number factor) { - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) - vector_view.operator *= (factor); + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) + vector_view *= factor; + + if (vector_is_ghosted) + update_ghost_values(); + return *this; } @@ -1958,11 +2068,7 @@ namespace parallel Vector & Vector::operator /= (const Number factor) { - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) - vector_view.operator /= (factor); + operator *= (1./factor); return *this; } @@ -1973,11 +2079,13 @@ namespace parallel void Vector::scale (const Vector &scaling_factors) { - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.scale 
(scaling_factors.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1988,7 +2096,11 @@ namespace parallel void Vector::scale (const Vector &scaling_factors) { - vector_view.template scale (scaling_factors.vector_view); + if (local_size()) + vector_view.template scale (scaling_factors.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -1999,12 +2111,13 @@ namespace parallel Vector::equ (const Number a, const Vector &v) { - AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.equ (a, v.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -2016,12 +2129,13 @@ namespace parallel Vector::equ (const Number a, const Vector &v) { - AssertDimension (local_size(), v.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.equ (a, v.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -2034,13 +2148,13 @@ namespace parallel const Number b, const Vector &w) { - AssertDimension (local_size(), v.local_size()); - AssertDimension (local_size(), w.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.equ (a, v.vector_view, b, w.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -2055,15 +2169,14 @@ namespace parallel const Number c, const Vector &x) { - AssertDimension (local_size(), v.local_size()); - AssertDimension (local_size(), w.local_size()); - AssertDimension (local_size(), w.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.equ (a, v.vector_view, b, w.vector_view, c, x.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } @@ -2074,13 +2187,13 @@ namespace parallel Vector::ratio (const Vector &a, const Vector &b) { - AssertDimension (local_size(), a.local_size()); - AssertDimension (local_size(), b.local_size()); - // dealii::Vector does not allow empty fields - // but this might happen on some processors - // for parallel implementation - if (local_size()>0) + // dealii::Vector does not allow empty fields but this might happen on + // some processors for parallel implementation + if (local_size()) vector_view.ratio (a.vector_view, b.vector_view); + + if (vector_is_ghosted) + update_ghost_values(); } diff --git a/deal.II/include/deal.II/lac/parallel_vector.templates.h b/deal.II/include/deal.II/lac/parallel_vector.templates.h index 334966c5f0..624168b2e7 100644 --- a/deal.II/include/deal.II/lac/parallel_vector.templates.h +++ b/deal.II/include/deal.II/lac/parallel_vector.templates.h @@ -92,6 +92,8 @@ namespace parallel 
// set entries to zero if so requested if (fast == false) this->operator = (Number()); + + vector_is_ghosted = false; } @@ -134,6 +136,8 @@ namespace parallel // call these methods and hence do not need to have the storage. import_data = 0; } + + vector_is_ghosted = false; } @@ -194,6 +198,8 @@ namespace parallel // call these methods and hence do not need to have the storage. import_data = 0; } + + vector_is_ghosted = false; } @@ -209,6 +215,8 @@ namespace parallel vector_view = c.vector_view; if (call_update_ghost_values == true) update_ghost_values(); + else + vector_is_ghosted = false; } @@ -218,10 +226,12 @@ namespace parallel Vector::compress_start (const unsigned int counter, ::dealii::VectorOperation::values operation) { -#ifdef DEAL_II_WITH_MPI + Assert (vector_is_ghosted == false, + ExcMessage ("Cannot call compress() on a ghosted vector")); +#ifdef DEAL_II_WITH_MPI // nothing to do for insert (only need to zero ghost entries in - // compress_finish(). in debug mode we still want to check consistency + // compress_finish()). in debug mode we want to check consistency // of the inserted data, therefore the communication is still // initialized. Having different code in debug and optimized mode is // somewhat dangerous, but it really saves communication so it seems @@ -244,20 +254,17 @@ namespace parallel const size_type n_import_targets = part.import_targets().size(); const size_type n_ghost_targets = part.ghost_targets().size(); - // Need to send and receive the data. Use - // non-blocking communication, where it is - // generally less overhead to first initiate - // the receive and then actually send the data + // Need to send and receive the data. Use non-blocking communication, + // where it is generally less overhead to first initiate the receive and + // then actually send the data if (compress_requests.size() == 0) { - // set channels in different range from - // update_ghost_values channels + // set channels in different range from update_ghost_values channels const unsigned int channel = counter + 400; unsigned int current_index_start = 0; compress_requests.resize (n_import_targets + n_ghost_targets); - // allocate import_data in case it is not set - // up yet + // allocate import_data in case it is not set up yet if (import_data == 0) import_data = new Number[part.n_import_indices()]; for (size_type i=0; i 0) { - int ierr; - ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]); + int ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]); Assert (ierr == MPI_SUCCESS, ExcInternalError()); } @@ -305,7 +311,6 @@ namespace parallel (void)counter; (void)operation; #endif - } @@ -328,8 +333,7 @@ namespace parallel const Utilities::MPI::Partitioner &part = *partitioner; - // nothing to do when we neither have import - // nor ghost indices. + // nothing to do when we neither have import nor ghost indices. 
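
For illustration of the non-blocking pattern referred to in the comments above (initiate the receive before the send, start all persistent requests at once, wait later), here is a minimal plain-MPI sketch; it is not deal.II code, and the buffers, ranks, and tag are placeholders:

#include <mpi.h>
#include <vector>

// Persistent-request idiom in the spirit of compress_start()/compress_finish():
// post the receive first, then the send, start both, overlap work, then wait.
void persistent_exchange_sketch (std::vector<double> &recv_buffer, const int source_rank,
                                 std::vector<double> &send_buffer, const int dest_rank,
                                 const int tag, MPI_Comm communicator)
{
  MPI_Request requests[2];
  MPI_Recv_init (&recv_buffer[0], static_cast<int>(recv_buffer.size()), MPI_DOUBLE,
                 source_rank, tag, communicator, &requests[0]);
  MPI_Send_init (&send_buffer[0], static_cast<int>(send_buffer.size()), MPI_DOUBLE,
                 dest_rank, tag, communicator, &requests[1]);

  MPI_Startall (2, requests);                      // analogous to *_start()
  // ... independent local work can overlap with the transfer here ...
  MPI_Waitall (2, requests, MPI_STATUSES_IGNORE);  // analogous to *_finish()

  MPI_Request_free (&requests[0]);
  MPI_Request_free (&requests[1]);
}
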
if (part.n_ghost_indices()==0 && part.n_import_indices()==0) return; @@ -346,9 +350,8 @@ namespace parallel // first wait for the receive to complete if (compress_requests.size() > 0 && n_import_targets > 0) { - int ierr; - ierr = MPI_Waitall (n_import_targets, &compress_requests[0], - MPI_STATUSES_IGNORE); + int ierr = MPI_Waitall (n_import_targets, &compress_requests[0], + MPI_STATUSES_IGNORE); Assert (ierr == MPI_SUCCESS, ExcInternalError()); Number *read_position = import_data; @@ -377,10 +380,9 @@ namespace parallel if (compress_requests.size() > 0 && n_ghost_targets > 0) { - int ierr; - ierr = MPI_Waitall (n_ghost_targets, - &compress_requests[n_import_targets], - MPI_STATUSES_IGNORE); + int ierr = MPI_Waitall (n_ghost_targets, + &compress_requests[n_import_targets], + MPI_STATUSES_IGNORE); Assert (ierr == MPI_SUCCESS, ExcInternalError()); } else @@ -401,8 +403,7 @@ namespace parallel #ifdef DEAL_II_WITH_MPI const Utilities::MPI::Partitioner &part = *partitioner; - // nothing to do when we neither have import - // nor ghost indices. + // nothing to do when we neither have import nor ghost indices. if (part.n_ghost_indices()==0 && part.n_import_indices()==0) return; @@ -412,10 +413,9 @@ namespace parallel const size_type n_import_targets = part.import_targets().size(); const size_type n_ghost_targets = part.ghost_targets().size(); - // Need to send and receive the data. Use - // non-blocking communication, where it is - // generally less overhead to first initiate - // the receive and then actually send the data + // Need to send and receive the data. Use non-blocking communication, + // where it is generally less overhead to first initiate the receive and + // then actually send the data if (update_ghost_values_requests.size() == 0) { Assert (part.local_size() == vector_view.size(), @@ -424,8 +424,8 @@ namespace parallel update_ghost_values_requests.resize (n_import_targets+n_ghost_targets); for (size_type i=0; i(&val[current_index_start]), part.ghost_targets()[i].second*sizeof(Number), MPI_BYTE, @@ -439,8 +439,7 @@ namespace parallel AssertDimension (current_index_start, part.local_size()+part.n_ghost_indices()); - // allocate import_data in case it is not set - // up yet + // allocate import_data in case it is not set up yet if (import_data == 0 && part.n_import_indices() > 0) import_data = new Number[part.n_import_indices()]; current_index_start = 0; @@ -458,8 +457,7 @@ namespace parallel AssertDimension (current_index_start, part.n_import_indices()); } - // copy the data that is actually to be send - // to the import_data field + // copy the data that is actually to be send to the import_data field if (part.n_import_indices() > 0) { Assert (import_data != 0, ExcInternalError()); @@ -475,9 +473,8 @@ namespace parallel update_ghost_values_requests.size()); if (update_ghost_values_requests.size() > 0) { - int ierr; - ierr = MPI_Startall(update_ghost_values_requests.size(), - &update_ghost_values_requests[0]); + int ierr = MPI_Startall(update_ghost_values_requests.size(), + &update_ghost_values_requests[0]); Assert (ierr == MPI_SUCCESS, ExcInternalError()); } #else @@ -492,10 +489,8 @@ namespace parallel Vector::update_ghost_values_finish () const { #ifdef DEAL_II_WITH_MPI - // wait for both sends and receives to - // complete, even though only receives are - // really necessary. this gives (much) better - // performance + // wait for both sends and receives to complete, even though only + // receives are really necessary. 
this gives (much) better performance AssertDimension (partitioner->ghost_targets().size() + partitioner->import_targets().size(), update_ghost_values_requests.size()); @@ -504,13 +499,13 @@ namespace parallel // make this function thread safe Threads::Mutex::ScopedLock lock (mutex); - int ierr; - ierr = MPI_Waitall (update_ghost_values_requests.size(), - &update_ghost_values_requests[0], - MPI_STATUSES_IGNORE); + int ierr = MPI_Waitall (update_ghost_values_requests.size(), + &update_ghost_values_requests[0], + MPI_STATUSES_IGNORE); Assert (ierr == MPI_SUCCESS, ExcInternalError()); } #endif + vector_is_ghosted = true; } @@ -520,28 +515,38 @@ namespace parallel Vector::swap (Vector &v) { #ifdef DEAL_II_WITH_MPI - // introduce a Barrier over all MPI processes - // to make sure that the compress request are - // no longer used before changing the owner - if (v.partitioner->n_mpi_processes() > 1) - MPI_Barrier (v.partitioner->get_communicator()); - if (partitioner->n_mpi_processes() > 1 && - v.partitioner->n_mpi_processes() != - partitioner->n_mpi_processes()) - MPI_Barrier (partitioner->get_communicator()); + +#ifdef DEBUG + // make sure that there are not outstanding requests from updating ghost + // values or compress + int flag = 1; + int ierr = MPI_Testall (update_ghost_values_requests.size(), + &update_ghost_values_requests[0], + &flag, MPI_STATUSES_IGNORE); + Assert (ierr == MPI_SUCCESS, ExcInternalError()); + Assert (flag == 1, + ExcMessage("MPI found unfinished update_ghost_values() requests" + "when calling swap, which is not allowed")); + ierr = MPI_Testall (compress_requests.size(), &compress_requests[0], + &flag, MPI_STATUSES_IGNORE); + Assert (ierr == MPI_SUCCESS, ExcInternalError()); + Assert (flag == 1, + ExcMessage("MPI found unfinished compress() requests " + "when calling swap, which is not allowed")); +#endif std::swap (compress_requests, v.compress_requests); std::swap (update_ghost_values_requests, v.update_ghost_values_requests); #endif - std::swap (partitioner, v.partitioner); - std::swap (allocated_size, v.allocated_size); - std::swap (val, v.val); - std::swap (import_data, v.import_data); + std::swap (partitioner, v.partitioner); + std::swap (allocated_size, v.allocated_size); + std::swap (val, v.val); + std::swap (import_data, v.import_data); + std::swap (vector_is_ghosted, v.vector_is_ghosted); - // vector view cannot be swapped so reset it - // manually (without touching the vector - // elements) + // vector view cannot be swapped so reset it manually (without touching + // the vector elements) vector_view.reinit (partitioner->local_size(), val); v.vector_view.reinit (v.partitioner->local_size(), v.val); } @@ -555,10 +560,9 @@ namespace parallel std::size_t memory = sizeof(*this); memory += sizeof (Number) * static_cast(allocated_size); - // if the partitioner is shared between more - // processors, just count a fraction of that - // memory, since we're not actually using more - // memory for it. + // if the partitioner is shared between more processors, just count a + // fraction of that memory, since we're not actually using more memory + // for it. 
if (partitioner.use_count() > 0) memory += partitioner->memory_consumption()/partitioner.use_count()+1; if (import_data != 0) @@ -587,10 +591,9 @@ namespace parallel else out.setf (std::ios::fixed, std::ios::floatfield); - // to make the vector write out all the - // information in order, use as many barriers - // as there are processors and start writing - // when it's our turn + // to make the vector write out all the information in order, use as + // many barriers as there are processors and start writing when it's our + // turn #ifdef DEAL_II_WITH_MPI if (partitioner->n_mpi_processes() > 1) for (unsigned int i=0; ithis_mpi_process(); i++) @@ -609,17 +612,22 @@ namespace parallel for (size_type i=0; ilocal_size(); ++i) out << local_element(i) << std::endl; out << std::endl; - out << "Ghost entries (global index / value):" << std::endl; - if (across) - for (size_type i=0; in_ghost_indices(); ++i) - out << '(' << partitioner->ghost_indices().nth_index_in_set(i) - << '/' << local_element(partitioner->local_size()+i) << ") "; - else - for (size_type i=0; in_ghost_indices(); ++i) - out << '(' << partitioner->ghost_indices().nth_index_in_set(i) - << '/' << local_element(partitioner->local_size()+i) << ")" - << std::endl; - out << std::endl << std::flush; + + if (vector_is_ghosted) + { + out << "Ghost entries (global index / value):" << std::endl; + if (across) + for (size_type i=0; in_ghost_indices(); ++i) + out << '(' << partitioner->ghost_indices().nth_index_in_set(i) + << '/' << local_element(partitioner->local_size()+i) << ") "; + else + for (size_type i=0; in_ghost_indices(); ++i) + out << '(' << partitioner->ghost_indices().nth_index_in_set(i) + << '/' << local_element(partitioner->local_size()+i) << ")" + << std::endl; + out << std::endl; + } + out << std::flush; #ifdef DEAL_II_WITH_MPI if (partitioner->n_mpi_processes() > 1) diff --git a/deal.II/include/deal.II/lac/slepc_solver.h b/deal.II/include/deal.II/lac/slepc_solver.h index 184164369d..2ce90aa9fb 100644 --- a/deal.II/include/deal.II/lac/slepc_solver.h +++ b/deal.II/include/deal.II/lac/slepc_solver.h @@ -86,7 +86,7 @@ DEAL_II_NAMESPACE_OPEN void SolverBase::solve (const PETScWrappers::MatrixBase &A, const PETScWrappers::MatrixBase &B, - std::vector &eigenvalues, + std::vector &eigenvalues, std::vector &eigenvectors, const unsigned int n_eigenpairs) { ... } @@ -154,7 +154,7 @@ namespace SLEPcWrappers template void solve (const PETScWrappers::MatrixBase &A, - std::vector &eigenvalues, + std::vector &eigenvalues, std::vector &eigenvectors, const unsigned int n_eigenpairs = 1); @@ -167,7 +167,7 @@ namespace SLEPcWrappers void solve (const PETScWrappers::MatrixBase &A, const PETScWrappers::MatrixBase &B, - std::vector &eigenvalues, + std::vector &eigenvalues, std::vector &eigenvectors, const unsigned int n_eigenpairs = 1); @@ -204,7 +204,7 @@ namespace SLEPcWrappers * default, no target is set. */ void - set_target_eigenvalue (const double &this_target); + set_target_eigenvalue (const PetscScalar &this_target); /** * Indicate which part of the spectrum is to be computed. 
By @@ -748,7 +748,7 @@ namespace SLEPcWrappers template void SolverBase::solve (const PETScWrappers::MatrixBase &A, - std::vector &eigenvalues, + std::vector &eigenvalues, std::vector &eigenvectors, const unsigned int n_eigenpairs) { @@ -780,7 +780,7 @@ namespace SLEPcWrappers void SolverBase::solve (const PETScWrappers::MatrixBase &A, const PETScWrappers::MatrixBase &B, - std::vector &eigenvalues, + std::vector &eigenvalues, std::vector &eigenvectors, const unsigned int n_eigenpairs) { diff --git a/deal.II/include/deal.II/lac/solver_bicgstab.h b/deal.II/include/deal.II/lac/solver_bicgstab.h index 98d65a7646..25a6eaa87d 100644 --- a/deal.II/include/deal.II/lac/solver_bicgstab.h +++ b/deal.II/include/deal.II/lac/solver_bicgstab.h @@ -436,8 +436,8 @@ SolverBicgstab::solve(const MATRIX &A, // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } diff --git a/deal.II/include/deal.II/lac/solver_cg.h b/deal.II/include/deal.II/lac/solver_cg.h index 3f31cdce72..4c01b49cc4 100644 --- a/deal.II/include/deal.II/lac/solver_cg.h +++ b/deal.II/include/deal.II/lac/solver_cg.h @@ -476,11 +476,10 @@ SolverCG::solve (const MATRIX &A, // Deallocate Memory cleanup(); - // in case of failure: throw - // exception + // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } diff --git a/deal.II/include/deal.II/lac/solver_control.h b/deal.II/include/deal.II/lac/solver_control.h index 7116304470..f303cddcf8 100644 --- a/deal.II/include/deal.II/lac/solver_control.h +++ b/deal.II/include/deal.II/lac/solver_control.h @@ -68,11 +68,8 @@ class SolverControl : public Subscriptor public: /** - * Enum denoting the different - * states a solver can be in. See - * the general documentation of - * this class for more - * information. + * Enum denoting the different states a solver can be in. See the general + * documentation of this class for more information. */ enum State { @@ -84,42 +81,36 @@ public: failure }; + + /** - * Class to be thrown upon - * failing convergence of an - * iterative solver, when either - * the number of iterations - * exceeds the limit or the - * residual fails to reach the - * desired limit, e.g. in the - * case of a break-down. + * Class to be thrown upon failing convergence of an iterative solver, + * when either the number of iterations exceeds the limit or the residual + * fails to reach the desired limit, e.g. in the case of a break-down. * - * The residual in the last - * iteration, as well as the - * iteration number of the last - * step are stored in this object - * and can be recovered upon - * catching an exception of this - * class. + * The residual in the last iteration, as well as the iteration number of + * the last step are stored in this object and can be recovered upon + * catching an exception of this class. */ - class NoConvergence : public std::exception + + class NoConvergence : public dealii::ExceptionBase { public: - /** - * Constructor. 
- */ NoConvergence (const unsigned int last_step, - const double last_residual); + const double last_residual) + : last_step (last_step), last_residual(last_residual) + {} - /** - * Standardized output for - * catch handlers. - */ - virtual const char *what () const throw (); + virtual ~NoConvergence () throw () {} + + virtual void print_info (std::ostream &out) const + { + out << "Iterative method reported convergence failure in step " + << last_step << " with residual " << last_residual << std::endl; + } /** - * Iteration number of the - * last step. + * Iteration number of the last step. */ const unsigned int last_step; @@ -130,25 +121,17 @@ public: }; + /** - * Constructor. The parameters - * @p n and @p tol are the - * maximum number of iteration - * steps before failure and the - * tolerance to determine success + * Constructor. The parameters @p n and @p tol are the maximum number of + * iteration steps before failure and the tolerance to determine success * of the iteration. * - * @p log_history specifies - * whether the history (i.e. the - * value to be checked and the - * number of the iteration step) - * shall be printed to - * @p deallog stream. Default - * is: do not print. Similarly, - * @p log_result specifies the - * whether the final result is - * logged to @p deallog. Default - * is yes. + * @p log_history specifies whether the history (i.e. the value to be + * checked and the number of the iteration step) shall be printed to @p + * deallog stream. Default is: do not print. Similarly, @p log_result + * specifies the whether the final result is logged to @p deallog. + * Default is yes. */ SolverControl (const unsigned int n = 100, const double tol = 1.e-10, @@ -156,9 +139,8 @@ public: const bool log_result = true); /** - * Virtual destructor is needed - * as there are virtual functions - * in this class. + * Virtual destructor is needed as there are virtual functions in this + * class. */ virtual ~SolverControl(); @@ -173,48 +155,26 @@ public: void parse_parameters (ParameterHandler ¶m); /** - * Decide about success or failure - * of an iteration. This function - * gets the current iteration step - * to determine, whether the - * allowed number of steps has - * been exceeded and returns - * @p failure in this case. If - * @p check_value is below the - * prescribed tolerance, it - * returns @p success. In all - * other cases @p iterate is - * returned to suggest - * continuation of the iterative - * procedure. + * Decide about success or failure of an iteration. This function gets + * the current iteration step to determine, whether the allowed number of + * steps has been exceeded and returns @p failure in this case. If @p + * check_value is below the prescribed tolerance, it returns @p success. + * In all other cases @p iterate is returned to suggest continuation of + * the iterative procedure. * - * The iteration is also aborted - * if the residual becomes a - * denormalized value - * (@p NaN). Note, however, that - * this check is only performed - * if the @p isnan function is - * provided by the operating - * system, which is not always - * true. The @p configure - * scripts checks for this and - * sets the flag @p HAVE_ISNAN - * in the file - * Make.global_options if - * this function was found. + * The iteration is also aborted if the residual becomes a denormalized + * value (@p NaN). Note, however, that this check is only performed if + * the @p isnan function is provided by the operating system, which is + * not always true. 
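
For illustration of how calling code observes such a convergence failure now that the solvers signal it via AssertThrow with the NoConvergence class above (a sketch only; the solver, matrix, vector, and preconditioner types and names are placeholders):

#include <deal.II/lac/solver_control.h>
#include <iostream>

using namespace dealii;

// Run a solver and report a convergence failure instead of aborting.
template <class SolverType, class MatrixType, class VectorType, class PreconditionerType>
void solve_or_report (SolverType &solver, const MatrixType &A,
                      VectorType &x, const VectorType &b,
                      const PreconditionerType &preconditioner)
{
  try
    {
      solver.solve (A, x, b, preconditioner);
    }
  catch (SolverControl::NoConvergence &exc)
    {
      // the exception still carries the data recorded by SolverControl
      std::cerr << "Solver failed in step " << exc.last_step
                << " with residual " << exc.last_residual << std::endl;
      exc.print_info (std::cerr);  // output provided through ExceptionBase
    }
}
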
The @p configure scripts checks for this and sets the + * flag @p HAVE_ISNAN in the file Make.global_options if this + * function was found. * - * check() additionally - * preserves @p step and - * @p check_value. These - * values are accessible by - * last_value() and + * check() additionally preserves @p step and @p check_value. + * These values are accessible by last_value() and * last_step(). * - * Derived classes may overload - * this function, e.g. to log the - * convergence indicators - * (@p check_value) or to do - * other computations. + * Derived classes may overload this function, e.g. to log the + * convergence indicators (@p check_value) or to do other computations. */ virtual State check (const unsigned int step, const double check_value); @@ -225,15 +185,13 @@ public: State last_check() const; /** - * Return the initial convergence - * criterion. + * Return the initial convergence criterion. */ double initial_value() const; /** - * Return the convergence value of last - * iteration step for which @p check was - * called by the solver. + * Return the convergence value of last iteration step for which @p check + * was called by the solver. */ double last_value() const; @@ -253,20 +211,15 @@ public: unsigned int set_max_steps (const unsigned int); /** - * Enables the failure - * check. Solving is stopped with - * @p ReturnState @p failure if - * residual>failure_residual with + * Enables the failure check. Solving is stopped with @p ReturnState @p + * failure if residual>failure_residual with * failure_residual:=rel_failure_residual*first_residual. */ void set_failure_criterion (const double rel_failure_residual); /** - * Disables failure check and - * resets - * @p relative_failure_residual - * and @p failure_residual to - * zero. + * Disables failure check and resets @p relative_failure_residual and @p + * failure_residual to zero. */ void clear_failure_criterion (); @@ -281,45 +234,34 @@ public: double set_tolerance (const double); /** - * Enables writing residuals of - * each step into a vector for - * later analysis. + * Enables writing residuals of each step into a vector for later + * analysis. */ void enable_history_data(); /** - * Average error reduction over - * all steps. + * Average error reduction over all steps. * - * Requires - * enable_history_data() + * Requires enable_history_data() */ double average_reduction() const; /** - * Error reduction of the last - * step; for stationary - * iterations, this approximates - * the norm of the iteration - * matrix. + * Error reduction of the last step; for stationary iterations, this + * approximates the norm of the iteration matrix. * - * Requires - * enable_history_data() + * Requires enable_history_data() */ double final_reduction() const; /** - * Error reduction of any - * iteration step. + * Error reduction of any iteration step. * - * Requires - * enable_history_data() + * Requires enable_history_data() */ double step_reduction(unsigned int step) const; /** - * Log each iteration step. Use - * @p log_frequency for skipping - * steps. + * Log each iteration step. Use @p log_frequency for skipping steps. */ void log_history (const bool); @@ -344,13 +286,9 @@ public: bool log_result () const; /** - * This exception is thrown if a - * function operating on the - * vector of history data of a - * SolverControl object id - * called, but storage of history - * data was not enabled by - * enable_history_data(). 
+ * This exception is thrown if a function operating on the vector of + * history data of a SolverControl object id called, but storage of + * history data was not enabled by enable_history_data(). */ DeclException0(ExcHistoryDataRequired); @@ -386,33 +324,26 @@ protected: unsigned int lstep; /** - * Is set to @p true by - * @p set_failure_criterion and - * enables failure checking. + * Is set to @p true by @p set_failure_criterion and enables failure + * checking. */ bool check_failure; /** - * Stores the - * @p rel_failure_residual set by - * @p set_failure_criterion + * Stores the @p rel_failure_residual set by @p set_failure_criterion */ double relative_failure_residual; /** - * @p failure_residual equals the - * first residual multiplied by - * @p relative_crit set by - * @p set_failure_criterion (see there). + * @p failure_residual equals the first residual multiplied by @p + * relative_crit set by @p set_failure_criterion (see there). * - * Until the first residual is - * known it is 0. + * Until the first residual is known it is 0. */ double failure_residual; /** - * Log convergence history to - * @p deallog. + * Log convergence history to @p deallog. */ bool m_log_history; @@ -422,29 +353,23 @@ protected: unsigned int m_log_frequency; /** - * Log iteration result to - * @p deallog. If true, after - * finishing the iteration, a - * statement about failure or - * success together with @p lstep + * Log iteration result to @p deallog. If true, after finishing the + * iteration, a statement about failure or success together with @p lstep * and @p lvalue are logged. */ bool m_log_result; /** - * Control over the storage of - * history data. Set by + * Control over the storage of history data. Set by * enable_history_data(). */ bool history_data_enabled; /** - * Vector storing the result - * after each iteration step for - * later statistical analysis. + * Vector storing the result after each iteration step for later + * statistical analysis. * - * Use of this vector is enabled - * by enable_history_data(). + * Use of this vector is enabled by enable_history_data(). */ std::vector history_data; }; @@ -468,11 +393,8 @@ class ReductionControl : public SolverControl { public: /** - * Constructor. Provide the - * reduction factor in addition - * to arguments that have the - * same meaning as those of the - * constructor of the + * Constructor. Provide the reduction factor in addition to arguments + * that have the same meaning as those of the constructor of the * SolverControl constructor. */ ReductionControl (const unsigned int maxiter = 100, @@ -482,27 +404,20 @@ public: const bool log_result = true); /** - * Initialize with a - * SolverControl object. The - * result will emulate - * SolverControl by setting - * #reduce to zero. + * Initialize with a SolverControl object. The result will emulate + * SolverControl by setting #reduce to zero. */ ReductionControl (const SolverControl &c); /** - * Assign a SolverControl object - * to ReductionControl. The - * result of the assignment will - * emulate SolverControl by - * setting #reduce to zero. + * Assign a SolverControl object to ReductionControl. The result of the + * assignment will emulate SolverControl by setting #reduce to zero. */ ReductionControl &operator= (const SolverControl &c); /** - * Virtual destructor is needed - * as there are virtual functions - * in this class. + * Virtual destructor is needed as there are virtual functions in this + * class. 
*/ virtual ~ReductionControl(); @@ -517,12 +432,9 @@ public: void parse_parameters (ParameterHandler ¶m); /** - * Decide about success or failure - * of an iteration. This function - * calls the one in the base - * class, but sets the tolerance - * to reduction * initial value - * upon the first iteration. + * Decide about success or failure of an iteration. This function calls + * the one in the base class, but sets the tolerance to reduction * + * initial value upon the first iteration. */ virtual State check (const unsigned int step, const double check_value); @@ -544,10 +456,8 @@ protected: double reduce; /** - * Reduced tolerance. Stop iterations - * if either this value is achieved - * or if the base class indicates - * success. + * Reduced tolerance. Stop iterations if either this value is achieved or + * if the base class indicates success. */ double reduced_tol; }; diff --git a/deal.II/include/deal.II/lac/solver_gmres.h b/deal.II/include/deal.II/lac/solver_gmres.h index 902b00493b..448244f6eb 100644 --- a/deal.II/include/deal.II/lac/solver_gmres.h +++ b/deal.II/include/deal.II/lac/solver_gmres.h @@ -824,8 +824,8 @@ SolverGMRES::solve (const MATRIX &A, deallog.pop(); // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } @@ -959,8 +959,8 @@ SolverFGMRES::solve ( deallog.pop(); // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); } #endif // DOXYGEN diff --git a/deal.II/include/deal.II/lac/solver_minres.h b/deal.II/include/deal.II/lac/solver_minres.h index 37934f4f04..5242f99d35 100644 --- a/deal.II/include/deal.II/lac/solver_minres.h +++ b/deal.II/include/deal.II/lac/solver_minres.h @@ -380,11 +380,10 @@ SolverMinRes::solve (const MATRIX &A, // Output deallog.pop (); - // in case of failure: throw - // exception + // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } diff --git a/deal.II/include/deal.II/lac/solver_qmrs.h b/deal.II/include/deal.II/lac/solver_qmrs.h index 1cce088b84..cf807c4c6c 100644 --- a/deal.II/include/deal.II/lac/solver_qmrs.h +++ b/deal.II/include/deal.II/lac/solver_qmrs.h @@ -301,11 +301,10 @@ SolverQMRS::solve (const MATRIX &A, // Output deallog.pop(); - // in case of failure: throw - // exception + // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } diff --git a/deal.II/include/deal.II/lac/solver_relaxation.h b/deal.II/include/deal.II/lac/solver_relaxation.h index 6a78113b5e..547c67bc4d 100644 --- a/deal.II/include/deal.II/lac/solver_relaxation.h +++ 
b/deal.II/include/deal.II/lac/solver_relaxation.h @@ -150,11 +150,10 @@ SolverRelaxation::solve ( } deallog.pop(); - // in case of failure: throw - // exception + // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } diff --git a/deal.II/include/deal.II/lac/solver_richardson.h b/deal.II/include/deal.II/lac/solver_richardson.h index fd881aeb6d..804a3a1162 100644 --- a/deal.II/include/deal.II/lac/solver_richardson.h +++ b/deal.II/include/deal.II/lac/solver_richardson.h @@ -285,11 +285,10 @@ SolverRichardson::solve (const MATRIX &A, this->memory.free(Vd); deallog.pop(); - // in case of failure: throw - // exception + // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } @@ -345,11 +344,10 @@ SolverRichardson::Tsolve (const MATRIX &A, this->memory.free(Vr); this->memory.free(Vd); deallog.pop(); - // in case of failure: throw - // exception + // in case of failure: throw exception if (this->control().last_check() != SolverControl::success) - throw SolverControl::NoConvergence (this->control().last_step(), - this->control().last_value()); + AssertThrow(false, SolverControl::NoConvergence (this->control().last_step(), + this->control().last_value())); // otherwise exit as normal } diff --git a/deal.II/include/deal.II/lac/vector_memory.h b/deal.II/include/deal.II/lac/vector_memory.h index 069ce99e85..73e0594351 100644 --- a/deal.II/include/deal.II/lac/vector_memory.h +++ b/deal.II/include/deal.II/lac/vector_memory.h @@ -80,7 +80,7 @@ public: * as there are virtual functions * in this class. */ - virtual ~VectorMemory() {} + virtual ~VectorMemory () {} /** * Return a pointer to a new @@ -283,7 +283,7 @@ public: * a warning message, if there * are allocated vectors left. */ - ~GrowingVectorMemory(); + virtual ~GrowingVectorMemory(); /** * Return a pointer to a new diff --git a/deal.II/include/deal.II/matrix_free/fe_evaluation.h b/deal.II/include/deal.II/matrix_free/fe_evaluation.h index 4e3cc316c5..aa3ff08f55 100644 --- a/deal.II/include/deal.II/matrix_free/fe_evaluation.h +++ b/deal.II/include/deal.II/matrix_free/fe_evaluation.h @@ -1304,6 +1304,10 @@ protected: void apply_hessians (const VectorizedArray in [], VectorizedArray out []); + VectorizedArray shape_val_evenodd[fe_degree+1][(n_q_points_1d+1)/2]; + VectorizedArray shape_gra_evenodd[fe_degree+1][(n_q_points_1d+1)/2]; + VectorizedArray shape_hes_evenodd[fe_degree+1][(n_q_points_1d+1)/2]; + /** * Friend declarations. 
*/ @@ -1660,7 +1664,7 @@ namespace internal VectorType &vec, Number &res) const { - res = vector_access (vec, index); + res = vector_access (const_cast(vec), index); } void pre_constraints (const Number &, @@ -1675,7 +1679,7 @@ namespace internal VectorType &vec, Number &res) const { - res += weight * vector_access (vec, index); + res += weight * vector_access (const_cast(vec), index); } void post_constraints (const Number &sum, @@ -3804,9 +3808,9 @@ namespace internal int direction, bool dof_to_quad, bool add> inline void - apply_tensor_product (const VectorizedArray *shape_data, - const VectorizedArray in [], - VectorizedArray out []) + apply_tensor_product (const Number *shape_data, + const Number in [], + Number out []) { AssertIndexRange (direction, dim); const int mm = dof_to_quad ? (fe_degree+1) : n_q_points_1d, @@ -3822,12 +3826,12 @@ namespace internal { for (int col=0; col val0; + Number val0; if (dof_to_quad == true) val0 = shape_data[col]; else val0 = shape_data[col*n_q_points_1d]; - VectorizedArray res0 = val0 * in[0]; + Number res0 = val0 * in[0]; for (int ind=1; ind inline void - apply_tensor_product_values (const VectorizedArray *shape_values, - const VectorizedArray in [], - VectorizedArray out []) + apply_tensor_product_values (const Number *shape_values, + const Number in [], + Number out []) { AssertIndexRange (direction, dim); const int mm = dof_to_quad ? (fe_degree+1) : n_q_points_1d, @@ -3915,7 +3919,7 @@ namespace internal { for (int col=0; col val0, val1, in0, in1, res0, res1; + Number val0, val1, in0, in1, res0, res1; if (dof_to_quad == true) { val0 = shape_values[col]; @@ -3955,7 +3959,7 @@ namespace internal } } else - res0 = res1 = VectorizedArray(); + res0 = res1 = Number(); if (dof_to_quad == true) { if (mm % 2 == 1) @@ -3996,8 +4000,8 @@ namespace internal } else if (dof_to_quad == true && nn%2==1) { - VectorizedArray res0; - VectorizedArray val0 = shape_values[n_cols]; + Number res0; + Number val0 = shape_values[n_cols]; if (mid > 0) { res0 = in[0] + in[stride*(mm-1)]; @@ -4005,13 +4009,13 @@ namespace internal for (int ind=1; ind val1 = in[stride*ind] + in[stride*(mm-1-ind)]; + Number val1 = in[stride*ind] + in[stride*(mm-1-ind)]; val1 *= val0; res0 += val1; } } else - res0 = VectorizedArray(); + res0 = Number(); if (add == false) out[stride*n_cols] = res0; else @@ -4019,16 +4023,16 @@ namespace internal } else if (dof_to_quad == false && nn%2 == 1) { - VectorizedArray res0; + Number res0; if (mid > 0) { - VectorizedArray val0 = shape_values[n_cols*n_q_points_1d]; + Number val0 = shape_values[n_cols*n_q_points_1d]; res0 = in[0] + in[stride*(mm-1)]; res0 *= val0; for (int ind=1; ind val1 = in[stride*ind] + in[stride*(mm-1-ind)]; + Number val1 = in[stride*ind] + in[stride*(mm-1-ind)]; val1 *= val0; res0 += val1; } @@ -4098,9 +4102,9 @@ namespace internal int direction, bool dof_to_quad, bool add> inline void - apply_tensor_product_gradients (const VectorizedArray *shape_gradients, - const VectorizedArray in [], - VectorizedArray out []) + apply_tensor_product_gradients (const Number *shape_gradients, + const Number in [], + Number out []) { AssertIndexRange (direction, dim); const int mm = dof_to_quad ? 
(fe_degree+1) : n_q_points_1d, @@ -4118,7 +4122,7 @@ namespace internal { for (int col=0; col val0, val1, in0, in1, res0, res1; + Number val0, val1, in0, in1, res0, res1; if (dof_to_quad == true) { val0 = shape_gradients[col]; @@ -4158,7 +4162,7 @@ namespace internal } } else - res0 = res1 = VectorizedArray(); + res0 = res1 = Number(); if (mm % 2 == 1) { if (dof_to_quad == true) @@ -4182,7 +4186,7 @@ namespace internal } if ( nn%2 == 1 ) { - VectorizedArray val0, res0; + Number val0, res0; if (dof_to_quad == true) val0 = shape_gradients[n_cols]; else @@ -4195,7 +4199,7 @@ namespace internal val0 = shape_gradients[ind*n_q_points_1d+n_cols]; else val0 = shape_gradients[n_cols*n_q_points_1d+ind]; - VectorizedArray val1 = in[stride*ind] - in[stride*(mm-1-ind)]; + Number val1 = in[stride*ind] - in[stride*(mm-1-ind)]; val1 *= val0; res0 += val1; } @@ -4242,9 +4246,9 @@ namespace internal int direction, bool dof_to_quad, bool add> inline void - apply_tensor_product_hessians (const VectorizedArray *shape_hessians, - const VectorizedArray in [], - VectorizedArray out []) + apply_tensor_product_hessians (const Number *shape_hessians, + const Number in [], + Number out []) { AssertIndexRange (direction, dim); const int mm = dof_to_quad ? (fe_degree+1) : n_q_points_1d, @@ -4262,7 +4266,7 @@ namespace internal { for (int col=0; col val0, val1, in0, in1, res0, res1; + Number val0, val1, in0, in1, res0, res1; if (dof_to_quad == true) { val0 = shape_hessians[col]; @@ -4302,7 +4306,7 @@ namespace internal } } else - res0 = res1 = VectorizedArray(); + res0 = res1 = Number(); if (mm % 2 == 1) { if (dof_to_quad == true) @@ -4326,7 +4330,7 @@ namespace internal } if ( nn%2 == 1 ) { - VectorizedArray val0, res0; + Number val0, res0; if (dof_to_quad == true) val0 = shape_hessians[n_cols]; else @@ -4341,13 +4345,13 @@ namespace internal val0 = shape_hessians[ind*n_q_points_1d+n_cols]; else val0 = shape_hessians[n_cols*n_q_points_1d+ind]; - VectorizedArray val1 = in[stride*ind] + in[stride*(mm-1-ind)]; + Number val1 = in[stride*ind] + in[stride*(mm-1-ind)]; val1 *= val0; res0 += val1; } } else - res0 = VectorizedArray(); + res0 = Number(); if (mm % 2 == 1) { if (dof_to_quad == true) @@ -4390,6 +4394,204 @@ namespace internal + // This method implements a different approach to the symmetric case for + // values, gradients, and Hessians also treated with the above functions: It + // is possible to reduce the cost per dimension from N^2 to N^2/2, where N + // is the number of 1D dofs (there are only N^2/2 different entries in the + // shape matrix, so this is plausible). The approach is based on the idea of + // applying the operator on the even and odd part of the input vectors + // separately, given that the shape functions evaluated on quadrature points + // are symmetric. This method is presented e.g. in the book "Implementing + // Spectral Methods for Partial Differential Equations" by David A. Kopriva, + // Springer, 2009, section 3.5.3 (Even-Odd-Decomposition). Even though the + // experiments in the book say that the method is not efficient for N<20, it + // is more efficient in the context where the loop bounds are compile-time + // constants (templates). + template + inline + void + apply_tensor_product_evenodd (const Number shapes [][(n_q_points_1d+1)/2], + const Number in [], + Number out []) + { + AssertIndexRange (type, 3); + AssertIndexRange (direction, dim); + const int mm = dof_to_quad ? (fe_degree+1) : n_q_points_1d, + nn = dof_to_quad ? 
n_q_points_1d : (fe_degree+1); + const int n_cols = nn / 2; + const int mid = mm / 2; + + const int n_blocks1 = (dim > 1 ? (direction > 0 ? nn : mm) : 1); + const int n_blocks2 = (dim > 2 ? (direction > 1 ? nn : mm) : 1); + const int stride = Utilities::fixed_int_power::value; + + // this code may look very inefficient at first sight due to the many + // different cases with if's at the innermost loop part, but all of the + // conditionals can be evaluated at compile time because they are + // templates, so the compiler should optimize everything away + for (int i2=0; i2 0) + { + if (dof_to_quad == true) + { + r0 = shapes[0][col] * xp[0]; + r1 = shapes[fe_degree][col] * xm[0]; + } + else + { + r0 = shapes[col][0] * xp[0]; + r1 = shapes[fe_degree-col][0] * xm[0]; + } + for (int ind=1; ind 0)) + r0 += shapes[col][mid] * in[stride*mid]; + + if (add == false) + { + out[stride*col] = r0 + r1; + if (type == 1 && dof_to_quad == false) + out[stride*(nn-1-col)] = r1 - r0; + else + out[stride*(nn-1-col)] = r0 - r1; + } + else + { + out[stride*col] += r0 + r1; + if (type == 1 && dof_to_quad == false) + out[stride*(nn-1-col)] += r1 - r0; + else + out[stride*(nn-1-col)] += r0 - r1; + } + } + if ( type == 0 && dof_to_quad == true && nn%2==1 && mm%2==1 ) + { + if (add==false) + out[stride*n_cols] = in[stride*mid]; + else + out[stride*n_cols] += in[stride*mid]; + } + else if (dof_to_quad == true && nn%2==1) + { + Number r0; + if (mid > 0) + { + r0 = shapes[0][n_cols] * xp[0]; + for (int ind=1; ind 0) + { + if (type == 1) + { + r0 = shapes[n_cols][0] * xm[0]; + for (int ind=1; ind inline void - apply_tensor_product_gradients_gl (const VectorizedArray *shape_gradients, - const VectorizedArray in [], - VectorizedArray out []) + apply_tensor_product_gradients_gl (const Number *shape_gradients, + const Number in [], + Number out []) { AssertIndexRange (direction, dim); const int mm = fe_degree+1; @@ -4436,7 +4638,7 @@ namespace internal { for (int col=0; col val0, val1, in0, in1, res0, res1; + Number val0, val1, in0, in1, res0, res1; if (mid > 0) { if (dof_to_quad == true) @@ -4506,7 +4708,7 @@ namespace internal } } else - res0 = res1 = VectorizedArray(); + res0 = res1 = Number(); if (mm % 2 == 1) { if (dof_to_quad == true) @@ -4530,7 +4732,7 @@ namespace internal } if ( nn%2 == 1 ) { - VectorizedArray val0, res0; + Number val0, res0; if (dof_to_quad == true) val0 = shape_gradients[n_cols]; else @@ -4545,13 +4747,13 @@ namespace internal val0 = shape_gradients[ind*mm+n_cols]; else val0 = shape_gradients[n_cols*mm+ind]; - VectorizedArray val1 = in[stride*ind] - in[stride*(mm-1-ind)]; + Number val1 = in[stride*ind] - in[stride*(mm-1-ind)]; val1 *= val0; res0 += val1; } } else - res0 = VectorizedArray(); + res0 = Number(); if (add == false) out[stride*n_cols] = res0; else @@ -4904,7 +5106,8 @@ namespace internal fe_eval.dof_values_initialized = true; #endif } -} + +} // end of namespace internal @@ -4986,8 +5189,8 @@ FEEvaluationGeneral ::apply_values(const VectorizedArray in [], VectorizedArray out []) { - internal::apply_tensor_product + internal::apply_tensor_product, direction, dof_to_quad, add> (this->data.shape_values.begin(), in, out); } @@ -5002,8 +5205,8 @@ FEEvaluationGeneral ::apply_gradients(const VectorizedArray in [], VectorizedArray out []) { - internal::apply_tensor_product + internal::apply_tensor_product, direction, dof_to_quad, add> (this->data.shape_gradients.begin(), in, out); } @@ -5018,8 +5221,8 @@ FEEvaluationGeneral ::apply_hessians(const VectorizedArray in [], VectorizedArray out []) { 
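
The comment introducing apply_tensor_product_evenodd above describes the even-odd idea: because the 1D shape values are mirror-symmetric (S(q,i) equals S(n_q-1-q, n_d-1-i)), the operator can act separately on the even and odd combinations x_i + x_{n_d-1-i} and x_i - x_{n_d-1-i}, roughly halving the number of multiplications per direction; the FEEvaluation constructor further down precomputes the corresponding 0.5*(S +/- S_mirror) tables as shape_val_evenodd and friends. The following standalone sketch is not deal.II code (all names are invented) and merely checks that idea for a single 1D matrix-vector product against the naive version:

#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
  const int n_d = 4, n_q = 5;                  // 1D dofs and quadrature points
  std::vector<double> S(n_q * n_d, 0.0);       // row-major: S[q*n_d + i]
  for (int q = 0; q < (n_q + 1) / 2; ++q)      // fabricate a mirror-symmetric matrix
    for (int i = 0; i < n_d; ++i)
      {
        const double v = std::sin(0.3 * (q + 1)) * std::cos(0.7 * (i + 1));
        S[q * n_d + i]                         = v;
        S[(n_q - 1 - q) * n_d + (n_d - 1 - i)] = v;   // mirror entry
      }

  const std::vector<double> x = {1.0, -2.0, 0.5, 3.0};

  // naive product: n_q * n_d multiplications
  std::vector<double> y_ref(n_q, 0.0);
  for (int q = 0; q < n_q; ++q)
    for (int i = 0; i < n_d; ++i)
      y_ref[q] += S[q * n_d + i] * x[i];

  // even-odd product: work on x_i + x_{n_d-1-i} and x_i - x_{n_d-1-i}
  std::vector<double> xp(n_d / 2), xm(n_d / 2);
  for (int i = 0; i < n_d / 2; ++i)
    {
      xp[i] = x[i] + x[n_d - 1 - i];
      xm[i] = x[i] - x[n_d - 1 - i];
    }

  std::vector<double> y(n_q, 0.0);
  for (int q = 0; q < n_q / 2; ++q)
    {
      double r_even = 0.0, r_odd = 0.0;
      for (int i = 0; i < n_d / 2; ++i)
        {
          // symmetric part acts on xp, skew-symmetric part on xm; in the
          // library these 0.5*(S +/- S_mirror) factors are precomputed
          r_even += 0.5 * (S[q * n_d + i] + S[q * n_d + n_d - 1 - i]) * xp[i];
          r_odd  += 0.5 * (S[q * n_d + i] - S[q * n_d + n_d - 1 - i]) * xm[i];
        }
      if (n_d % 2 == 1)                        // middle dof is even by definition
        r_even += S[q * n_d + n_d / 2] * x[n_d / 2];
      y[q]           = r_even + r_odd;
      y[n_q - 1 - q] = r_even - r_odd;         // mirror row by symmetry
    }
  if (n_q % 2 == 1)                            // middle quadrature point
    {
      double r = 0.0;
      for (int i = 0; i < n_d / 2; ++i)
        r += S[(n_q / 2) * n_d + i] * xp[i];
      if (n_d % 2 == 1)
        r += S[(n_q / 2) * n_d + n_d / 2] * x[n_d / 2];
      y[n_q / 2] = r;
    }

  for (int q = 0; q < n_q; ++q)
    assert(std::abs(y[q] - y_ref[q]) < 1e-12);
  std::cout << "even-odd product matches the naive product" << std::endl;
  return 0;
}

In the library the inner loop bounds are compile-time template constants, which is why, as the comment notes, the trick pays off even for small polynomial degrees.
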
- internal::apply_tensor_product + internal::apply_tensor_product, direction, dof_to_quad, add> (this->data.shape_hessians.begin(), in, out); } @@ -5090,6 +5293,43 @@ FEEvaluation j-1][0]) < zero_tol, ExcMessage(error_message)); #endif + + // Compute symmetric and skew-symmetric part of shape values for even-odd + // decomposition + for (unsigned int i=0; i<(fe_degree+1)/2; ++i) + for (unsigned int q=0; q<(n_q_points_1d+1)/2; ++q) + { + shape_val_evenodd[i][q] = + 0.5 * (this->data.shape_values[i*n_q_points_1d+q] + + this->data.shape_values[i*n_q_points_1d+n_q_points_1d-1-q]); + shape_val_evenodd[fe_degree-i][q] = + 0.5 * (this->data.shape_values[i*n_q_points_1d+q] - + this->data.shape_values[i*n_q_points_1d+n_q_points_1d-1-q]); + + shape_gra_evenodd[i][q] = + 0.5 * (this->data.shape_gradients[i*n_q_points_1d+q] + + this->data.shape_gradients[i*n_q_points_1d+n_q_points_1d-1-q]); + shape_gra_evenodd[fe_degree-i][q] = + 0.5 * (this->data.shape_gradients[i*n_q_points_1d+q] - + this->data.shape_gradients[i*n_q_points_1d+n_q_points_1d-1-q]); + + shape_hes_evenodd[i][q] = + 0.5 * (this->data.shape_hessians[i*n_q_points_1d+q] + + this->data.shape_hessians[i*n_q_points_1d+n_q_points_1d-1-q]); + shape_hes_evenodd[fe_degree-i][q] = + 0.5 * (this->data.shape_hessians[i*n_q_points_1d+q] - + this->data.shape_hessians[i*n_q_points_1d+n_q_points_1d-1-q]); + } + if (fe_degree % 2 == 0) + for (unsigned int q=0; q<(n_q_points_1d+1)/2; ++q) + { + shape_val_evenodd[fe_degree/2][q] = + this->data.shape_values[(fe_degree/2)*n_q_points_1d+q]; + shape_gra_evenodd[fe_degree/2][q] = + this->data.shape_gradients[(fe_degree/2)*n_q_points_1d+q]; + shape_hes_evenodd[fe_degree/2][q] = + this->data.shape_hessians[(fe_degree/2)*n_q_points_1d+q]; + } } @@ -5129,9 +5369,9 @@ FEEvaluation ::apply_values (const VectorizedArray in [], VectorizedArray out []) { - internal::apply_tensor_product_values - (this->data.shape_values.begin(), in, out); + internal::apply_tensor_product_evenodd, direction, dof_to_quad, add, 0> + (shape_val_evenodd, in, out); } @@ -5145,9 +5385,9 @@ FEEvaluation ::apply_gradients (const VectorizedArray in [], VectorizedArray out []) { - internal::apply_tensor_product_gradients - (this->data.shape_gradients.begin(), in, out); + internal::apply_tensor_product_evenodd, direction, dof_to_quad, add, 1> + (shape_gra_evenodd, in, out); } @@ -5164,9 +5404,9 @@ FEEvaluation ::apply_hessians (const VectorizedArray in [], VectorizedArray out []) { - internal::apply_tensor_product_hessians - (this->data.shape_hessians.begin(), in, out); + internal::apply_tensor_product_evenodd, direction, dof_to_quad, add, 2> + (shape_hes_evenodd, in, out); } @@ -5407,8 +5647,8 @@ FEEvaluationGL ::apply_gradients (const VectorizedArray in [], VectorizedArray out []) { - internal::apply_tensor_product_gradients_gl + internal::apply_tensor_product_gradients_gl, direction, dof_to_quad, add> (this->data.shape_gradients.begin(), in, out); } diff --git a/deal.II/include/deal.II/matrix_free/matrix_free.h b/deal.II/include/deal.II/matrix_free/matrix_free.h index 8c8b0a83f6..ffc29f351e 100644 --- a/deal.II/include/deal.II/matrix_free/matrix_free.h +++ b/deal.II/include/deal.II/matrix_free/matrix_free.h @@ -1643,17 +1643,22 @@ reinit(const Mapping &mapping, // internal helper functions that define how to call MPI data exchange // functions: for generic vectors, do nothing at all. For distributed vectors, -// can call update_ghost_values_start function and so on. 
If we have -// collections of vectors, just do the individual functions of the -// components. this is a bit messy for block vectors, which use some -// additional helper functions to select the blocks +// call update_ghost_values_start function and so on. If we have collections +// of vectors, just do the individual functions of the components. In order to +// keep ghost values consistent (whether we are in read or write mode). the whole situation is a bit complicated by the fact +// that we need to treat block vectors differently, which use some additional +// helper functions to select the blocks and template magic. namespace internal { template - void update_ghost_values_start_block (const VectorStruct &vec, + bool update_ghost_values_start_block (const VectorStruct &vec, const unsigned int channel, internal::bool2type); template + void reset_ghost_values_block (const VectorStruct &vec, + const bool zero_out_ghosts, + internal::bool2type); + template void update_ghost_values_finish_block (const VectorStruct &vec, internal::bool2type); template @@ -1665,9 +1670,16 @@ namespace internal internal::bool2type); template - void update_ghost_values_start_block (const VectorStruct &, + bool update_ghost_values_start_block (const VectorStruct &, const unsigned int, internal::bool2type) + { + return false; + } + template + void reset_ghost_values_block (const VectorStruct &, + const bool, + internal::bool2type) {} template void update_ghost_values_finish_block (const VectorStruct &, @@ -1685,55 +1697,125 @@ namespace internal + // returns true if the vector was in a state without ghost values before, + // i.e., we need to zero out ghosts in the very end template inline - void update_ghost_values_start (const VectorStruct &vec, + bool update_ghost_values_start (const VectorStruct &vec, const unsigned int channel = 0) { - update_ghost_values_start_block(vec, channel, - internal::bool2type::value>()); + return + update_ghost_values_start_block(vec, channel, + internal::bool2type::value>()); } template inline - void update_ghost_values_start (const parallel::distributed::Vector &vec, + bool update_ghost_values_start (const parallel::distributed::Vector &vec, const unsigned int channel = 0) { + bool return_value = !vec.has_ghost_elements(); vec.update_ghost_values_start(channel); + return return_value; } template inline - void update_ghost_values_start (const std::vector &vec) + bool update_ghost_values_start (const std::vector &vec) { + bool return_value = false; for (unsigned int comp=0; comp inline - void update_ghost_values_start (const std::vector &vec) + bool update_ghost_values_start (const std::vector &vec) { + bool return_value = false; for (unsigned int comp=0; comp inline - void update_ghost_values_start_block (const VectorStruct &vec, + bool update_ghost_values_start_block (const VectorStruct &vec, const unsigned int channel, internal::bool2type) { + bool return_value = false; for (unsigned int i=0; i + inline + void reset_ghost_values (const VectorStruct &vec, + const bool zero_out_ghosts) + { + reset_ghost_values_block(vec, zero_out_ghosts, + internal::bool2type::value>()); + } + + + + template + inline + void reset_ghost_values (const parallel::distributed::Vector &vec, + const bool zero_out_ghosts) + { + if (zero_out_ghosts) + const_cast&>(vec).zero_out_ghosts(); + } + + + + template + inline + void reset_ghost_values (const std::vector &vec, + const bool zero_out_ghosts) + { + for (unsigned int comp=0; comp + inline + void reset_ghost_values (const std::vector &vec, + const bool 
zero_out_ghosts) + { + for (unsigned int comp=0; comp + inline + void reset_ghost_values_block (const VectorStruct &vec, + const bool zero_out_ghosts, + internal::bool2type) + { + for (unsigned int i=0; i::cell_loop OutVector &dst, const InVector &src) const { + // in any case, need to start the ghost import at the beginning + bool ghosts_were_not_set = internal::update_ghost_values_start (src); + #ifdef DEAL_II_WITH_THREADS // Use multithreading if so requested and if there is enough work to do in @@ -2181,7 +2266,6 @@ MatrixFree::cell_loop if (task_info.use_partition_partition == true) { - internal::update_ghost_values_start(src); tbb::empty_task *root = new( tbb::task::allocate_root() ) tbb::empty_task; unsigned int evens = task_info.evens; @@ -2221,10 +2305,7 @@ MatrixFree::cell_loop internal::MPIComDistribute *worker_dist = new (worker[j]->allocate_child()) internal::MPIComDistribute(src); - if (odds == 0) - break; - else - worker_dist->spawn(*worker_dist); + worker_dist->spawn(*worker_dist); } if (j::cell_loop root->wait_for_all(); root->destroy(*root); - internal::compress_finish(dst); } else // end of partition-partition, start of partition-color { - internal::update_ghost_values_start(src); unsigned int evens = task_info.evens; unsigned int odds = task_info.odds; @@ -2387,7 +2466,6 @@ MatrixFree::cell_loop internal::compress_start(dst); } - internal::compress_finish(dst); } } else @@ -2396,8 +2474,6 @@ MatrixFree::cell_loop { std::pair cell_range; - internal::update_ghost_values_start (src); - // First operate on cells where no ghost data is needed (inner cells) { cell_range.first = 0; @@ -2426,9 +2502,11 @@ MatrixFree::cell_loop cell_range.second = size_info.n_macro_cells; cell_operation (*this, dst, src, cell_range); } - - internal::compress_finish(dst); } + + // In every case, we need to finish transfers at the very end + internal::compress_finish(dst); + internal::reset_ghost_values(src, ghosts_were_not_set); } diff --git a/deal.II/source/CMakeLists.txt b/deal.II/source/CMakeLists.txt index aa94d1d8ec..ec1f36db0c 100644 --- a/deal.II/source/CMakeLists.txt +++ b/deal.II/source/CMakeLists.txt @@ -79,9 +79,9 @@ FOREACH(build ${DEAL_II_BUILD_TYPES}) COMPILE_DEFINITIONS "${DEAL_II_DEFINITIONS};${DEAL_II_DEFINITIONS_${build}}" COMPILE_FLAGS "${DEAL_II_CXX_FLAGS_${build}}" INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/${DEAL_II_LIBRARY_RELDIR}" - ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib" - LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib" - RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib" + ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DEAL_II_LIBRARY_RELDIR}" + LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DEAL_II_LIBRARY_RELDIR}" + RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/${DEAL_II_LIBRARY_RELDIR}" ) TARGET_LINK_LIBRARIES(${DEAL_II_BASE_NAME}${DEAL_II_${build}_SUFFIX} diff --git a/deal.II/source/base/CMakeLists.txt b/deal.II/source/base/CMakeLists.txt index 892456ad89..ab491a5e57 100644 --- a/deal.II/source/base/CMakeLists.txt +++ b/deal.II/source/base/CMakeLists.txt @@ -66,10 +66,15 @@ SET(_src utilities.cc ) +SET(_inst + data_out_base.inst.in + ) + FILE(GLOB _header ${CMAKE_SOURCE_DIR}/include/deal.II/base/*.h ) -DEAL_II_ADD_LIBRARY(obj_base OBJECT ${_src} ${_header} +DEAL_II_ADD_LIBRARY(obj_base OBJECT ${_src} ${_header} ${_inst} ${CMAKE_BINARY_DIR}/include/deal.II/base/config.h ) +EXPAND_INSTANTIATIONS(obj_base "${_inst}") diff --git a/deal.II/source/base/data_out_base.cc b/deal.II/source/base/data_out_base.cc index e5518afcb8..f1e611fc5d 100644 --- 
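
The reworked helpers above track whether src already carried ghost values: update_ghost_values_start() now returns true if the vector had no ghost values beforehand, and in that case the new reset_ghost_values() zeroes the ghosts again after compress_finish(), so cell_loop hands the source vector back in exactly the state the caller provided. A toy sketch of that bookkeeping pattern, with a mock class standing in for parallel::distributed::Vector (only the three calls visible in the diff are imitated; everything else is invented):

#include <iostream>

struct MockVector
{
  bool ghosted = false;
  bool has_ghost_elements() const { return ghosted; }
  void update_ghost_values()      { ghosted = true;  std::cout << "import ghost values\n"; }
  void zero_out_ghosts()          { ghosted = false; std::cout << "zero out ghost values\n"; }
};

template <typename VectorType>
void cell_loop_sketch(const VectorType &src_in)
{
  VectorType &src = const_cast<VectorType &>(src_in);

  // true if the vector had no ghost values before, i.e. we must zero
  // them out again at the very end
  const bool ghosts_were_not_set = !src.has_ghost_elements();
  src.update_ghost_values();

  std::cout << "... run cell_operation on all cells ...\n";

  // leave 'src' exactly as the caller handed it to us
  if (ghosts_were_not_set)
    src.zero_out_ghosts();
}

int main()
{
  MockVector v;                  // caller did not import ghosts
  cell_loop_sketch(v);
  std::cout << (v.has_ghost_elements() ? "ghosted\n" : "not ghosted\n");
  return 0;
}

The const_cast mirrors what reset_ghost_values() itself does: the source vector is formally const, but its ghost state is temporarily modified and then restored.
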
a/deal.II/source/base/data_out_base.cc +++ b/deal.II/source/base/data_out_base.cc @@ -379,7 +379,7 @@ void DataOutBase::DataOutFilter::write_data_set(const std::string &name, const u { r = filtered_points[i]; if (d < dimension) data_sets.back()[r*new_dim+d] = data_vectors(set_num+d, i); - else data_sets.back()[r] = 0; + else data_sets.back()[r*new_dim+d] = 0; } } } @@ -428,8 +428,11 @@ namespace //----------------------------------------------------------------------// //For a given patch, compute the node interpolating the corner nodes //linearly at the point (xstep, ystep, zstep)*1./n_subdivisions. -//If the popints are saved in the patch->data member, return the +//If the points are saved in the patch->data member, return the //saved point instead + +//TODO: Make this function return its value, rather than using a reference +// as first argument; take a reference for 'patch', not a pointer template inline void @@ -7955,28 +7958,6 @@ operator >> (std::istream &in, // explicit instantiations -#define INSTANTIATE(dim,spacedim) \ - template class DataOutInterface; \ - template class DataOutReader; \ - template struct DataOutBase::Patch; \ - template \ - std::ostream & \ - operator << (std::ostream &out, \ - const DataOutBase::Patch &patch); \ - template \ - std::istream & \ - operator >> (std::istream &in, \ - DataOutBase::Patch &patch) - -INSTANTIATE(1,1); -INSTANTIATE(2,2); -INSTANTIATE(3,3); -INSTANTIATE(4,4); -INSTANTIATE(1,2); -INSTANTIATE(1,3); -INSTANTIATE(2,3); -INSTANTIATE(3,4); - -#undef INSTANTIATE +#include "data_out_base.inst" DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/source/base/data_out_base.inst.in b/deal.II/source/base/data_out_base.inst.in new file mode 100644 index 0000000000..96cc2fea22 --- /dev/null +++ b/deal.II/source/base/data_out_base.inst.in @@ -0,0 +1,129 @@ +// --------------------------------------------------------------------- +// $Id$ +// +// Copyright (C) 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + +for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS) +{ +#if deal_II_dimension <= deal_II_space_dimension + template class DataOutInterface; + template class DataOutReader; + template struct DataOutBase::Patch; + + template + std::ostream & + operator << (std::ostream &out, + const DataOutBase::Patch &patch); + + template + std::istream & + operator >> (std::istream &in, + DataOutBase::Patch &patch); + + template + void + DataOutBase::write_vtk (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const VtkFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_vtu (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const VtkFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_ucd (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const UcdFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_dx (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const DXFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_gnuplot (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const GnuplotFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_povray (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const PovrayFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_eps (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const EpsFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_gmv (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const GmvFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_tecplot (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const TecplotFlags &flags, + std::ostream &out); + + template + void + DataOutBase::write_tecplot_binary (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const TecplotFlags &flags, + std::ostream &out); + template + void + DataOutBase::write_svg (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const SvgFlags &flags, + std::ostream &out); + template + void + DataOutBase::write_deal_II_intermediate (const std::vector > &patches, + const std::vector &data_names, + const std::vector > &vector_data_ranges, + const Deal_II_IntermediateFlags &flags, + std::ostream &out); +#endif +} diff --git a/deal.II/source/base/exceptions.cc b/deal.II/source/base/exceptions.cc index 89350862fc..81f8b3075e 100644 --- a/deal.II/source/base/exceptions.cc +++ b/deal.II/source/base/exceptions.cc @@ -281,16 +281,26 @@ namespace deal_II_exceptions namespace internals { - void abort (const ExceptionBase &exc) + void abort (const ExceptionBase &exc, bool nothrow /*= false*/) { if (dealii::deal_II_exceptions::abort_on_exception) { - //* Print the error message and bail out: + // Print the error message and bail out: std::cerr << exc.what() << std::endl; std::abort(); } 
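
The branch above aborts when abort_on_exception is set; the new code that follows adds a nothrow mode that only reports the exception to deallog, and the commit correspondingly switches destructor-time checks (Subscriptor::~Subscriptor, GrowingVectorMemory::~GrowingVectorMemory) to AssertNothrow. The underlying C++ rule is that a destructor must not throw: if it throws while the stack is already unwinding from another exception, the program terminates. A minimal standalone illustration of that rule, with invented names and no deal.II code:

#include <iostream>
#include <stdexcept>

struct NoisyCheck
{
  int counter = 1;               // pretend something is still subscribed
  ~NoisyCheck()
  {
    if (counter != 0)
      // do NOT throw here: if this destructor runs while another exception
      // is propagating, throwing would call std::terminate(); report instead
      std::cerr << "Exception: object still in use (counter=" << counter << ")\n";
  }
};

int main()
{
  try
    {
      NoisyCheck guard;
      throw std::runtime_error("some unrelated error");  // 'guard' unwinds here
    }
  catch (const std::exception &e)
    {
      std::cout << "caught: " << e.what() << '\n';       // still reachable
    }
  return 0;
}

Reporting instead of throwing keeps the diagnostic visible without that risk, which is presumably what the new nothrow branch of abort() is for.
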
+ else if (nothrow) + { + // We are not allowed to throw, and not allowed to abort. + // Just print the exception name to deallog and continue + // normally: + deallog << "Exception: " << exc.get_exc_name() << std::endl; + } else { + // We are not allowed to abort, so just throw the error so just + // throw the error so just throw the error so just throw the + // error: throw exc; } } diff --git a/deal.II/source/base/function_lib.cc b/deal.II/source/base/function_lib.cc index a1b5efd521..92d7097165 100644 --- a/deal.II/source/base/function_lib.cc +++ b/deal.II/source/base/function_lib.cc @@ -2218,13 +2218,13 @@ namespace Functions const double co = (r==0.) ? 0. : (p(0)-center(0))/r; const double si = (r==0.) ? 0. : (p(1)-center(1))/r; +#ifdef HAVE_JN const double dJn = (order==0) ? (-jn(1, r*wave_number)) : (.5*(jn(order-1, wave_number*r) -jn(order+1, wave_number*r))); Tensor<1,dim> result; result[0] = wave_number * co * dJn; result[1] = wave_number * si * dJn; -#ifdef HAVE_JN return result; #else Assert(false, ExcMessage("Bessel function jn was not found by configure")); diff --git a/deal.II/source/base/geometry_info.cc b/deal.II/source/base/geometry_info.cc index 6569451117..7bb041574e 100644 --- a/deal.II/source/base/geometry_info.cc +++ b/deal.II/source/base/geometry_info.cc @@ -1842,8 +1842,13 @@ template void GeometryInfo:: alternating_form_at_vertices -(const Point (&vertices)[vertices_per_cell], - Tensor (&forms)[vertices_per_cell]) +#ifndef DEAL_II_CONSTEXPR_BUG + (const Point (&vertices)[vertices_per_cell], + Tensor (&forms)[vertices_per_cell]) +#else + (const Point *vertices, + Tensor *forms) +#endif { // for each of the vertices, // compute the alternating form @@ -1901,32 +1906,48 @@ template void GeometryInfo<1>:: alternating_form_at_vertices -(const Point<1> (&vertices)[vertices_per_cell], - Tensor<1-1,1> (&forms)[vertices_per_cell]) +#ifndef DEAL_II_CONSTEXPR_BUG +(const Point<1> (&)[vertices_per_cell], + Tensor<1-1,1> (&)[vertices_per_cell]) +#else +(const Point<1> *, Tensor<1-1,1> *) +#endif ; template void GeometryInfo<1>:: alternating_form_at_vertices -(const Point<2> (&vertices)[vertices_per_cell], - Tensor<2-1,2> (&forms)[vertices_per_cell]) +#ifndef DEAL_II_CONSTEXPR_BUG +(const Point<2> (&)[vertices_per_cell], + Tensor<2-1,2> (&)[vertices_per_cell]) +#else +(const Point<2> *, Tensor<2-1,2> *) +#endif ; template void GeometryInfo<2>:: alternating_form_at_vertices +#ifndef DEAL_II_CONSTEXPR_BUG (const Point<2> (&vertices)[vertices_per_cell], Tensor<2-2,2> (&forms)[vertices_per_cell]) +#else +(const Point<2> *, Tensor<2-2,2> *) +#endif ; template void GeometryInfo<2>:: alternating_form_at_vertices +#ifndef DEAL_II_CONSTEXPR_BUG (const Point<3> (&vertices)[vertices_per_cell], Tensor<3-2,3> (&forms)[vertices_per_cell]) +#else +(const Point<3> *, Tensor<3-2,3> *) +#endif ; @@ -1934,8 +1955,12 @@ template void GeometryInfo<3>:: alternating_form_at_vertices +#ifndef DEAL_II_CONSTEXPR_BUG (const Point<3> (&vertices)[vertices_per_cell], Tensor<3-3,3> (&forms)[vertices_per_cell]) +#else +(const Point<3> *, Tensor<3-3,3> *) +#endif ; diff --git a/deal.II/source/base/parameter_handler.cc b/deal.II/source/base/parameter_handler.cc index 267fef855e..58f27f55fb 100644 --- a/deal.II/source/base/parameter_handler.cc +++ b/deal.II/source/base/parameter_handler.cc @@ -1658,7 +1658,7 @@ double ParameterHandler::get_double (const std::string &entry_string) const char *endptr; double d = std::strtod (s.c_str(), &endptr); // assert there was no error - AssertThrow ((s.c_str()!='\0') || 
(*endptr == '\0'), + AssertThrow ((*s.c_str() != '\0') || (*endptr == '\0'), ExcConversionError(s)); return d; diff --git a/deal.II/source/base/subscriptor.cc b/deal.II/source/base/subscriptor.cc index fa2af252e4..bc039582d7 100644 --- a/deal.II/source/base/subscriptor.cc +++ b/deal.II/source/base/subscriptor.cc @@ -105,8 +105,8 @@ Subscriptor::~Subscriptor () if (infostring == "") infostring = ""; - Assert (counter == 0, - ExcInUse (counter, object_info->name(), infostring)); + AssertNothrow (counter == 0, + ExcInUse (counter, object_info->name(), infostring)); } else { diff --git a/deal.II/source/lac/chunk_sparsity_pattern.cc b/deal.II/source/lac/chunk_sparsity_pattern.cc index 021d90b89b..57aaa9cf6c 100644 --- a/deal.II/source/lac/chunk_sparsity_pattern.cc +++ b/deal.II/source/lac/chunk_sparsity_pattern.cc @@ -790,5 +790,11 @@ template void ChunkSparsityPattern::copy_from (const FullMatrix &, const size_type , const bool); +template +void ChunkSparsityPattern::copy_from (const FullMatrix &, + const size_type); +template +void ChunkSparsityPattern::copy_from (const FullMatrix &, + const size_type); DEAL_II_NAMESPACE_CLOSE diff --git a/deal.II/source/lac/petsc_solver.cc b/deal.II/source/lac/petsc_solver.cc index 9da2aeef47..57c3bdb7c0 100644 --- a/deal.II/source/lac/petsc_solver.cc +++ b/deal.II/source/lac/petsc_solver.cc @@ -135,8 +135,8 @@ namespace PETScWrappers // in case of failure: throw // exception if (solver_control.last_check() != SolverControl::success) - throw SolverControl::NoConvergence (solver_control.last_step(), - solver_control.last_value()); + AssertThrow(false, SolverControl::NoConvergence (solver_control.last_step(), + solver_control.last_value())); // otherwise exit as normal } @@ -822,8 +822,10 @@ namespace PETScWrappers * throw exception */ if (solver_control.last_check() != SolverControl::success) - throw SolverControl::NoConvergence (solver_control.last_step(), - solver_control.last_value()); + { + AssertThrow(false, SolverControl::NoConvergence (solver_control.last_step(), + solver_control.last_value())); + } else { /** diff --git a/deal.II/source/lac/slepc_solver.cc b/deal.II/source/lac/slepc_solver.cc index 6ea1e27454..3fd24cd777 100644 --- a/deal.II/source/lac/slepc_solver.cc +++ b/deal.II/source/lac/slepc_solver.cc @@ -92,7 +92,7 @@ namespace SLEPcWrappers } void - SolverBase::set_target_eigenvalue (const double &this_target) + SolverBase::set_target_eigenvalue (const PetscScalar &this_target) { target_eigenvalue = this_target; } @@ -225,8 +225,8 @@ namespace SLEPcWrappers // and in case of failure: throw exception if (solver_control.last_check () != SolverControl::success) - throw SolverControl::NoConvergence (solver_control.last_step (), - solver_control.last_value ()); + AssertThrow(false, SolverControl::NoConvergence (solver_control.last_step(), + solver_control.last_value())); } } diff --git a/deal.II/source/lac/solver_control.cc b/deal.II/source/lac/solver_control.cc index a6f1c61cb5..8f40d154a4 100644 --- a/deal.II/source/lac/solver_control.cc +++ b/deal.II/source/lac/solver_control.cc @@ -26,41 +26,6 @@ DEAL_II_NAMESPACE_OPEN /*----------------------- SolverControl ---------------------------------*/ -SolverControl::NoConvergence::NoConvergence (const unsigned int last_step, - const double last_residual) - : - last_step (last_step), - last_residual (last_residual) -{} - - -const char * -SolverControl::NoConvergence::what () const throw () -{ - // have a place where to store the - // description of the exception as a char * - // - // this thing 
obviously is not multi-threading - // safe, but we don't care about that for now - // - // we need to make this object static, since - // we want to return the data stored in it - // and therefore need a lifetime which is - // longer than the execution time of this - // function - static std::string description; - // convert the messages printed by the - // exceptions into a std::string - std::ostringstream out; - out << "Iterative method reported convergence failure in step " - << last_step << " with residual " << last_residual; - - description = out.str(); - return description.c_str(); -} - - - SolverControl::SolverControl (const unsigned int maxiter, const double tolerance, const bool m_log_history, diff --git a/deal.II/source/lac/trilinos_solver.cc b/deal.II/source/lac/trilinos_solver.cc index 25edefb025..6eb7ae8567 100644 --- a/deal.II/source/lac/trilinos_solver.cc +++ b/deal.II/source/lac/trilinos_solver.cc @@ -264,8 +264,8 @@ namespace TrilinosWrappers solver_control.check (solver.NumIters(), solver.TrueResidual()); if (solver_control.last_check() != SolverControl::success) - throw SolverControl::NoConvergence (solver_control.last_step(), - solver_control.last_value()); + AssertThrow(false, SolverControl::NoConvergence (solver_control.last_step(), + solver_control.last_value())); } @@ -458,8 +458,8 @@ namespace TrilinosWrappers solver_control.check (0, 0); if (solver_control.last_check() != SolverControl::success) - throw SolverControl::NoConvergence (solver_control.last_step(), - solver_control.last_value()); + AssertThrow(false, SolverControl::NoConvergence (solver_control.last_step(), + solver_control.last_value())); } @@ -523,8 +523,8 @@ namespace TrilinosWrappers solver_control.check (0, 0); if (solver_control.last_check() != SolverControl::success) - throw SolverControl::NoConvergence (solver_control.last_step(), - solver_control.last_value()); + AssertThrow(false, SolverControl::NoConvergence (solver_control.last_step(), + solver_control.last_value())); } diff --git a/deal.II/source/lac/vector_memory.cc b/deal.II/source/lac/vector_memory.cc index ecc97a6b0d..9ed021a997 100644 --- a/deal.II/source/lac/vector_memory.cc +++ b/deal.II/source/lac/vector_memory.cc @@ -104,8 +104,8 @@ template inline GrowingVectorMemory::~GrowingVectorMemory() { - AssertThrow(current_alloc == 0, - StandardExceptions::ExcMemoryLeak(current_alloc)); + AssertNothrow(current_alloc == 0, + StandardExceptions::ExcMemoryLeak(current_alloc)); if (log_statistics) { deallog << "GrowingVectorMemory:Overall allocated vectors: " diff --git a/deal.II/source/numerics/data_out_dof_data.inst.in b/deal.II/source/numerics/data_out_dof_data.inst.in index 08ee60879b..ea420b3519 100644 --- a/deal.II/source/numerics/data_out_dof_data.inst.in +++ b/deal.II/source/numerics/data_out_dof_data.inst.in @@ -105,6 +105,7 @@ for (VEC : SERIAL_VECTORS; DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENS // things for DataOutRotation +#if deal_II_dimension < 3 template void DataOut_DoFData,deal_II_dimension+1,deal_II_dimension+1>:: add_data_vector (const VEC &, @@ -143,12 +144,12 @@ for (VEC : SERIAL_VECTORS; DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENS add_data_vector (const DH &, const VEC &, const DataPostprocessor::space_dimension> &); - +#endif // codim 1 - #if deal_II_dimension < 3 +#if deal_II_dimension < 3 template void DataOut_DoFData,deal_II_dimension,deal_II_dimension+1>:: add_data_vector (const VEC &, @@ -187,7 +188,7 @@ for (VEC : SERIAL_VECTORS; DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENS 
add_data_vector (const DH &, const VEC &, const DataPostprocessor::space_dimension> &); - #endif +#endif @@ -315,6 +316,7 @@ for (DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENSIONS) // things for DataOutRotation +#if deal_II_dimension < 3 template void DataOut_DoFData,deal_II_dimension+1,deal_II_dimension+1>:: add_data_vector (const IndexSet &, @@ -348,10 +350,11 @@ for (DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENSIONS) const IndexSet &, const std::vector &, const std::vector &); +#endif // codim 1 - #if deal_II_dimension < 3 +#if deal_II_dimension < 3 template void DataOut_DoFData,deal_II_dimension,deal_II_dimension+1>:: add_data_vector (const IndexSet &, @@ -370,7 +373,7 @@ for (DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENSIONS) DataOut_DoFData,deal_II_dimension,deal_II_dimension+1>:: add_data_vector (const IndexSet &, const DataPostprocessor::space_dimension> &); - #endif +#endif } @@ -379,21 +382,21 @@ for (DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENSIONS) for (DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENSIONS) { template class DataOut_DoFData,deal_II_dimension>; - template class DataOut_DoFData,deal_II_dimension+1>; - #if deal_II_dimension < 3 +#if deal_II_dimension < 3 + template class DataOut_DoFData,deal_II_dimension+1>; template class DataOut_DoFData,deal_II_dimension,deal_II_dimension+1>; template class DataOut_DoFData,deal_II_dimension,deal_II_dimension+1>; - #endif +#endif - #if deal_II_dimension >= 2 +#if deal_II_dimension >= 2 template class DataOut_DoFData,deal_II_dimension-1,deal_II_dimension>; - #endif +#endif - #if deal_II_dimension == 3 +#if deal_II_dimension == 3 template class DataOut_DoFData,1,3>; template class DataOut_DoFData,1,3>; - #endif +#endif } @@ -416,7 +419,7 @@ for (DH : DOFHANDLER_TEMPLATES; deal_II_dimension : DIMENSIONS; deal_II_space_di namespace DataOut \{ #if deal_II_dimension <= deal_II_space_dimension template - void + void ParallelDataBase:: reinit_all_fe_values > (std::vector > > > &dof_data, diff --git a/deal.II/source/numerics/data_out_rotation.inst.in b/deal.II/source/numerics/data_out_rotation.inst.in index a44a7c69f8..87c868bf7c 100644 --- a/deal.II/source/numerics/data_out_rotation.inst.in +++ b/deal.II/source/numerics/data_out_rotation.inst.in @@ -17,5 +17,7 @@ for (deal_II_dimension : DIMENSIONS) { +#if deal_II_dimension < 3 template class DataOutRotation >; +#endif } diff --git a/deal.II/source/numerics/data_out_stack.inst.in b/deal.II/source/numerics/data_out_stack.inst.in index 67a6ccc773..b3f3bb27ff 100644 --- a/deal.II/source/numerics/data_out_stack.inst.in +++ b/deal.II/source/numerics/data_out_stack.inst.in @@ -17,6 +17,7 @@ for (deal_II_dimension : DIMENSIONS) { +#if deal_II_dimension < 3 template class DataOutStack >; template void DataOutStack >:: @@ -34,4 +35,5 @@ add_data_vector (const Vector &vec, template void DataOutStack >:: add_data_vector (const Vector &vec, const std::string &name); +#endif } diff --git a/deal.II/source/numerics/matrix_tools.cc b/deal.II/source/numerics/matrix_tools.cc index 75d21fe9e8..054d623452 100644 --- a/deal.II/source/numerics/matrix_tools.cc +++ b/deal.II/source/numerics/matrix_tools.cc @@ -1271,12 +1271,6 @@ namespace MatrixCreator copy_data.dofs.resize(copy_data.dofs_per_cell); cell->get_dof_indices (copy_data.dofs); - const unsigned int max_dofs_per_cell = fe_collection.max_dofs_per_cell(), - max_dofs_per_face = fe_collection.max_dofs_per_face(); - - FullMatrix cell_matrix(max_dofs_per_cell, max_dofs_per_cell); - Vector cell_vector(max_dofs_per_cell); - 
UpdateFlags update_flags = UpdateFlags (update_values | update_JxW_values | diff --git a/deal.II/tests/CMakeLists.txt b/deal.II/tests/CMakeLists.txt new file mode 100644 index 0000000000..681d3851bf --- /dev/null +++ b/deal.II/tests/CMakeLists.txt @@ -0,0 +1,122 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +# +# This is a bloody hack to avoid a severe performance penalty when using +# 12k top level targets with GNU Make that really does not like that... +# +# The only choice we have is to set up every test subdirectory as an +# independent project. Unfortunately this adds quite a significant amount +# of complexity :-( +# + +ADD_SUBDIRECTORY(quick_tests) + +# +# Custom targets to set and clean up the testsuite: +# + +# Setup tests: +ADD_CUSTOM_TARGET(setup_tests) + +# Clean all tests +ADD_CUSTOM_TARGET(clean_tests) + +# Remove all tests: +ADD_CUSTOM_TARGET(prune_tests) + +# +# Write a minimalistic CTestTestfile.cmake file to CMAKE_BINARY_DIR and +# CMAKE_CURRENT_BINARY_DIR: +# +FILE(WRITE ${CMAKE_BINARY_DIR}/CTestTestfile.cmake + "SUBDIRS(tests)" + ) +FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/CTestTestfile.cmake "") + +# +# Pass all relevant "TEST_" and "_DIR" variables down to the subprojects: +# +SET(_options) +LIST(APPEND _options -DDEAL_II_SOURCE_DIR=${CMAKE_SOURCE_DIR}) +LIST(APPEND _options -DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR}) +FOREACH(_var + NUMDIFF_DIR + TEST_DIFF + TEST_OVERRIDE_LOCATION + TEST_PICKUP_REGEX + TEST_TIME_LIMIT + ) + LIST(APPEND _options "-U${_var}") + IF(DEFINED ${_var}) + LIST(APPEND _options "-D${_var}=${${_var}}") + ENDIF() +ENDFOREACH() + +# +# Glob together a list of all subfolders to set up: +# +FILE(GLOB _categories RELATIVE ${TEST_DIR} ${TEST_DIR}/*) +SET(_categories all-headers build_tests mesh_converter ${_categories}) +LIST(REMOVE_DUPLICATES _categories) + +# +# Define a subproject for every enabled category: +# + +FOREACH(_category ${_categories}) + IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${_category}/CMakeLists.txt) + SET(_category_dir ${CMAKE_CURRENT_SOURCE_DIR}/${_category}) + ELSEIF(EXISTS ${TEST_DIR}/${_category}/CMakeLists.txt) + SET(_category_dir ${TEST_DIR}/${_category}) + ELSE() + SET(_category_dir) + ENDIF() + + IF(NOT "${_category_dir}" STREQUAL "") + + ADD_CUSTOM_TARGET(setup_tests_${_category} + COMMAND ${CMAKE_COMMAND} -E make_directory + ${CMAKE_CURRENT_BINARY_DIR}/${_category} + COMMAND cd ${CMAKE_CURRENT_BINARY_DIR}/${_category} && + ${CMAKE_COMMAND} -G${CMAKE_GENERATOR} ${_options} ${_category_dir} + > /dev/null # Shoo! + DEPENDS ${_category_dir} + COMMENT "Processing tests/${_category}" + ) + ADD_DEPENDENCIES(setup_tests setup_tests_${_category}) + + # depend on a valid build directory (libraries built, config in place): + ADD_DEPENDENCIES(setup_tests_${_category} setup_build_dir) + + ADD_CUSTOM_TARGET(clean_tests_${_category} + COMMAND [ ! 
-d ${_category} ] || ${CMAKE_COMMAND} + --build ${CMAKE_CURRENT_BINARY_DIR}/${_category} --target clean + ) + ADD_DEPENDENCIES(clean_tests clean_tests_${_category}) + + ADD_CUSTOM_TARGET(prune_tests_${_category} + COMMAND ${CMAKE_COMMAND} -E remove_directory + ${CMAKE_CURRENT_BINARY_DIR}/${_category} + ) + ADD_DEPENDENCIES(prune_tests prune_tests_${_category}) + + FILE(APPEND ${CMAKE_CURRENT_BINARY_DIR}/CTestTestfile.cmake + "SUBDIRS(${_category})\n" + ) + + ENDIF() +ENDFOREACH() diff --git a/deal.II/tests/all-headers/CMakeLists.txt b/deal.II/tests/all-headers/CMakeLists.txt new file mode 100644 index 0000000000..2a68496524 --- /dev/null +++ b/deal.II/tests/all-headers/CMakeLists.txt @@ -0,0 +1,89 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8) +INCLUDE(${DEAL_II_SOURCE_DIR}/cmake/setup_testsuite.cmake) +PROJECT(testsuite CXX) +INCLUDE(${DEAL_II_TARGET_CONFIG}) + +# +# Header tests are special: +# +# Construct a list of all header files and build up a test that just tries +# to compile a simple worker (test_header.cc) that only includes the given +# header file. We omit linking to save some time. +# + +SET(_category all-headers) + +FILE(GLOB_RECURSE _header ${DEAL_II_SOURCE_DIR}/include/deal.II/*.h) + +FOREACH(_full_file ${_header}) + GET_FILENAME_COMPONENT(_file ${_full_file} NAME) + + # TODO: A more sophisticated way to get the relative include path: + GET_FILENAME_COMPONENT(_path ${_full_file} PATH) + GET_FILENAME_COMPONENT(_path ${_path} NAME) + IF("${_path}" STREQUAL "std_cxx1x") + SET(_path "base/std_cxx1x") + ENDIF() + + FOREACH(_build ${DEAL_II_BUILD_TYPES}) + STRING(TOLOWER ${_build} _build_lowercase) + + SET(_test ${_category}/${_path}/${_file}.${_build_lowercase}) + STRING(REGEX REPLACE "\\/" "-" _target ${_test}) + + # Respect TEST_PICKUP_REGEX: + IF( "${TEST_PICKUP_REGEX}" STREQUAL "" OR + _test MATCHES "${TEST_PICKUP_REGEX}" ) + + # Add an object library for each header file and build configuration: + ADD_LIBRARY(${_target} OBJECT EXCLUDE_FROM_ALL test_header.cc) + + SET_TARGET_PROPERTIES(${_target} PROPERTIES + LINK_FLAGS "${DEAL_II_LINKER_FLAGS} ${DEAL_II_LINKER_FLAGS_${_build}}" + COMPILE_DEFINITIONS "${DEAL_II_USER_DEFINITIONS};${DEAL_II_USER_DEFINITIONS_${_build}}" + COMPILE_FLAGS "${DEAL_II_CXX_FLAGS} ${DEAL_II_CXX_FLAGS_${_build}}" + ) + SET_PROPERTY(TARGET ${_target} APPEND PROPERTY + INCLUDE_DIRECTORIES "${DEAL_II_INCLUDE_DIRS}" + ) + SET_PROPERTY(TARGET ${_target} APPEND PROPERTY + COMPILE_DEFINITIONS HEADER= + ) + + ADD_CUSTOM_TARGET(${_target}.build + COMMAND + echo "${_test}: BUILD successful." + && echo "${_test}: PASSED." 
+ ) + ADD_DEPENDENCIES(${_target}.build ${_target}) + + # And finally add the test: + ADD_TEST(NAME ${_test} + COMMAND ${CMAKE_COMMAND} -DTRGT=${_target}.build -DTEST=${_test} + -DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR} + -P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + SET_TESTS_PROPERTIES(${_test} PROPERTIES + LABEL "${_category}" + TIMEOUT ${TEST_TIME_LIMIT} + ) + ENDIF() + ENDFOREACH() +ENDFOREACH() diff --git a/deal.II/tests/all-headers/test_header.cc b/deal.II/tests/all-headers/test_header.cc new file mode 100644 index 0000000000..fbd150cfed --- /dev/null +++ b/deal.II/tests/all-headers/test_header.cc @@ -0,0 +1,10 @@ +#include HEADER + +// Make sure that config.h is always included: +DEAL_II_NAMESPACE_OPEN +DEAL_II_NAMESPACE_CLOSE + +int main() +{ + return 0; +} diff --git a/deal.II/tests/build_tests/CMakeLists.txt b/deal.II/tests/build_tests/CMakeLists.txt new file mode 100644 index 0000000000..e0beebfc1e --- /dev/null +++ b/deal.II/tests/build_tests/CMakeLists.txt @@ -0,0 +1,182 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8) +INCLUDE(${DEAL_II_SOURCE_DIR}/cmake/setup_testsuite.cmake) +PROJECT(testsuite CXX) +INCLUDE(${DEAL_II_TARGET_CONFIG}) + +# +# build_tests tests are special: +# +# Try to configure and build every example step in debug and release +# configuration. Error condition is that if a test is successfully +# configured it must compile successfully... +# +# ... 
except for the tests listed below: They have to configure, build and +# run successfully in the given build configuration: +# + +SET(_category build_tests) + +# Run a minimalistic set of steps in debug configuration: +SET(_debug_steps + step-1 step-2 step-3 step-4 step-5 + step-6 step-47 + ) + +# Run all configuration independent steps in release configuration: +SET(_release_steps + step-1 step-2 step-3 step-4 step-5 + step-6 step-7 step-8 step-9 step-10 + step-11 step-12 step-13 step-14 step-16 + step-20 step-23 step-25 step-26 step-27 + step-30 step-38 step-39 step-44 step-45 + step-47 step-48 step-49 + ) + +FILE(GLOB _steps ${DEAL_II_SOURCE_DIR}/examples/step-*) + +FOREACH(_step_full ${_steps}) + GET_FILENAME_COMPONENT(_step ${_step_full} NAME) + + FOREACH(_build ${DEAL_II_BUILD_TYPES}) + STRING(TOLOWER ${_build} _build_lowercase) + + SET(_test ${_category}/${_step}.${_build_lowercase}) + + # Respect TEST_PICKUP_REGEX: + IF( "${TEST_PICKUP_REGEX}" STREQUAL "" OR + _test MATCHES "${TEST_PICKUP_REGEX}" ) + + SET(_step_dir ${CMAKE_CURRENT_BINARY_DIR}/${_step}.${_build_lowercase}) + + FILE(GLOB _files ${_step_full}/*) + SET(_command) + FOREACH(_file ${_files}) + IF(NOT _file MATCHES "/(doc|.svn)") + LIST(APPEND _command + COMMAND ${CMAKE_COMMAND} -E copy ${_file} ${_step_dir} + ) + ENDIF() + ENDFOREACH() + + # A rule how to copy the example step to the current directory: + ADD_CUSTOM_COMMAND(OUTPUT ${_step_dir}/CMakeLists.txt + COMMAND ${CMAKE_COMMAND} -E make_directory ${_step_dir} + ${_command} + DEPENDS + ${_files} + ${DEAL_II_TARGET_DEBUG} + ${DEAL_II_TARGET_RELEASE} + ) + + # And a rule on how to configure the example step: + ADD_CUSTOM_COMMAND(OUTPUT ${_step_dir}/configure_output + COMMAND rm -f ${_step_dir}/failing_configure_output + COMMAND ${CMAKE_COMMAND} + -DDEAL_II_DIR=${DEAL_II_BINARY_DIR} -DCMAKE_BUILD_TYPE=${_build} . + > ${_step_dir}/configure_output 2>&1 + || (mv ${_step_dir}/configure_output + ${_step_dir}/failing_configure_output + && echo "${_test}: CONFIGURE failed. Output:" + && cat ${_step_dir}/failing_configure_output) # succeed anyway! + WORKING_DIRECTORY ${_step_dir} + DEPENDS ${_step_dir}/CMakeLists.txt + ${DEAL_II_PATH}/${DEAL_II_PROJECT_CONFIG_RELDIR}/${DEAL_II_PACKAGE_NAME}Config.cmake + ) + + # And a rule on how to build the example step: + ADD_CUSTOM_COMMAND(OUTPUT ${_step_dir}/build_output + COMMAND [ ! -f ${_step_dir}/configure_output ] + || (rm -f ${_step_dir}/failing_build_output + &&${CMAKE_COMMAND} --build ${_step_dir} --target all + > ${_step_dir}/build_output 2>&1) + || (mv ${_step_dir}/build_output + ${_step_dir}/failing_build_output + && echo "${_test}: CONFIGURE successful." + && echo "${_test}: BUILD failed. Output:" + && cat ${_step_dir}/failing_build_output + && exit 1) + COMMAND [ -f ${_step_dir}/configure_output ] + || (rm -f ${_step_dir}/build_output + && rm -f ${_step_dir}/failing_build_output + && echo "${_test}: BUILD stage not invoked due to failing CONFIGURE") # succeed anyway! + WORKING_DIRECTORY ${_step_dir} + DEPENDS ${_step_dir}/configure_output + ) + + # And a rule on how to run the example step: + ADD_CUSTOM_COMMAND(OUTPUT ${_step_dir}/run_output + COMMAND [ ! -f ${_step_dir}/build_output ] + || (rm -f ${_step_dir}/failing_run_output + && ${CMAKE_COMMAND} --build ${_step_dir} --target run + > ${_step_dir}/run_output 2>&1) + || (mv ${_step_dir}/run_output + ${_step_dir}/failing_run_output + && echo "${_test}: CONFIGURE successful." + && echo "${_test}: BUILD successful." + && echo "${_test}: RUN failed. 
Output:" + && cat ${_step_dir}/failing_run_output + && exit 1) + COMMAND [ -f ${_step_dir}/build_output ] + || ( rm -f ${_step_dir}/run_output + && rm -f ${_step_dir}/failing_run_output + && echo "${_test}: RUN stage not invoked due to failing BUILD" + && exit 1) + WORKING_DIRECTORY ${_step_dir} + DEPENDS ${_step_dir}/build_output + ) + + ITEM_MATCHES(_match ${_step} ${_${_build_lowercase}_steps}) + IF(_match) + # Add a full test (CONFIGURE, BUILD, RUN): + SET(_target ${_category}-${_step}.${_build_lowercase}.run) + ADD_CUSTOM_TARGET(${_target} + COMMAND + echo "${_test}: CONFIGURE successful." + && echo "${_test}: BUILD successful." + && echo "${_test}: RUN successful." + && echo "${_test}: PASSED." + DEPENDS ${_step_dir}/run_output + ) + ELSE() + # Add a minimal test (CONFIGURE, BUILD): + SET(_target ${_category}-${_step}.${_build_lowercase}.build) + ADD_CUSTOM_TARGET(${_target} + COMMAND + echo "${_test}: CONFIGURE successful." + && echo "${_test}: BUILD successful." + && echo "${_test}: PASSED." + DEPENDS ${_step_dir}/build_output + ) + ENDIF() + + # And finally add the test: + ADD_TEST(NAME ${_test} + COMMAND ${CMAKE_COMMAND} -DTRGT=${_target} -DTEST=${_test} + -DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR} + -P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + SET_TESTS_PROPERTIES(${_test} PROPERTIES + LABEL "${_category}" + TIMEOUT ${TEST_TIME_LIMIT} + ) + + ENDIF() + ENDFOREACH() +ENDFOREACH() diff --git a/deal.II/tests/mesh_converter/CMakeLists.txt b/deal.II/tests/mesh_converter/CMakeLists.txt new file mode 100644 index 0000000000..9d1e80e293 --- /dev/null +++ b/deal.II/tests/mesh_converter/CMakeLists.txt @@ -0,0 +1,108 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. +## +## --------------------------------------------------------------------- + +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8) +INCLUDE(${DEAL_II_SOURCE_DIR}/cmake/setup_testsuite.cmake) +PROJECT(testsuite CXX) +INCLUDE(${DEAL_II_EXECUTABLE_CONFIG}) + +# +# Tests for the mesh_converter executable +# + +SET(_category mesh_converter) + +FILE(GLOB _meshes ${CMAKE_CURRENT_SOURCE_DIR}/meshes/?d/*.inp) + +FOREACH(_full_file ${_meshes}) + GET_FILENAME_COMPONENT(_mesh ${_full_file} NAME_WE) + + SET(_test_full ${_category}/${_mesh}) + SET(_test_directory ${CMAKE_CURRENT_BINARY_DIR}/${_mesh}) # directory to run the test in + STRING(REGEX REPLACE "\\.inp$" ".ucd" _comparison_file ${_full_file}) + + # Is this a 3D or 2D mesh? 
+ SET(_dim 3) + IF(_full_file MATCHES "meshes/2d") + SET(_dim 2) + ENDIF() + + # Respect TEST_PICKUP_REGEX: + IF( "${TEST_PICKUP_REGEX}" STREQUAL "" OR + _test_full MATCHES "${TEST_PICKUP_REGEX}" ) + + FILE(MAKE_DIRECTORY ${_test_directory}) + + ADD_CUSTOM_COMMAND(OUTPUT ${_test_directory}/output + COMMAND rm -f ${_test_directory}/failing_output + COMMAND touch ${_test_directory}/output + COMMAND + mesh_converter_exe ${_dim} ${_full_file} output + || (mv ${_test_directory}/output + ${_test_directory}/failing_output + && echo "${_test_full}: RUN failed. ------ Result: ${_test_directory}/failing_output" + && echo "${_test_full}: RUN failed. ------ Partial output:" + && cat ${_test_directory}/failing_output + && exit 1) + COMMAND + ${PERL_EXECUTABLE} -pi ${DEAL_II_SOURCE_DIR}/cmake/scripts/normalize.pl + ${_test_directory}/output + WORKING_DIRECTORY ${_test_directory} + DEPENDS mesh_converter_exe ${DEAL_II_SOURCE_DIR}/cmake/scripts/normalize.pl + ) + ADD_CUSTOM_COMMAND(OUTPUT ${_test_directory}/diff + COMMAND rm -f ${_test_directory}/failing_diff + COMMAND touch ${_test_directory}/diff + COMMAND + ${TEST_DIFF} ${_test_directory}/output ${_comparison_file} > ${_test_directory}/diff + || (mv ${_test_directory}/diff + ${_test_directory}/failing_diff + && echo "${_test_full}: RUN successful." + && echo "${_test_full}: DIFF failed. ------ Source: ${_comparison_file}" + && echo "${_test_full}: DIFF failed. ------ Result: ${_test_directory}/output" + && echo "${_test_full}: DIFF failed. ------ Diff: ${_test_directory}/failing_diff" + && echo "${_test_full}: DIFF failed. ------ Diffs as follows:" + && cat ${_test_directory}/failing_diff + && exit 1) + WORKING_DIRECTORY ${_test_directory} + DEPENDS + ${_test_directory}/output + ${_comparison_file} + ) + + ADD_CUSTOM_TARGET(${_mesh}.diff DEPENDS ${_test_directory}/diff + COMMAND + echo "${_test_full}: RUN successful." + && echo "${_test_full}: DIFF successful." + && echo "${_test_full}: PASSED." 
+ ) + + ADD_TEST(NAME ${_test_full} + COMMAND ${CMAKE_COMMAND} + -DTRGT=${_mesh}.diff + -DTEST=${_test_full} + -DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR} + -P ${DEAL_II_SOURCE_DIR}/cmake/scripts/run_test.cmake + WORKING_DIRECTORY ${_test_directory} + ) + SET_TESTS_PROPERTIES(${_test_full} PROPERTIES + LABEL "${_category}" + TIMEOUT ${TEST_TIME_LIMIT} + ) + + ENDIF() + +ENDFOREACH() diff --git a/deal.II/tests/mesh_converter/meshes/2d/2d_test.cub b/deal.II/tests/mesh_converter/meshes/2d/2d_test.cub new file mode 100644 index 0000000000..90511d4e04 Binary files /dev/null and b/deal.II/tests/mesh_converter/meshes/2d/2d_test.cub differ diff --git a/deal.II/tests/mesh_converter/meshes/2d/2d_test.inp b/deal.II/tests/mesh_converter/meshes/2d/2d_test.inp new file mode 100644 index 0000000000..687c0ba3a5 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/2d/2d_test.inp @@ -0,0 +1,252 @@ +*HEADING +cubit(rent/Geometry/Mesh_conversion/Program/mesh/2d/2d_test.inp): 01/17/2009: 16 +*NODE + 1, -1.000000e+00, 0.000000e+00, 0.000000e+00 + 2, -1.000000e+00, -1.666667e-01, 0.000000e+00 + 3, -8.333333e-01, -1.666667e-01, 0.000000e+00 + 4, -8.333333e-01, 0.000000e+00, 0.000000e+00 + 5, -1.000000e+00, -3.333333e-01, 0.000000e+00 + 6, -8.333333e-01, -3.333333e-01, 0.000000e+00 + 7, -1.000000e+00, -5.000000e-01, 0.000000e+00 + 8, -8.333333e-01, -5.000000e-01, 0.000000e+00 + 9, -1.000000e+00, -6.666667e-01, 0.000000e+00 + 10, -8.333333e-01, -6.666667e-01, 0.000000e+00 + 11, -1.000000e+00, -8.333333e-01, 0.000000e+00 + 12, -8.333333e-01, -8.333333e-01, 0.000000e+00 + 13, -1.000000e+00, -1.000000e+00, 0.000000e+00 + 14, -8.333333e-01, -1.000000e+00, 0.000000e+00 + 15, -6.666667e-01, -1.666667e-01, 0.000000e+00 + 16, -6.666667e-01, 0.000000e+00, 0.000000e+00 + 17, -6.666667e-01, -3.333333e-01, 0.000000e+00 + 18, -6.666667e-01, -5.000000e-01, 0.000000e+00 + 19, -6.666667e-01, -6.666667e-01, 0.000000e+00 + 20, -6.666667e-01, -8.333333e-01, 0.000000e+00 + 21, -6.666667e-01, -1.000000e+00, 0.000000e+00 + 22, -5.000000e-01, -1.666667e-01, 0.000000e+00 + 23, -5.000000e-01, 0.000000e+00, 0.000000e+00 + 24, -5.000000e-01, -3.333333e-01, 0.000000e+00 + 25, -5.000000e-01, -5.000000e-01, 0.000000e+00 + 26, -5.000000e-01, -6.666667e-01, 0.000000e+00 + 27, -5.000000e-01, -8.333333e-01, 0.000000e+00 + 28, -5.000000e-01, -1.000000e+00, 0.000000e+00 + 29, -3.333333e-01, -1.666667e-01, 0.000000e+00 + 30, -3.333333e-01, 0.000000e+00, 0.000000e+00 + 31, -3.333333e-01, -3.333333e-01, 0.000000e+00 + 32, -3.333333e-01, -5.000000e-01, 0.000000e+00 + 33, -3.333333e-01, -6.666667e-01, 0.000000e+00 + 34, -3.333333e-01, -8.333333e-01, 0.000000e+00 + 35, -3.333333e-01, -1.000000e+00, 0.000000e+00 + 36, -1.666667e-01, -1.666667e-01, 0.000000e+00 + 37, -1.666667e-01, 0.000000e+00, 0.000000e+00 + 38, -1.666667e-01, -3.333333e-01, 0.000000e+00 + 39, -1.666667e-01, -5.000000e-01, 0.000000e+00 + 40, -1.666667e-01, -6.666667e-01, 0.000000e+00 + 41, -1.666667e-01, -8.333333e-01, 0.000000e+00 + 42, -1.666667e-01, -1.000000e+00, 0.000000e+00 + 43, -1.387779e-17, -1.666667e-01, 0.000000e+00 + 44, 0.000000e+00, 0.000000e+00, 0.000000e+00 + 45, -3.122502e-17, -3.333333e-01, 0.000000e+00 + 46, -4.250073e-17, -5.000000e-01, 0.000000e+00 + 47, -4.531965e-17, -6.666667e-01, 0.000000e+00 + 48, -5.990217e-17, -8.333333e-01, 0.000000e+00 + 49, 0.000000e+00, -1.000000e+00, 0.000000e+00 + 50, 1.666667e-01, -1.666667e-01, 0.000000e+00 + 51, 1.666667e-01, 1.387779e-17, 0.000000e+00 + 52, 1.666667e-01, -3.333333e-01, 0.000000e+00 + 53, 1.666667e-01, -5.000000e-01, 
0.000000e+00 + 54, 1.666667e-01, -6.666667e-01, 0.000000e+00 + 55, 1.666667e-01, -8.333333e-01, 0.000000e+00 + 56, 1.666667e-01, -1.000000e+00, 0.000000e+00 + 57, 3.333333e-01, -1.666667e-01, 0.000000e+00 + 58, 3.333333e-01, 3.122502e-17, 0.000000e+00 + 59, 3.333333e-01, -3.333333e-01, 0.000000e+00 + 60, 3.333333e-01, -5.000000e-01, 0.000000e+00 + 61, 3.333333e-01, -6.666667e-01, 0.000000e+00 + 62, 3.333333e-01, -8.333333e-01, 0.000000e+00 + 63, 3.333333e-01, -1.000000e+00, 0.000000e+00 + 64, 5.000000e-01, -1.666667e-01, 0.000000e+00 + 65, 5.000000e-01, 4.943962e-17, 0.000000e+00 + 66, 5.000000e-01, -3.333333e-01, 0.000000e+00 + 67, 5.000000e-01, -5.000000e-01, 0.000000e+00 + 68, 5.000000e-01, -6.666667e-01, 0.000000e+00 + 69, 5.000000e-01, -8.333333e-01, 0.000000e+00 + 70, 5.000000e-01, -1.000000e+00, 0.000000e+00 + 71, 6.666667e-01, -1.666667e-01, 0.000000e+00 + 72, 6.666667e-01, 4.705437e-17, 0.000000e+00 + 73, 6.666667e-01, -3.333333e-01, 0.000000e+00 + 74, 6.666667e-01, -5.000000e-01, 0.000000e+00 + 75, 6.666667e-01, -6.666667e-01, 0.000000e+00 + 76, 6.666667e-01, -8.333333e-01, 0.000000e+00 + 77, 6.666667e-01, -1.000000e+00, 0.000000e+00 + 78, 8.333333e-01, -1.666667e-01, 0.000000e+00 + 79, 8.333333e-01, 6.033585e-17, 0.000000e+00 + 80, 8.333333e-01, -3.333333e-01, 0.000000e+00 + 81, 8.333333e-01, -5.000000e-01, 0.000000e+00 + 82, 8.333333e-01, -6.666667e-01, 0.000000e+00 + 83, 8.333333e-01, -8.333333e-01, 0.000000e+00 + 84, 8.333333e-01, -1.000000e+00, 0.000000e+00 + 85, 1.000000e+00, -1.666667e-01, 0.000000e+00 + 86, 1.000000e+00, 0.000000e+00, 0.000000e+00 + 87, 1.000000e+00, -3.333333e-01, 0.000000e+00 + 88, 1.000000e+00, -5.000000e-01, 0.000000e+00 + 89, 1.000000e+00, -6.666667e-01, 0.000000e+00 + 90, 1.000000e+00, -8.333333e-01, 0.000000e+00 + 91, 1.000000e+00, -1.000000e+00, 0.000000e+00 + 92, 1.000000e+00, 1.666667e-01, 0.000000e+00 + 93, 8.333333e-01, 1.666667e-01, 0.000000e+00 + 94, 1.000000e+00, 3.333333e-01, 0.000000e+00 + 95, 8.333333e-01, 3.333333e-01, 0.000000e+00 + 96, 1.000000e+00, 5.000000e-01, 0.000000e+00 + 97, 8.333333e-01, 5.000000e-01, 0.000000e+00 + 98, 1.000000e+00, 6.666667e-01, 0.000000e+00 + 99, 8.333333e-01, 6.666667e-01, 0.000000e+00 + 100, 1.000000e+00, 8.333333e-01, 0.000000e+00 + 101, 8.333333e-01, 8.333333e-01, 0.000000e+00 + 102, 1.000000e+00, 1.000000e+00, 0.000000e+00 + 103, 8.333333e-01, 1.000000e+00, 0.000000e+00 + 104, 6.666667e-01, 1.666667e-01, 0.000000e+00 + 105, 6.666667e-01, 3.333333e-01, 0.000000e+00 + 106, 6.666667e-01, 5.000000e-01, 0.000000e+00 + 107, 6.666667e-01, 6.666667e-01, 0.000000e+00 + 108, 6.666667e-01, 8.333333e-01, 0.000000e+00 + 109, 6.666667e-01, 1.000000e+00, 0.000000e+00 + 110, 5.000000e-01, 1.666667e-01, 0.000000e+00 + 111, 5.000000e-01, 3.333333e-01, 0.000000e+00 + 112, 5.000000e-01, 5.000000e-01, 0.000000e+00 + 113, 5.000000e-01, 6.666667e-01, 0.000000e+00 + 114, 5.000000e-01, 8.333333e-01, 0.000000e+00 + 115, 5.000000e-01, 1.000000e+00, 0.000000e+00 + 116, 3.333333e-01, 1.666667e-01, 0.000000e+00 + 117, 3.333333e-01, 3.333333e-01, 0.000000e+00 + 118, 3.333333e-01, 5.000000e-01, 0.000000e+00 + 119, 3.333333e-01, 6.666667e-01, 0.000000e+00 + 120, 3.333333e-01, 8.333333e-01, 0.000000e+00 + 121, 3.333333e-01, 1.000000e+00, 0.000000e+00 + 122, 1.666667e-01, 1.666667e-01, 0.000000e+00 + 123, 1.666667e-01, 3.333333e-01, 0.000000e+00 + 124, 1.666667e-01, 5.000000e-01, 0.000000e+00 + 125, 1.666667e-01, 6.666667e-01, 0.000000e+00 + 126, 1.666667e-01, 8.333333e-01, 0.000000e+00 + 127, 1.666667e-01, 1.000000e+00, 0.000000e+00 + 
128, 0.000000e+00, 1.666667e-01, 0.000000e+00 + 129, 0.000000e+00, 3.333333e-01, 0.000000e+00 + 130, 0.000000e+00, 5.000000e-01, 0.000000e+00 + 131, 0.000000e+00, 6.666667e-01, 0.000000e+00 + 132, 0.000000e+00, 8.333333e-01, 0.000000e+00 + 133, 0.000000e+00, 1.000000e+00, 0.000000e+00 +*ELEMENT, TYPE=S4R, ELSET=EB1 + 1, 1, 2, 3, 4 + 2, 2, 5, 6, 3 + 3, 5, 7, 8, 6 + 4, 7, 9, 10, 8 + 5, 9, 11, 12, 10 + 6, 11, 13, 14, 12 + 7, 4, 3, 15, 16 + 8, 3, 6, 17, 15 + 9, 6, 8, 18, 17 + 10, 8, 10, 19, 18 + 11, 10, 12, 20, 19 + 12, 12, 14, 21, 20 + 13, 16, 15, 22, 23 + 14, 15, 17, 24, 22 + 15, 17, 18, 25, 24 + 16, 18, 19, 26, 25 + 17, 19, 20, 27, 26 + 18, 20, 21, 28, 27 + 19, 23, 22, 29, 30 + 20, 22, 24, 31, 29 + 21, 24, 25, 32, 31 + 22, 25, 26, 33, 32 + 23, 26, 27, 34, 33 + 24, 27, 28, 35, 34 + 25, 30, 29, 36, 37 + 26, 29, 31, 38, 36 + 27, 31, 32, 39, 38 + 28, 32, 33, 40, 39 + 29, 33, 34, 41, 40 + 30, 34, 35, 42, 41 + 31, 37, 36, 43, 44 + 32, 36, 38, 45, 43 + 33, 38, 39, 46, 45 + 34, 39, 40, 47, 46 + 35, 40, 41, 48, 47 + 36, 41, 42, 49, 48 + 37, 44, 43, 50, 51 + 38, 43, 45, 52, 50 + 39, 45, 46, 53, 52 + 40, 46, 47, 54, 53 + 41, 47, 48, 55, 54 + 42, 48, 49, 56, 55 + 43, 51, 50, 57, 58 + 44, 50, 52, 59, 57 + 45, 52, 53, 60, 59 + 46, 53, 54, 61, 60 + 47, 54, 55, 62, 61 + 48, 55, 56, 63, 62 + 49, 58, 57, 64, 65 + 50, 57, 59, 66, 64 + 51, 59, 60, 67, 66 + 52, 60, 61, 68, 67 + 53, 61, 62, 69, 68 + 54, 62, 63, 70, 69 + 55, 65, 64, 71, 72 + 56, 64, 66, 73, 71 + 57, 66, 67, 74, 73 + 58, 67, 68, 75, 74 + 59, 68, 69, 76, 75 + 60, 69, 70, 77, 76 + 61, 72, 71, 78, 79 + 62, 71, 73, 80, 78 + 63, 73, 74, 81, 80 + 64, 74, 75, 82, 81 + 65, 75, 76, 83, 82 + 66, 76, 77, 84, 83 + 67, 79, 78, 85, 86 + 68, 78, 80, 87, 85 + 69, 80, 81, 88, 87 + 70, 81, 82, 89, 88 + 71, 82, 83, 90, 89 + 72, 83, 84, 91, 90 + 73, 86, 92, 93, 79 + 74, 92, 94, 95, 93 + 75, 94, 96, 97, 95 + 76, 96, 98, 99, 97 + 77, 98, 100, 101, 99 + 78, 100, 102, 103, 101 + 79, 79, 93, 104, 72 + 80, 93, 95, 105, 104 + 81, 95, 97, 106, 105 + 82, 97, 99, 107, 106 + 83, 99, 101, 108, 107 + 84, 101, 103, 109, 108 + 85, 72, 104, 110, 65 + 86, 104, 105, 111, 110 + 87, 105, 106, 112, 111 + 88, 106, 107, 113, 112 + 89, 107, 108, 114, 113 + 90, 108, 109, 115, 114 + 91, 65, 110, 116, 58 + 92, 110, 111, 117, 116 + 93, 111, 112, 118, 117 + 94, 112, 113, 119, 118 + 95, 113, 114, 120, 119 + 96, 114, 115, 121, 120 + 97, 58, 116, 122, 51 + 98, 116, 117, 123, 122 + 99, 117, 118, 124, 123 + 100, 118, 119, 125, 124 + 101, 119, 120, 126, 125 + 102, 120, 121, 127, 126 + 103, 51, 122, 128, 44 + 104, 122, 123, 129, 128 + 105, 123, 124, 130, 129 + 106, 124, 125, 131, 130 + 107, 125, 126, 132, 131 + 108, 126, 127, 133, 132 +*SURFACE, NAME=SS1 + 78, E2 + 84, E2 + 90, E2 + 96, E2 + 102, E2 + 108, E2 diff --git a/deal.II/tests/mesh_converter/meshes/2d/2d_test.ucd b/deal.II/tests/mesh_converter/meshes/2d/2d_test.ucd new file mode 100644 index 0000000000..9f67909260 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/2d/2d_test.ucd @@ -0,0 +1,251 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +133 114 0 0 0 +1 -1.00000000e+00 0.00000000e+00 0.00000000e+00 +2 -1.00000000e+00 -1.66666700e-01 0.00000000e+00 +3 -8.33333300e-01 -1.66666700e-01 0.00000000e+00 +4 -8.33333300e-01 0.00000000e+00 0.00000000e+00 +5 -1.00000000e+00 -3.33333300e-01 0.00000000e+00 +6 -8.33333300e-01 -3.33333300e-01 0.00000000e+00 +7 -1.00000000e+00 -5.00000000e-01 0.00000000e+00 +8 -8.33333300e-01 -5.00000000e-01 0.00000000e+00 +9 -1.00000000e+00 -6.66666700e-01 0.00000000e+00 +10 -8.33333300e-01 -6.66666700e-01 
0.00000000e+00 +11 -1.00000000e+00 -8.33333300e-01 0.00000000e+00 +12 -8.33333300e-01 -8.33333300e-01 0.00000000e+00 +13 -1.00000000e+00 -1.00000000e+00 0.00000000e+00 +14 -8.33333300e-01 -1.00000000e+00 0.00000000e+00 +15 -6.66666700e-01 -1.66666700e-01 0.00000000e+00 +16 -6.66666700e-01 0.00000000e+00 0.00000000e+00 +17 -6.66666700e-01 -3.33333300e-01 0.00000000e+00 +18 -6.66666700e-01 -5.00000000e-01 0.00000000e+00 +19 -6.66666700e-01 -6.66666700e-01 0.00000000e+00 +20 -6.66666700e-01 -8.33333300e-01 0.00000000e+00 +21 -6.66666700e-01 -1.00000000e+00 0.00000000e+00 +22 -5.00000000e-01 -1.66666700e-01 0.00000000e+00 +23 -5.00000000e-01 0.00000000e+00 0.00000000e+00 +24 -5.00000000e-01 -3.33333300e-01 0.00000000e+00 +25 -5.00000000e-01 -5.00000000e-01 0.00000000e+00 +26 -5.00000000e-01 -6.66666700e-01 0.00000000e+00 +27 -5.00000000e-01 -8.33333300e-01 0.00000000e+00 +28 -5.00000000e-01 -1.00000000e+00 0.00000000e+00 +29 -3.33333300e-01 -1.66666700e-01 0.00000000e+00 +30 -3.33333300e-01 0.00000000e+00 0.00000000e+00 +31 -3.33333300e-01 -3.33333300e-01 0.00000000e+00 +32 -3.33333300e-01 -5.00000000e-01 0.00000000e+00 +33 -3.33333300e-01 -6.66666700e-01 0.00000000e+00 +34 -3.33333300e-01 -8.33333300e-01 0.00000000e+00 +35 -3.33333300e-01 -1.00000000e+00 0.00000000e+00 +36 -1.66666700e-01 -1.66666700e-01 0.00000000e+00 +37 -1.66666700e-01 0.00000000e+00 0.00000000e+00 +38 -1.66666700e-01 -3.33333300e-01 0.00000000e+00 +39 -1.66666700e-01 -5.00000000e-01 0.00000000e+00 +40 -1.66666700e-01 -6.66666700e-01 0.00000000e+00 +41 -1.66666700e-01 -8.33333300e-01 0.00000000e+00 +42 -1.66666700e-01 -1.00000000e+00 0.00000000e+00 +43 0.00000000e+00 -1.66666700e-01 0.00000000e+00 +44 0.00000000e+00 0.00000000e+00 0.00000000e+00 +45 0.00000000e+00 -3.33333300e-01 0.00000000e+00 +46 0.00000000e+00 -5.00000000e-01 0.00000000e+00 +47 0.00000000e+00 -6.66666700e-01 0.00000000e+00 +48 0.00000000e+00 -8.33333300e-01 0.00000000e+00 +49 0.00000000e+00 -1.00000000e+00 0.00000000e+00 +50 1.66666700e-01 -1.66666700e-01 0.00000000e+00 +51 1.66666700e-01 0.00000000e+00 0.00000000e+00 +52 1.66666700e-01 -3.33333300e-01 0.00000000e+00 +53 1.66666700e-01 -5.00000000e-01 0.00000000e+00 +54 1.66666700e-01 -6.66666700e-01 0.00000000e+00 +55 1.66666700e-01 -8.33333300e-01 0.00000000e+00 +56 1.66666700e-01 -1.00000000e+00 0.00000000e+00 +57 3.33333300e-01 -1.66666700e-01 0.00000000e+00 +58 3.33333300e-01 0.00000000e+00 0.00000000e+00 +59 3.33333300e-01 -3.33333300e-01 0.00000000e+00 +60 3.33333300e-01 -5.00000000e-01 0.00000000e+00 +61 3.33333300e-01 -6.66666700e-01 0.00000000e+00 +62 3.33333300e-01 -8.33333300e-01 0.00000000e+00 +63 3.33333300e-01 -1.00000000e+00 0.00000000e+00 +64 5.00000000e-01 -1.66666700e-01 0.00000000e+00 +65 5.00000000e-01 0.00000000e+00 0.00000000e+00 +66 5.00000000e-01 -3.33333300e-01 0.00000000e+00 +67 5.00000000e-01 -5.00000000e-01 0.00000000e+00 +68 5.00000000e-01 -6.66666700e-01 0.00000000e+00 +69 5.00000000e-01 -8.33333300e-01 0.00000000e+00 +70 5.00000000e-01 -1.00000000e+00 0.00000000e+00 +71 6.66666700e-01 -1.66666700e-01 0.00000000e+00 +72 6.66666700e-01 0.00000000e+00 0.00000000e+00 +73 6.66666700e-01 -3.33333300e-01 0.00000000e+00 +74 6.66666700e-01 -5.00000000e-01 0.00000000e+00 +75 6.66666700e-01 -6.66666700e-01 0.00000000e+00 +76 6.66666700e-01 -8.33333300e-01 0.00000000e+00 +77 6.66666700e-01 -1.00000000e+00 0.00000000e+00 +78 8.33333300e-01 -1.66666700e-01 0.00000000e+00 +79 8.33333300e-01 0.00000000e+00 0.00000000e+00 +80 8.33333300e-01 -3.33333300e-01 0.00000000e+00 +81 
8.33333300e-01 -5.00000000e-01 0.00000000e+00 +82 8.33333300e-01 -6.66666700e-01 0.00000000e+00 +83 8.33333300e-01 -8.33333300e-01 0.00000000e+00 +84 8.33333300e-01 -1.00000000e+00 0.00000000e+00 +85 1.00000000e+00 -1.66666700e-01 0.00000000e+00 +86 1.00000000e+00 0.00000000e+00 0.00000000e+00 +87 1.00000000e+00 -3.33333300e-01 0.00000000e+00 +88 1.00000000e+00 -5.00000000e-01 0.00000000e+00 +89 1.00000000e+00 -6.66666700e-01 0.00000000e+00 +90 1.00000000e+00 -8.33333300e-01 0.00000000e+00 +91 1.00000000e+00 -1.00000000e+00 0.00000000e+00 +92 1.00000000e+00 1.66666700e-01 0.00000000e+00 +93 8.33333300e-01 1.66666700e-01 0.00000000e+00 +94 1.00000000e+00 3.33333300e-01 0.00000000e+00 +95 8.33333300e-01 3.33333300e-01 0.00000000e+00 +96 1.00000000e+00 5.00000000e-01 0.00000000e+00 +97 8.33333300e-01 5.00000000e-01 0.00000000e+00 +98 1.00000000e+00 6.66666700e-01 0.00000000e+00 +99 8.33333300e-01 6.66666700e-01 0.00000000e+00 +100 1.00000000e+00 8.33333300e-01 0.00000000e+00 +101 8.33333300e-01 8.33333300e-01 0.00000000e+00 +102 1.00000000e+00 1.00000000e+00 0.00000000e+00 +103 8.33333300e-01 1.00000000e+00 0.00000000e+00 +104 6.66666700e-01 1.66666700e-01 0.00000000e+00 +105 6.66666700e-01 3.33333300e-01 0.00000000e+00 +106 6.66666700e-01 5.00000000e-01 0.00000000e+00 +107 6.66666700e-01 6.66666700e-01 0.00000000e+00 +108 6.66666700e-01 8.33333300e-01 0.00000000e+00 +109 6.66666700e-01 1.00000000e+00 0.00000000e+00 +110 5.00000000e-01 1.66666700e-01 0.00000000e+00 +111 5.00000000e-01 3.33333300e-01 0.00000000e+00 +112 5.00000000e-01 5.00000000e-01 0.00000000e+00 +113 5.00000000e-01 6.66666700e-01 0.00000000e+00 +114 5.00000000e-01 8.33333300e-01 0.00000000e+00 +115 5.00000000e-01 1.00000000e+00 0.00000000e+00 +116 3.33333300e-01 1.66666700e-01 0.00000000e+00 +117 3.33333300e-01 3.33333300e-01 0.00000000e+00 +118 3.33333300e-01 5.00000000e-01 0.00000000e+00 +119 3.33333300e-01 6.66666700e-01 0.00000000e+00 +120 3.33333300e-01 8.33333300e-01 0.00000000e+00 +121 3.33333300e-01 1.00000000e+00 0.00000000e+00 +122 1.66666700e-01 1.66666700e-01 0.00000000e+00 +123 1.66666700e-01 3.33333300e-01 0.00000000e+00 +124 1.66666700e-01 5.00000000e-01 0.00000000e+00 +125 1.66666700e-01 6.66666700e-01 0.00000000e+00 +126 1.66666700e-01 8.33333300e-01 0.00000000e+00 +127 1.66666700e-01 1.00000000e+00 0.00000000e+00 +128 0.00000000e+00 1.66666700e-01 0.00000000e+00 +129 0.00000000e+00 3.33333300e-01 0.00000000e+00 +130 0.00000000e+00 5.00000000e-01 0.00000000e+00 +131 0.00000000e+00 6.66666700e-01 0.00000000e+00 +132 0.00000000e+00 8.33333300e-01 0.00000000e+00 +133 0.00000000e+00 1.00000000e+00 0.00000000e+00 +1 1 quad 1 2 3 4 +2 1 quad 2 5 6 3 +3 1 quad 5 7 8 6 +4 1 quad 7 9 10 8 +5 1 quad 9 11 12 10 +6 1 quad 11 13 14 12 +7 1 quad 4 3 15 16 +8 1 quad 3 6 17 15 +9 1 quad 6 8 18 17 +10 1 quad 8 10 19 18 +11 1 quad 10 12 20 19 +12 1 quad 12 14 21 20 +13 1 quad 16 15 22 23 +14 1 quad 15 17 24 22 +15 1 quad 17 18 25 24 +16 1 quad 18 19 26 25 +17 1 quad 19 20 27 26 +18 1 quad 20 21 28 27 +19 1 quad 23 22 29 30 +20 1 quad 22 24 31 29 +21 1 quad 24 25 32 31 +22 1 quad 25 26 33 32 +23 1 quad 26 27 34 33 +24 1 quad 27 28 35 34 +25 1 quad 30 29 36 37 +26 1 quad 29 31 38 36 +27 1 quad 31 32 39 38 +28 1 quad 32 33 40 39 +29 1 quad 33 34 41 40 +30 1 quad 34 35 42 41 +31 1 quad 37 36 43 44 +32 1 quad 36 38 45 43 +33 1 quad 38 39 46 45 +34 1 quad 39 40 47 46 +35 1 quad 40 41 48 47 +36 1 quad 41 42 49 48 +37 1 quad 44 43 50 51 +38 1 quad 43 45 52 50 +39 1 quad 45 46 53 52 +40 1 quad 46 47 54 53 +41 1 quad 47 48 55 54 +42 1 
quad 48 49 56 55 +43 1 quad 51 50 57 58 +44 1 quad 50 52 59 57 +45 1 quad 52 53 60 59 +46 1 quad 53 54 61 60 +47 1 quad 54 55 62 61 +48 1 quad 55 56 63 62 +49 1 quad 58 57 64 65 +50 1 quad 57 59 66 64 +51 1 quad 59 60 67 66 +52 1 quad 60 61 68 67 +53 1 quad 61 62 69 68 +54 1 quad 62 63 70 69 +55 1 quad 65 64 71 72 +56 1 quad 64 66 73 71 +57 1 quad 66 67 74 73 +58 1 quad 67 68 75 74 +59 1 quad 68 69 76 75 +60 1 quad 69 70 77 76 +61 1 quad 72 71 78 79 +62 1 quad 71 73 80 78 +63 1 quad 73 74 81 80 +64 1 quad 74 75 82 81 +65 1 quad 75 76 83 82 +66 1 quad 76 77 84 83 +67 1 quad 79 78 85 86 +68 1 quad 78 80 87 85 +69 1 quad 80 81 88 87 +70 1 quad 81 82 89 88 +71 1 quad 82 83 90 89 +72 1 quad 83 84 91 90 +73 1 quad 86 92 93 79 +74 1 quad 92 94 95 93 +75 1 quad 94 96 97 95 +76 1 quad 96 98 99 97 +77 1 quad 98 100 101 99 +78 1 quad 100 102 103 101 +79 1 quad 79 93 104 72 +80 1 quad 93 95 105 104 +81 1 quad 95 97 106 105 +82 1 quad 97 99 107 106 +83 1 quad 99 101 108 107 +84 1 quad 101 103 109 108 +85 1 quad 72 104 110 65 +86 1 quad 104 105 111 110 +87 1 quad 105 106 112 111 +88 1 quad 106 107 113 112 +89 1 quad 107 108 114 113 +90 1 quad 108 109 115 114 +91 1 quad 65 110 116 58 +92 1 quad 110 111 117 116 +93 1 quad 111 112 118 117 +94 1 quad 112 113 119 118 +95 1 quad 113 114 120 119 +96 1 quad 114 115 121 120 +97 1 quad 58 116 122 51 +98 1 quad 116 117 123 122 +99 1 quad 117 118 124 123 +100 1 quad 118 119 125 124 +101 1 quad 119 120 126 125 +102 1 quad 120 121 127 126 +103 1 quad 51 122 128 44 +104 1 quad 122 123 129 128 +105 1 quad 123 124 130 129 +106 1 quad 124 125 131 130 +107 1 quad 125 126 132 131 +108 1 quad 126 127 133 132 +1 1 line 102 103 +2 1 line 103 109 +3 1 line 109 115 +4 1 line 115 121 +5 1 line 121 127 +6 1 line 127 133 diff --git a/deal.II/tests/mesh_converter/meshes/2d/quad.cub b/deal.II/tests/mesh_converter/meshes/2d/quad.cub new file mode 100644 index 0000000000..71a08291db Binary files /dev/null and b/deal.II/tests/mesh_converter/meshes/2d/quad.cub differ diff --git a/deal.II/tests/mesh_converter/meshes/2d/quad.inp b/deal.II/tests/mesh_converter/meshes/2d/quad.inp new file mode 100644 index 0000000000..e809a6dafc --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/2d/quad.inp @@ -0,0 +1,29 @@ +*HEADING +cubit(y/Work/Current/Geometry/Mesh_conversion/mesh/2d/quad2.inp): 01/17/2009: 13 +*NODE + 1, -1.000000e+00, 1.000000e+00, 0.000000e+00 + 2, -1.000000e+00, 0.000000e+00, 0.000000e+00 + 3, 0.000000e+00, 0.000000e+00, 0.000000e+00 + 4, 0.000000e+00, 1.000000e+00, 0.000000e+00 + 5, -1.000000e+00, -1.000000e+00, 0.000000e+00 + 6, 0.000000e+00, -1.000000e+00, 0.000000e+00 + 7, 1.000000e+00, 0.000000e+00, 0.000000e+00 + 8, 1.000000e+00, 1.000000e+00, 0.000000e+00 + 9, 1.000000e+00, -1.000000e+00, 0.000000e+00 +*ELEMENT, TYPE=S4R, ELSET=EB14 + 1, 1, 2, 3, 4 + 2, 2, 5, 6, 3 + 3, 4, 3, 7, 8 + 4, 3, 6, 9, 7 +*SURFACE, NAME=SS1 + 1, E1 + 2, E1 +*SURFACE, NAME=SS2 + 2, E2 + 4, E2 +*SURFACE, NAME=SS3 + 3, E3 + 4, E3 +*SURFACE, NAME=SS4 + 1, E4 + 3, E4 diff --git a/deal.II/tests/mesh_converter/meshes/2d/quad.ucd b/deal.II/tests/mesh_converter/meshes/2d/quad.ucd new file mode 100644 index 0000000000..e7f13e28f2 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/2d/quad.ucd @@ -0,0 +1,25 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +9 12 0 0 0 +1 -1.00000000e+00 1.00000000e+00 0.00000000e+00 +2 -1.00000000e+00 0.00000000e+00 0.00000000e+00 +3 0.00000000e+00 0.00000000e+00 0.00000000e+00 +4 0.00000000e+00 1.00000000e+00 0.00000000e+00 +5 -1.00000000e+00 -1.00000000e+00 
0.00000000e+00 +6 0.00000000e+00 -1.00000000e+00 0.00000000e+00 +7 1.00000000e+00 0.00000000e+00 0.00000000e+00 +8 1.00000000e+00 1.00000000e+00 0.00000000e+00 +9 1.00000000e+00 -1.00000000e+00 0.00000000e+00 +1 14 quad 1 2 3 4 +2 14 quad 2 5 6 3 +3 14 quad 4 3 7 8 +4 14 quad 3 6 9 7 +1 1 line 1 2 +2 1 line 2 5 +3 2 line 5 6 +4 2 line 6 9 +5 3 line 7 8 +6 3 line 9 7 +7 4 line 4 1 +8 4 line 8 4 diff --git a/deal.II/tests/mesh_converter/meshes/3d/CC.cub b/deal.II/tests/mesh_converter/meshes/3d/CC.cub new file mode 100644 index 0000000000..4cb63d111b Binary files /dev/null and b/deal.II/tests/mesh_converter/meshes/3d/CC.cub differ diff --git a/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_new.inp b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_new.inp new file mode 100644 index 0000000000..0bb7fa5cb2 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_new.inp @@ -0,0 +1,157 @@ +*HEADING +cubit(y/Mesh_conversion/Mesh_conversion_update/mesh/CC_TEMP.inp): 06/13/2010: 18 +version: 12.1 +** +********************************** P A R T S ********************************** +*PART, NAME=Part-Default +** +********************************** N O D E S ********************************** +*NODE, NSET=ALLNODES + 1, -5.000000e-01, -5.000000e-01, 5.000000e-01 + 2, -5.000000e-01, -5.000000e-01, 0.000000e+00 + 3, -5.000000e-01, 0.000000e+00, 0.000000e+00 + 4, -5.000000e-01, 0.000000e+00, 5.000000e-01 + 5, 0.000000e+00, -5.000000e-01, 5.000000e-01 + 6, 0.000000e+00, -5.000000e-01, 0.000000e+00 + 7, 0.000000e+00, 0.000000e+00, 0.000000e+00 + 8, 0.000000e+00, 0.000000e+00, 5.000000e-01 + 9, -5.000000e-01, -5.000000e-01, -5.000000e-01 + 10, -5.000000e-01, 0.000000e+00, -5.000000e-01 + 11, 0.000000e+00, -5.000000e-01, -5.000000e-01 + 12, 0.000000e+00, 0.000000e+00, -5.000000e-01 + 13, -5.000000e-01, 5.000000e-01, 0.000000e+00 + 14, -5.000000e-01, 5.000000e-01, 5.000000e-01 + 15, 0.000000e+00, 5.000000e-01, 0.000000e+00 + 16, 0.000000e+00, 5.000000e-01, 5.000000e-01 + 17, -5.000000e-01, 5.000000e-01, -5.000000e-01 + 18, 0.000000e+00, 5.000000e-01, -5.000000e-01 + 19, 5.000000e-01, -5.000000e-01, 5.000000e-01 + 20, 5.000000e-01, -5.000000e-01, 0.000000e+00 + 21, 5.000000e-01, 0.000000e+00, 0.000000e+00 + 22, 5.000000e-01, 0.000000e+00, 5.000000e-01 + 23, 5.000000e-01, -5.000000e-01, -5.000000e-01 + 24, 5.000000e-01, 0.000000e+00, -5.000000e-01 + 25, 5.000000e-01, 5.000000e-01, 0.000000e+00 + 26, 5.000000e-01, 5.000000e-01, 5.000000e-01 + 27, 5.000000e-01, 5.000000e-01, -5.000000e-01 + 28, -2.500000e-01, 5.500000e-01, 2.500000e-01 + 29, -2.500000e-01, 5.500000e-01, 0.000000e+00 + 30, -2.500000e-01, 8.000000e-01, 0.000000e+00 + 31, -2.500000e-01, 8.000000e-01, 2.500000e-01 + 32, 0.000000e+00, 5.500000e-01, 2.500000e-01 + 33, 0.000000e+00, 5.500000e-01, 0.000000e+00 + 34, 0.000000e+00, 8.000000e-01, 0.000000e+00 + 35, 0.000000e+00, 8.000000e-01, 2.500000e-01 + 36, -2.500000e-01, 5.500000e-01, -2.500000e-01 + 37, -2.500000e-01, 8.000000e-01, -2.500000e-01 + 38, 0.000000e+00, 5.500000e-01, -2.500000e-01 + 39, 0.000000e+00, 8.000000e-01, -2.500000e-01 + 40, -2.500000e-01, 1.050000e+00, 0.000000e+00 + 41, -2.500000e-01, 1.050000e+00, 2.500000e-01 + 42, 0.000000e+00, 1.050000e+00, 0.000000e+00 + 43, 0.000000e+00, 1.050000e+00, 2.500000e-01 + 44, -2.500000e-01, 1.050000e+00, -2.500000e-01 + 45, 0.000000e+00, 1.050000e+00, -2.500000e-01 + 46, 2.500000e-01, 5.500000e-01, 2.500000e-01 + 47, 2.500000e-01, 5.500000e-01, 0.000000e+00 + 48, 2.500000e-01, 8.000000e-01, 0.000000e+00 + 49, 2.500000e-01, 8.000000e-01, 
2.500000e-01 + 50, 2.500000e-01, 5.500000e-01, -2.500000e-01 + 51, 2.500000e-01, 8.000000e-01, -2.500000e-01 + 52, 2.500000e-01, 1.050000e+00, 0.000000e+00 + 53, 2.500000e-01, 1.050000e+00, 2.500000e-01 + 54, 2.500000e-01, 1.050000e+00, -2.500000e-01 +** +********************************** E L E M E N T S **************************** +*ELEMENT, TYPE=C3D8R, ELSET=EB1 + 1, 1, 2, 3, 4, 5, 6, 7, 8 + 2, 2, 9, 10, 3, 6, 11, 12, 7 + 3, 4, 3, 13, 14, 8, 7, 15, 16 + 4, 3, 10, 17, 13, 7, 12, 18, 15 + 5, 5, 6, 7, 8, 19, 20, 21, 22 + 6, 6, 11, 12, 7, 20, 23, 24, 21 + 7, 8, 7, 15, 16, 22, 21, 25, 26 + 8, 7, 12, 18, 15, 21, 24, 27, 25 +*ELEMENT, TYPE=C3D8R, ELSET=EB2 + 9, 28, 29, 30, 31, 32, 33, 34, 35 + 10, 29, 36, 37, 30, 33, 38, 39, 34 + 11, 31, 30, 40, 41, 35, 34, 42, 43 + 12, 30, 37, 44, 40, 34, 39, 45, 42 + 13, 32, 33, 34, 35, 46, 47, 48, 49 + 14, 33, 38, 39, 34, 47, 50, 51, 48 + 15, 35, 34, 42, 43, 49, 48, 52, 53 + 16, 34, 39, 45, 42, 48, 51, 54, 52 +** +********************************** S I D E S E T S ********************************** +*ELSET, ELSET=SS1_S3 + 1, 2, 5, 6 +*SURFACE, NAME=SS1 +SS1_S3, S3 +*ELSET, ELSET=SS2_S4 + 10, 12, 14, 16, 2, 4, 6, 8 +*SURFACE, NAME=SS2 +SS2_S4, S4 +*ELSET, ELSET=SS3_S1 + 9, 10, 11, 12, 1, 2, 3, 4 +*SURFACE, NAME=SS3 +SS3_S1, S1 +*ELSET, ELSET=SS4_S5 + 11, 12, 15, 16 +*SURFACE, NAME=SS4 +SS4_S5, S5 +*ELSET, ELSET=SS100_S5 + 3, 4, 7, 8 +*SURFACE, NAME=SS100 +SS100_S5, S5 +*ELSET, ELSET=SS101_S3 + 9, 10, 13, 14 +*SURFACE, NAME=SS101 +SS101_S3, S3 +** +********************************** P R O P E R T I E S ************************ +*SOLID SECTION, ELSET=EB1, MATERIAL=Default-Steel +*SOLID SECTION, ELSET=EB2, MATERIAL=Default-Steel +** +*END PART +** +** +** +********************************** E N D P A R T S ********************************** +** +** +********************************** A S S E M B L Y ************************************ +** +*ASSEMBLY, NAME=ASSEMBLY1 +** +*INSTANCE, NAME=Part-Default_1, PART=Part-Default +*END INSTANCE +** +*END ASSEMBLY +** +** +** +*MATERIAL, NAME = Default-Steel +*ELASTIC, TYPE=ISOTROPIC +2.068000e+05, 2.900000e-01 +*DENSITY +7.000000e-06 +*CONDUCTIVITY,TYPE=ISO +4.500000e-02 +*SPECIFIC HEAT +5.000000e+02 +** +** +************************************** H I S T O R Y ************************************* +** +*PREPRINT +** +**************************************** S T E P 1 *************************************** +*STEP,INC=100,NAME=Default Set +** +*STATIC +1, 1, 1e-05, 1 +** +** +** +** +*END STEP diff --git a/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_new.ucd b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_new.ucd new file mode 100644 index 0000000000..bc68a51d6d --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_new.ucd @@ -0,0 +1,106 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +54 48 0 0 0 +1 -5.00000000e-01 -5.00000000e-01 5.00000000e-01 +2 -5.00000000e-01 -5.00000000e-01 0.00000000e+00 +3 -5.00000000e-01 0.00000000e+00 0.00000000e+00 +4 -5.00000000e-01 0.00000000e+00 5.00000000e-01 +5 0.00000000e+00 -5.00000000e-01 5.00000000e-01 +6 0.00000000e+00 -5.00000000e-01 0.00000000e+00 +7 0.00000000e+00 0.00000000e+00 0.00000000e+00 +8 0.00000000e+00 0.00000000e+00 5.00000000e-01 +9 -5.00000000e-01 -5.00000000e-01 -5.00000000e-01 +10 -5.00000000e-01 0.00000000e+00 -5.00000000e-01 +11 0.00000000e+00 -5.00000000e-01 -5.00000000e-01 +12 0.00000000e+00 0.00000000e+00 -5.00000000e-01 +13 -5.00000000e-01 5.00000000e-01 0.00000000e+00 +14 -5.00000000e-01 5.00000000e-01 5.00000000e-01 +15 0.00000000e+00 
5.00000000e-01 0.00000000e+00 +16 0.00000000e+00 5.00000000e-01 5.00000000e-01 +17 -5.00000000e-01 5.00000000e-01 -5.00000000e-01 +18 0.00000000e+00 5.00000000e-01 -5.00000000e-01 +19 5.00000000e-01 -5.00000000e-01 5.00000000e-01 +20 5.00000000e-01 -5.00000000e-01 0.00000000e+00 +21 5.00000000e-01 0.00000000e+00 0.00000000e+00 +22 5.00000000e-01 0.00000000e+00 5.00000000e-01 +23 5.00000000e-01 -5.00000000e-01 -5.00000000e-01 +24 5.00000000e-01 0.00000000e+00 -5.00000000e-01 +25 5.00000000e-01 5.00000000e-01 0.00000000e+00 +26 5.00000000e-01 5.00000000e-01 5.00000000e-01 +27 5.00000000e-01 5.00000000e-01 -5.00000000e-01 +28 -2.50000000e-01 5.50000000e-01 2.50000000e-01 +29 -2.50000000e-01 5.50000000e-01 0.00000000e+00 +30 -2.50000000e-01 8.00000000e-01 0.00000000e+00 +31 -2.50000000e-01 8.00000000e-01 2.50000000e-01 +32 0.00000000e+00 5.50000000e-01 2.50000000e-01 +33 0.00000000e+00 5.50000000e-01 0.00000000e+00 +34 0.00000000e+00 8.00000000e-01 0.00000000e+00 +35 0.00000000e+00 8.00000000e-01 2.50000000e-01 +36 -2.50000000e-01 5.50000000e-01 -2.50000000e-01 +37 -2.50000000e-01 8.00000000e-01 -2.50000000e-01 +38 0.00000000e+00 5.50000000e-01 -2.50000000e-01 +39 0.00000000e+00 8.00000000e-01 -2.50000000e-01 +40 -2.50000000e-01 1.05000000e+00 0.00000000e+00 +41 -2.50000000e-01 1.05000000e+00 2.50000000e-01 +42 0.00000000e+00 1.05000000e+00 0.00000000e+00 +43 0.00000000e+00 1.05000000e+00 2.50000000e-01 +44 -2.50000000e-01 1.05000000e+00 -2.50000000e-01 +45 0.00000000e+00 1.05000000e+00 -2.50000000e-01 +46 2.50000000e-01 5.50000000e-01 2.50000000e-01 +47 2.50000000e-01 5.50000000e-01 0.00000000e+00 +48 2.50000000e-01 8.00000000e-01 0.00000000e+00 +49 2.50000000e-01 8.00000000e-01 2.50000000e-01 +50 2.50000000e-01 5.50000000e-01 -2.50000000e-01 +51 2.50000000e-01 8.00000000e-01 -2.50000000e-01 +52 2.50000000e-01 1.05000000e+00 0.00000000e+00 +53 2.50000000e-01 1.05000000e+00 2.50000000e-01 +54 2.50000000e-01 1.05000000e+00 -2.50000000e-01 +1 1 hex 1 2 3 4 5 6 7 8 +2 1 hex 2 9 10 3 6 11 12 7 +3 1 hex 4 3 13 14 8 7 15 16 +4 1 hex 3 10 17 13 7 12 18 15 +5 1 hex 5 6 7 8 19 20 21 22 +6 1 hex 6 11 12 7 20 23 24 21 +7 1 hex 8 7 15 16 22 21 25 26 +8 1 hex 7 12 18 15 21 24 27 25 +9 2 hex 28 29 30 31 32 33 34 35 +10 2 hex 29 36 37 30 33 38 39 34 +11 2 hex 31 30 40 41 35 34 42 43 +12 2 hex 30 37 44 40 34 39 45 42 +13 2 hex 32 33 34 35 46 47 48 49 +14 2 hex 33 38 39 34 47 50 51 48 +15 2 hex 35 34 42 43 49 48 52 53 +16 2 hex 34 39 45 42 48 51 54 52 +1 1 quad 1 2 6 5 +2 1 quad 2 9 11 6 +3 1 quad 5 6 20 19 +4 1 quad 6 11 23 20 +5 2 quad 36 37 39 38 +6 2 quad 37 44 45 39 +7 2 quad 38 39 51 50 +8 2 quad 39 45 54 51 +9 2 quad 9 10 12 11 +10 2 quad 10 17 18 12 +11 2 quad 11 12 24 23 +12 2 quad 12 18 27 24 +13 3 quad 28 31 30 29 +14 3 quad 29 30 37 36 +15 3 quad 31 41 40 30 +16 3 quad 30 40 44 37 +17 3 quad 1 4 3 2 +18 3 quad 2 3 10 9 +19 3 quad 4 14 13 3 +20 3 quad 3 13 17 10 +21 4 quad 40 41 43 42 +22 4 quad 44 40 42 45 +23 4 quad 42 43 53 52 +24 4 quad 45 42 52 54 +25 100 quad 13 14 16 15 +26 100 quad 17 13 15 18 +27 100 quad 15 16 26 25 +28 100 quad 18 15 25 27 +29 101 quad 28 29 33 32 +30 101 quad 29 36 38 33 +31 101 quad 32 33 47 46 +32 101 quad 33 38 50 47 diff --git a/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_old.inp b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_old.inp new file mode 100644 index 0000000000..7d2752f363 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_old.inp @@ -0,0 +1,113 @@ +*HEADING +cubit(rrent/Geometry/Contact_test/contact_cubes_NTS_1_close.inp): 
03/31/2010: 17 +*NODE + 1, -5.000000e-01, -5.000000e-01, 5.000000e-01 + 2, -5.000000e-01, -5.000000e-01, 0.000000e+00 + 3, -5.000000e-01, 0.000000e+00, 0.000000e+00 + 4, -5.000000e-01, 0.000000e+00, 5.000000e-01 + 5, 0.000000e+00, -5.000000e-01, 5.000000e-01 + 6, 0.000000e+00, -5.000000e-01, 0.000000e+00 + 7, 0.000000e+00, 0.000000e+00, 0.000000e+00 + 8, 0.000000e+00, 0.000000e+00, 5.000000e-01 + 9, -5.000000e-01, -5.000000e-01, -5.000000e-01 + 10, -5.000000e-01, 0.000000e+00, -5.000000e-01 + 11, 0.000000e+00, -5.000000e-01, -5.000000e-01 + 12, 0.000000e+00, 0.000000e+00, -5.000000e-01 + 13, -5.000000e-01, 5.000000e-01, 0.000000e+00 + 14, -5.000000e-01, 5.000000e-01, 5.000000e-01 + 15, 0.000000e+00, 5.000000e-01, 0.000000e+00 + 16, 0.000000e+00, 5.000000e-01, 5.000000e-01 + 17, -5.000000e-01, 5.000000e-01, -5.000000e-01 + 18, 0.000000e+00, 5.000000e-01, -5.000000e-01 + 19, 5.000000e-01, -5.000000e-01, 5.000000e-01 + 20, 5.000000e-01, -5.000000e-01, 0.000000e+00 + 21, 5.000000e-01, 0.000000e+00, 0.000000e+00 + 22, 5.000000e-01, 0.000000e+00, 5.000000e-01 + 23, 5.000000e-01, -5.000000e-01, -5.000000e-01 + 24, 5.000000e-01, 0.000000e+00, -5.000000e-01 + 25, 5.000000e-01, 5.000000e-01, 0.000000e+00 + 26, 5.000000e-01, 5.000000e-01, 5.000000e-01 + 27, 5.000000e-01, 5.000000e-01, -5.000000e-01 + 28, -2.500000e-01, 5.500000e-01, 2.500000e-01 + 29, -2.500000e-01, 5.500000e-01, 0.000000e+00 + 30, -2.500000e-01, 8.000000e-01, 0.000000e+00 + 31, -2.500000e-01, 8.000000e-01, 2.500000e-01 + 32, 0.000000e+00, 5.500000e-01, 2.500000e-01 + 33, 0.000000e+00, 5.500000e-01, 0.000000e+00 + 34, 0.000000e+00, 8.000000e-01, 0.000000e+00 + 35, 0.000000e+00, 8.000000e-01, 2.500000e-01 + 36, -2.500000e-01, 5.500000e-01, -2.500000e-01 + 37, -2.500000e-01, 8.000000e-01, -2.500000e-01 + 38, 0.000000e+00, 5.500000e-01, -2.500000e-01 + 39, 0.000000e+00, 8.000000e-01, -2.500000e-01 + 40, -2.500000e-01, 1.050000e+00, 0.000000e+00 + 41, -2.500000e-01, 1.050000e+00, 2.500000e-01 + 42, 0.000000e+00, 1.050000e+00, 0.000000e+00 + 43, 0.000000e+00, 1.050000e+00, 2.500000e-01 + 44, -2.500000e-01, 1.050000e+00, -2.500000e-01 + 45, 0.000000e+00, 1.050000e+00, -2.500000e-01 + 46, 2.500000e-01, 5.500000e-01, 2.500000e-01 + 47, 2.500000e-01, 5.500000e-01, 0.000000e+00 + 48, 2.500000e-01, 8.000000e-01, 0.000000e+00 + 49, 2.500000e-01, 8.000000e-01, 2.500000e-01 + 50, 2.500000e-01, 5.500000e-01, -2.500000e-01 + 51, 2.500000e-01, 8.000000e-01, -2.500000e-01 + 52, 2.500000e-01, 1.050000e+00, 0.000000e+00 + 53, 2.500000e-01, 1.050000e+00, 2.500000e-01 + 54, 2.500000e-01, 1.050000e+00, -2.500000e-01 +*ELEMENT, TYPE=C3D8R, ELSET=EB1 + 1, 1, 2, 3, 4, 5, 6, 7, 8 + 2, 2, 9, 10, 3, 6, 11, 12, 7 + 3, 4, 3, 13, 14, 8, 7, 15, 16 + 4, 3, 10, 17, 13, 7, 12, 18, 15 + 5, 5, 6, 7, 8, 19, 20, 21, 22 + 6, 6, 11, 12, 7, 20, 23, 24, 21 + 7, 8, 7, 15, 16, 22, 21, 25, 26 + 8, 7, 12, 18, 15, 21, 24, 27, 25 +*ELEMENT, TYPE=C3D8R, ELSET=EB2 + 9, 28, 29, 30, 31, 32, 33, 34, 35 + 10, 29, 36, 37, 30, 33, 38, 39, 34 + 11, 31, 30, 40, 41, 35, 34, 42, 43 + 12, 30, 37, 44, 40, 34, 39, 45, 42 + 13, 32, 33, 34, 35, 46, 47, 48, 49 + 14, 33, 38, 39, 34, 47, 50, 51, 48 + 15, 35, 34, 42, 43, 49, 48, 52, 53 + 16, 34, 39, 45, 42, 48, 51, 54, 52 +*SURFACE, NAME=SS1 + 1, S3 + 2, S3 + 5, S3 + 6, S3 +*SURFACE, NAME=SS2 + 10, S4 + 12, S4 + 14, S4 + 16, S4 + 2, S4 + 4, S4 + 6, S4 + 8, S4 +*SURFACE, NAME=SS3 + 9, S1 + 10, S1 + 11, S1 + 12, S1 + 1, S1 + 2, S1 + 3, S1 + 4, S1 +*SURFACE, NAME=SS4 + 11, S5 + 12, S5 + 15, S5 + 16, S5 +*SURFACE, NAME=SS100 + 3, S5 + 4, S5 + 7, S5 + 8, S5 
+*SURFACE, NAME=SS101 + 9, S3 + 10, S3 + 13, S3 + 14, S3 diff --git a/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_old.ucd b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_old.ucd new file mode 100644 index 0000000000..bc68a51d6d --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/CC_cubit_old.ucd @@ -0,0 +1,106 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +54 48 0 0 0 +1 -5.00000000e-01 -5.00000000e-01 5.00000000e-01 +2 -5.00000000e-01 -5.00000000e-01 0.00000000e+00 +3 -5.00000000e-01 0.00000000e+00 0.00000000e+00 +4 -5.00000000e-01 0.00000000e+00 5.00000000e-01 +5 0.00000000e+00 -5.00000000e-01 5.00000000e-01 +6 0.00000000e+00 -5.00000000e-01 0.00000000e+00 +7 0.00000000e+00 0.00000000e+00 0.00000000e+00 +8 0.00000000e+00 0.00000000e+00 5.00000000e-01 +9 -5.00000000e-01 -5.00000000e-01 -5.00000000e-01 +10 -5.00000000e-01 0.00000000e+00 -5.00000000e-01 +11 0.00000000e+00 -5.00000000e-01 -5.00000000e-01 +12 0.00000000e+00 0.00000000e+00 -5.00000000e-01 +13 -5.00000000e-01 5.00000000e-01 0.00000000e+00 +14 -5.00000000e-01 5.00000000e-01 5.00000000e-01 +15 0.00000000e+00 5.00000000e-01 0.00000000e+00 +16 0.00000000e+00 5.00000000e-01 5.00000000e-01 +17 -5.00000000e-01 5.00000000e-01 -5.00000000e-01 +18 0.00000000e+00 5.00000000e-01 -5.00000000e-01 +19 5.00000000e-01 -5.00000000e-01 5.00000000e-01 +20 5.00000000e-01 -5.00000000e-01 0.00000000e+00 +21 5.00000000e-01 0.00000000e+00 0.00000000e+00 +22 5.00000000e-01 0.00000000e+00 5.00000000e-01 +23 5.00000000e-01 -5.00000000e-01 -5.00000000e-01 +24 5.00000000e-01 0.00000000e+00 -5.00000000e-01 +25 5.00000000e-01 5.00000000e-01 0.00000000e+00 +26 5.00000000e-01 5.00000000e-01 5.00000000e-01 +27 5.00000000e-01 5.00000000e-01 -5.00000000e-01 +28 -2.50000000e-01 5.50000000e-01 2.50000000e-01 +29 -2.50000000e-01 5.50000000e-01 0.00000000e+00 +30 -2.50000000e-01 8.00000000e-01 0.00000000e+00 +31 -2.50000000e-01 8.00000000e-01 2.50000000e-01 +32 0.00000000e+00 5.50000000e-01 2.50000000e-01 +33 0.00000000e+00 5.50000000e-01 0.00000000e+00 +34 0.00000000e+00 8.00000000e-01 0.00000000e+00 +35 0.00000000e+00 8.00000000e-01 2.50000000e-01 +36 -2.50000000e-01 5.50000000e-01 -2.50000000e-01 +37 -2.50000000e-01 8.00000000e-01 -2.50000000e-01 +38 0.00000000e+00 5.50000000e-01 -2.50000000e-01 +39 0.00000000e+00 8.00000000e-01 -2.50000000e-01 +40 -2.50000000e-01 1.05000000e+00 0.00000000e+00 +41 -2.50000000e-01 1.05000000e+00 2.50000000e-01 +42 0.00000000e+00 1.05000000e+00 0.00000000e+00 +43 0.00000000e+00 1.05000000e+00 2.50000000e-01 +44 -2.50000000e-01 1.05000000e+00 -2.50000000e-01 +45 0.00000000e+00 1.05000000e+00 -2.50000000e-01 +46 2.50000000e-01 5.50000000e-01 2.50000000e-01 +47 2.50000000e-01 5.50000000e-01 0.00000000e+00 +48 2.50000000e-01 8.00000000e-01 0.00000000e+00 +49 2.50000000e-01 8.00000000e-01 2.50000000e-01 +50 2.50000000e-01 5.50000000e-01 -2.50000000e-01 +51 2.50000000e-01 8.00000000e-01 -2.50000000e-01 +52 2.50000000e-01 1.05000000e+00 0.00000000e+00 +53 2.50000000e-01 1.05000000e+00 2.50000000e-01 +54 2.50000000e-01 1.05000000e+00 -2.50000000e-01 +1 1 hex 1 2 3 4 5 6 7 8 +2 1 hex 2 9 10 3 6 11 12 7 +3 1 hex 4 3 13 14 8 7 15 16 +4 1 hex 3 10 17 13 7 12 18 15 +5 1 hex 5 6 7 8 19 20 21 22 +6 1 hex 6 11 12 7 20 23 24 21 +7 1 hex 8 7 15 16 22 21 25 26 +8 1 hex 7 12 18 15 21 24 27 25 +9 2 hex 28 29 30 31 32 33 34 35 +10 2 hex 29 36 37 30 33 38 39 34 +11 2 hex 31 30 40 41 35 34 42 43 +12 2 hex 30 37 44 40 34 39 45 42 +13 2 hex 32 33 34 35 46 47 48 49 +14 2 hex 33 38 39 34 47 50 51 48 +15 2 hex 35 34 42 43 49 48 52 53 +16 2 hex 34 
39 45 42 48 51 54 52 +1 1 quad 1 2 6 5 +2 1 quad 2 9 11 6 +3 1 quad 5 6 20 19 +4 1 quad 6 11 23 20 +5 2 quad 36 37 39 38 +6 2 quad 37 44 45 39 +7 2 quad 38 39 51 50 +8 2 quad 39 45 54 51 +9 2 quad 9 10 12 11 +10 2 quad 10 17 18 12 +11 2 quad 11 12 24 23 +12 2 quad 12 18 27 24 +13 3 quad 28 31 30 29 +14 3 quad 29 30 37 36 +15 3 quad 31 41 40 30 +16 3 quad 30 40 44 37 +17 3 quad 1 4 3 2 +18 3 quad 2 3 10 9 +19 3 quad 4 14 13 3 +20 3 quad 3 13 17 10 +21 4 quad 40 41 43 42 +22 4 quad 44 40 42 45 +23 4 quad 42 43 53 52 +24 4 quad 45 42 52 54 +25 100 quad 13 14 16 15 +26 100 quad 17 13 15 18 +27 100 quad 15 16 26 25 +28 100 quad 18 15 25 27 +29 101 quad 28 29 33 32 +30 101 quad 29 36 38 33 +31 101 quad 32 33 47 46 +32 101 quad 33 38 50 47 diff --git a/deal.II/tests/mesh_converter/meshes/3d/other_simple.inp b/deal.II/tests/mesh_converter/meshes/3d/other_simple.inp new file mode 100644 index 0000000000..1250a41207 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/other_simple.inp @@ -0,0 +1,17 @@ +*NODE +1, 0, 0, 0 +2, 0, 0, 50 +3, 50, 0, 50 +4, 50, 0, 0 +5, 0, 50, 0 +6, 0.705725, 50, 50 +7, 50, 50, 50 +8, 50, 50, 0 +** +** +*ELEMENT, TYPE=C3D8 +1, 1, 2, 3, 4, 5, 6, 7, 8 +*NSET, NSET=n, GENERATE +1, 8 +*ELSET, ELSET=e, GENERATE +1, 1 diff --git a/deal.II/tests/mesh_converter/meshes/3d/other_simple.ucd b/deal.II/tests/mesh_converter/meshes/3d/other_simple.ucd new file mode 100644 index 0000000000..ea545e612d --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/other_simple.ucd @@ -0,0 +1,13 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +8 1 0 0 0 +1 0.00000000e+00 0.00000000e+00 0.00000000e+00 +2 0.00000000e+00 0.00000000e+00 5.00000000e+01 +3 5.00000000e+01 0.00000000e+00 5.00000000e+01 +4 5.00000000e+01 0.00000000e+00 0.00000000e+00 +5 0.00000000e+00 5.00000000e+01 0.00000000e+00 +6 7.05725000e-01 5.00000000e+01 5.00000000e+01 +7 5.00000000e+01 5.00000000e+01 5.00000000e+01 +8 5.00000000e+01 5.00000000e+01 0.00000000e+00 +1 0 hex 1 2 3 4 5 6 7 8 diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.cub b/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.cub new file mode 100644 index 0000000000..4b577a2472 Binary files /dev/null and b/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.cub differ diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.inp b/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.inp new file mode 100644 index 0000000000..6e3a898f13 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.inp @@ -0,0 +1,25 @@ +*HEADING +cubit(t/Geometry/Mesh_conversion/mesh/Test_cube/test_cube_1.inp): 11/06/2008: 09 +*NODE + 1, -1.000000e+00, -1.000000e+00, 1.000000e+00 + 2, -1.000000e+00, -1.000000e+00, -1.000000e+00 + 3, -1.000000e+00, 1.000000e+00, -1.000000e+00 + 4, -1.000000e+00, 1.000000e+00, 1.000000e+00 + 5, 1.000000e+00, -1.000000e+00, 1.000000e+00 + 6, 1.000000e+00, -1.000000e+00, -1.000000e+00 + 7, 1.000000e+00, 1.000000e+00, -1.000000e+00 + 8, 1.000000e+00, 1.000000e+00, 1.000000e+00 +*ELEMENT, TYPE=C3D8R, ELSET=EB1 + 1, 1, 2, 3, 4, 5, 6, 7, 8 +*SURFACE, NAME=SS1 + 1, S6 +*SURFACE, NAME=SS2 + 1, S4 +*SURFACE, NAME=SS3 + 1, S3 +*SURFACE, NAME=SS4 + 1, S1 +*SURFACE, NAME=SS5 + 1, S5 +*SURFACE, NAME=SS6 + 1, S2 diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.ucd b/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.ucd new file mode 100644 index 0000000000..5f81d2d6b6 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/test_cube_1.ucd @@ -0,0 +1,19 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +8 7 0 0 
0 +1 -1.00000000e+00 -1.00000000e+00 1.00000000e+00 +2 -1.00000000e+00 -1.00000000e+00 -1.00000000e+00 +3 -1.00000000e+00 1.00000000e+00 -1.00000000e+00 +4 -1.00000000e+00 1.00000000e+00 1.00000000e+00 +5 1.00000000e+00 -1.00000000e+00 1.00000000e+00 +6 1.00000000e+00 -1.00000000e+00 -1.00000000e+00 +7 1.00000000e+00 1.00000000e+00 -1.00000000e+00 +8 1.00000000e+00 1.00000000e+00 1.00000000e+00 +1 1 hex 1 2 3 4 5 6 7 8 +1 1 quad 1 5 8 4 +2 2 quad 2 3 7 6 +3 3 quad 1 2 6 5 +4 4 quad 1 4 3 2 +5 5 quad 3 4 8 7 +6 6 quad 5 8 7 6 diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.cub b/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.cub new file mode 100644 index 0000000000..3222831cf2 Binary files /dev/null and b/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.cub differ diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.inp b/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.inp new file mode 100644 index 0000000000..336d4424e0 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.inp @@ -0,0 +1,97 @@ +*HEADING +cubit(metry/Mesh_conversion/mesh/Test_cube/test_cube_pave_1.inp): 11/06/2008: 10 +*NODE + 1, 1.000000e+00, 1.000000e+00, 0.000000e+00 + 2, 5.000000e-01, 1.000000e+00, 0.000000e+00 + 3, 5.583559e-01, 3.929331e-01, 0.000000e+00 + 4, 1.000000e+00, 0.000000e+00, 0.000000e+00 + 5, 1.000000e+00, 1.000000e+00, 1.000000e+00 + 6, 5.000000e-01, 1.000000e+00, 1.000000e+00 + 7, 5.583559e-01, 3.929331e-01, 1.000000e+00 + 8, 1.000000e+00, 0.000000e+00, 1.000000e+00 + 9, 0.000000e+00, 1.000000e+00, 0.000000e+00 + 10, 1.562409e-01, 1.799288e-01, 0.000000e+00 + 11, 0.000000e+00, 1.000000e+00, 1.000000e+00 + 12, 1.562409e-01, 1.799288e-01, 1.000000e+00 + 13, -5.000000e-01, 1.000000e+00, 0.000000e+00 + 14, -3.897075e-01, 6.804246e-02, 0.000000e+00 + 15, -5.000000e-01, 1.000000e+00, 1.000000e+00 + 16, -3.897075e-01, 6.804246e-02, 1.000000e+00 + 17, -1.000000e+00, 1.000000e+00, 0.000000e+00 + 18, -1.000000e+00, 0.000000e+00, 0.000000e+00 + 19, -1.000000e+00, 1.000000e+00, 1.000000e+00 + 20, -1.000000e+00, 0.000000e+00, 1.000000e+00 + 21, -1.000000e+00, -1.000000e+00, 0.000000e+00 + 22, 0.000000e+00, -1.000000e+00, 0.000000e+00 + 23, -1.000000e+00, -1.000000e+00, 1.000000e+00 + 24, 0.000000e+00, -1.000000e+00, 1.000000e+00 + 25, 1.000000e+00, -1.000000e+00, 0.000000e+00 + 26, 1.000000e+00, -1.000000e+00, 1.000000e+00 + 27, 1.000000e+00, 1.000000e+00, -1.000000e+00 + 28, 5.000000e-01, 1.000000e+00, -1.000000e+00 + 29, 5.583559e-01, 3.929331e-01, -1.000000e+00 + 30, 1.000000e+00, 0.000000e+00, -1.000000e+00 + 31, 0.000000e+00, 1.000000e+00, -1.000000e+00 + 32, 1.562409e-01, 1.799288e-01, -1.000000e+00 + 33, -5.000000e-01, 1.000000e+00, -1.000000e+00 + 34, -3.897075e-01, 6.804246e-02, -1.000000e+00 + 35, -1.000000e+00, 1.000000e+00, -1.000000e+00 + 36, -1.000000e+00, 0.000000e+00, -1.000000e+00 + 37, -1.000000e+00, -1.000000e+00, -1.000000e+00 + 38, 0.000000e+00, -1.000000e+00, -1.000000e+00 + 39, 1.000000e+00, -1.000000e+00, -1.000000e+00 +*ELEMENT, TYPE=C3D8R, ELSET=EB1 + 1, 1, 2, 3, 4, 5, 6, 7, 8 + 2, 2, 9, 10, 3, 6, 11, 12, 7 + 3, 9, 13, 14, 10, 11, 15, 16, 12 + 4, 13, 17, 18, 14, 15, 19, 20, 16 + 5, 21, 22, 14, 18, 23, 24, 16, 20 + 6, 10, 14, 22, 25, 12, 16, 24, 26 + 7, 25, 4, 3, 10, 26, 8, 7, 12 + 8, 27, 28, 29, 30, 1, 2, 3, 4 + 9, 28, 31, 32, 29, 2, 9, 10, 3 + 10, 31, 33, 34, 32, 9, 13, 14, 10 + 11, 33, 35, 36, 34, 13, 17, 18, 14 + 12, 37, 38, 34, 36, 21, 22, 14, 18 + 13, 32, 34, 38, 39, 10, 14, 22, 25 + 14, 39, 30, 29, 32, 
25, 4, 3, 10 +*SURFACE, NAME=SS1 + 1, S2 + 2, S2 + 3, S2 + 4, S2 + 5, S2 + 6, S2 + 7, S2 +*SURFACE, NAME=SS2 + 8, S1 + 9, S1 + 10, S1 + 11, S1 + 12, S1 + 13, S1 + 14, S1 +*SURFACE, NAME=SS3 + 5, S3 + 6, S5 + 12, S3 + 13, S5 +*SURFACE, NAME=SS4 + 4, S4 + 5, S6 + 11, S4 + 12, S6 +*SURFACE, NAME=SS5 + 1, S3 + 2, S3 + 3, S3 + 4, S3 + 8, S3 + 9, S3 + 10, S3 + 11, S3 +*SURFACE, NAME=SS6 + 1, S6 + 7, S3 + 8, S6 + 14, S3 diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.ucd b/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.ucd new file mode 100644 index 0000000000..71be64e634 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/test_cube_pave_1.ucd @@ -0,0 +1,91 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +39 48 0 0 0 +1 1.00000000e+00 1.00000000e+00 0.00000000e+00 +2 5.00000000e-01 1.00000000e+00 0.00000000e+00 +3 5.58355900e-01 3.92933100e-01 0.00000000e+00 +4 1.00000000e+00 0.00000000e+00 0.00000000e+00 +5 1.00000000e+00 1.00000000e+00 1.00000000e+00 +6 5.00000000e-01 1.00000000e+00 1.00000000e+00 +7 5.58355900e-01 3.92933100e-01 1.00000000e+00 +8 1.00000000e+00 0.00000000e+00 1.00000000e+00 +9 0.00000000e+00 1.00000000e+00 0.00000000e+00 +10 1.56240900e-01 1.79928800e-01 0.00000000e+00 +11 0.00000000e+00 1.00000000e+00 1.00000000e+00 +12 1.56240900e-01 1.79928800e-01 1.00000000e+00 +13 -5.00000000e-01 1.00000000e+00 0.00000000e+00 +14 -3.89707500e-01 6.80424600e-02 0.00000000e+00 +15 -5.00000000e-01 1.00000000e+00 1.00000000e+00 +16 -3.89707500e-01 6.80424600e-02 1.00000000e+00 +17 -1.00000000e+00 1.00000000e+00 0.00000000e+00 +18 -1.00000000e+00 0.00000000e+00 0.00000000e+00 +19 -1.00000000e+00 1.00000000e+00 1.00000000e+00 +20 -1.00000000e+00 0.00000000e+00 1.00000000e+00 +21 -1.00000000e+00 -1.00000000e+00 0.00000000e+00 +22 0.00000000e+00 -1.00000000e+00 0.00000000e+00 +23 -1.00000000e+00 -1.00000000e+00 1.00000000e+00 +24 0.00000000e+00 -1.00000000e+00 1.00000000e+00 +25 1.00000000e+00 -1.00000000e+00 0.00000000e+00 +26 1.00000000e+00 -1.00000000e+00 1.00000000e+00 +27 1.00000000e+00 1.00000000e+00 -1.00000000e+00 +28 5.00000000e-01 1.00000000e+00 -1.00000000e+00 +29 5.58355900e-01 3.92933100e-01 -1.00000000e+00 +30 1.00000000e+00 0.00000000e+00 -1.00000000e+00 +31 0.00000000e+00 1.00000000e+00 -1.00000000e+00 +32 1.56240900e-01 1.79928800e-01 -1.00000000e+00 +33 -5.00000000e-01 1.00000000e+00 -1.00000000e+00 +34 -3.89707500e-01 6.80424600e-02 -1.00000000e+00 +35 -1.00000000e+00 1.00000000e+00 -1.00000000e+00 +36 -1.00000000e+00 0.00000000e+00 -1.00000000e+00 +37 -1.00000000e+00 -1.00000000e+00 -1.00000000e+00 +38 0.00000000e+00 -1.00000000e+00 -1.00000000e+00 +39 1.00000000e+00 -1.00000000e+00 -1.00000000e+00 +1 1 hex 1 2 3 4 5 6 7 8 +2 1 hex 2 9 10 3 6 11 12 7 +3 1 hex 9 13 14 10 11 15 16 12 +4 1 hex 13 17 18 14 15 19 20 16 +5 1 hex 21 22 14 18 23 24 16 20 +6 1 hex 10 14 22 25 12 16 24 26 +7 1 hex 25 4 3 10 26 8 7 12 +8 1 hex 27 28 29 30 1 2 3 4 +9 1 hex 28 31 32 29 2 9 10 3 +10 1 hex 31 33 34 32 9 13 14 10 +11 1 hex 33 35 36 34 13 17 18 14 +12 1 hex 37 38 34 36 21 22 14 18 +13 1 hex 32 34 38 39 10 14 22 25 +14 1 hex 39 30 29 32 25 4 3 10 +1 1 quad 5 8 7 6 +2 1 quad 6 7 12 11 +3 1 quad 11 12 16 15 +4 1 quad 15 16 20 19 +5 1 quad 23 20 16 24 +6 1 quad 12 26 24 16 +7 1 quad 26 12 7 8 +8 2 quad 27 30 29 28 +9 2 quad 28 29 32 31 +10 2 quad 31 32 34 33 +11 2 quad 33 34 36 35 +12 2 quad 37 36 34 38 +13 2 quad 32 39 38 34 +14 2 quad 39 32 29 30 +15 3 quad 21 22 24 23 +16 3 quad 22 25 26 24 +17 3 quad 37 38 22 21 +18 3 quad 38 39 25 22 +19 4 quad 17 18 20 
19 +20 4 quad 21 23 20 18 +21 4 quad 35 36 18 17 +22 4 quad 37 21 18 36 +23 5 quad 1 2 6 5 +24 5 quad 2 9 11 6 +25 5 quad 9 13 15 11 +26 5 quad 13 17 19 15 +27 5 quad 27 28 2 1 +28 5 quad 28 31 9 2 +29 5 quad 31 33 13 9 +30 5 quad 33 35 17 13 +31 6 quad 1 5 8 4 +32 6 quad 25 4 8 26 +33 6 quad 27 1 4 30 +34 6 quad 39 30 4 25 diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.cub b/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.cub new file mode 100644 index 0000000000..abfe09593e Binary files /dev/null and b/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.cub differ diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.inp b/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.inp new file mode 100644 index 0000000000..1e329c1a61 --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.inp @@ -0,0 +1,67 @@ +*HEADING +cubit(rsion/Mesh_conversion/mesh/3d/test_cube_two_materials.inp): 01/20/2009: 07 +*NODE + 1, -1.000000e+00, -1.000000e+00, 1.000000e+00 + 2, -1.000000e+00, -1.000000e+00, 0.000000e+00 + 3, -1.000000e+00, 0.000000e+00, 0.000000e+00 + 4, -1.000000e+00, 0.000000e+00, 1.000000e+00 + 5, 0.000000e+00, -1.000000e+00, 1.000000e+00 + 6, 0.000000e+00, -1.000000e+00, 0.000000e+00 + 7, 0.000000e+00, 0.000000e+00, 0.000000e+00 + 8, 0.000000e+00, 0.000000e+00, 1.000000e+00 + 9, -1.000000e+00, -1.000000e+00, -1.000000e+00 + 10, -1.000000e+00, 0.000000e+00, -1.000000e+00 + 11, 0.000000e+00, -1.000000e+00, -1.000000e+00 + 12, 0.000000e+00, 0.000000e+00, -1.000000e+00 + 13, -1.000000e+00, 1.000000e+00, 0.000000e+00 + 14, -1.000000e+00, 1.000000e+00, 1.000000e+00 + 15, 0.000000e+00, 1.000000e+00, 0.000000e+00 + 16, 0.000000e+00, 1.000000e+00, 1.000000e+00 + 17, -1.000000e+00, 1.000000e+00, -1.000000e+00 + 18, 0.000000e+00, 1.000000e+00, -1.000000e+00 + 19, 1.000000e+00, -1.000000e+00, 1.000000e+00 + 20, 1.000000e+00, -1.000000e+00, 0.000000e+00 + 21, 1.000000e+00, 0.000000e+00, 0.000000e+00 + 22, 1.000000e+00, 0.000000e+00, 1.000000e+00 + 23, 1.000000e+00, -1.000000e+00, -1.000000e+00 + 24, 1.000000e+00, 0.000000e+00, -1.000000e+00 + 25, 1.000000e+00, 1.000000e+00, 0.000000e+00 + 26, 1.000000e+00, 1.000000e+00, 1.000000e+00 + 27, 1.000000e+00, 1.000000e+00, -1.000000e+00 +*ELEMENT, TYPE=C3D8R, ELSET=EB1 + 1, 1, 2, 3, 4, 5, 6, 7, 8 + 2, 2, 9, 10, 3, 6, 11, 12, 7 + 3, 4, 3, 13, 14, 8, 7, 15, 16 + 4, 3, 10, 17, 13, 7, 12, 18, 15 + 5, 5, 6, 7, 8, 19, 20, 21, 22 +*ELEMENT, TYPE=C3D8R, ELSET=EB2 + 6, 6, 11, 12, 7, 20, 23, 24, 21 + 7, 8, 7, 15, 16, 22, 21, 25, 26 + 8, 7, 12, 18, 15, 21, 24, 27, 25 +*SURFACE, NAME=SS1 + 1, S6 + 3, S6 + 5, S6 + 7, S6 + 3, S5 + 4, S5 + 7, S5 + 8, S5 + 1, S1 + 2, S1 + 3, S1 + 4, S1 + 1, S3 + 2, S3 + 5, S3 + 6, S3 +*SURFACE, NAME=SS2 + 2, S4 + 4, S4 + 8, S4 + 8, S2 +*SURFACE, NAME=SS3 + 5, S2 + 6, S4 + 6, S2 + 7, S2 diff --git a/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.ucd b/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.ucd new file mode 100644 index 0000000000..a81424856c --- /dev/null +++ b/deal.II/tests/mesh_converter/meshes/3d/test_cube_two_materials.ucd @@ -0,0 +1,63 @@ +# FEM Mesh Converter +# Mesh type: AVS UCD + +27 32 0 0 0 +1 -1.00000000e+00 -1.00000000e+00 1.00000000e+00 +2 -1.00000000e+00 -1.00000000e+00 0.00000000e+00 +3 -1.00000000e+00 0.00000000e+00 0.00000000e+00 +4 -1.00000000e+00 0.00000000e+00 1.00000000e+00 +5 0.00000000e+00 -1.00000000e+00 1.00000000e+00 +6 0.00000000e+00 -1.00000000e+00 0.00000000e+00 
+7 0.00000000e+00 0.00000000e+00 0.00000000e+00 +8 0.00000000e+00 0.00000000e+00 1.00000000e+00 +9 -1.00000000e+00 -1.00000000e+00 -1.00000000e+00 +10 -1.00000000e+00 0.00000000e+00 -1.00000000e+00 +11 0.00000000e+00 -1.00000000e+00 -1.00000000e+00 +12 0.00000000e+00 0.00000000e+00 -1.00000000e+00 +13 -1.00000000e+00 1.00000000e+00 0.00000000e+00 +14 -1.00000000e+00 1.00000000e+00 1.00000000e+00 +15 0.00000000e+00 1.00000000e+00 0.00000000e+00 +16 0.00000000e+00 1.00000000e+00 1.00000000e+00 +17 -1.00000000e+00 1.00000000e+00 -1.00000000e+00 +18 0.00000000e+00 1.00000000e+00 -1.00000000e+00 +19 1.00000000e+00 -1.00000000e+00 1.00000000e+00 +20 1.00000000e+00 -1.00000000e+00 0.00000000e+00 +21 1.00000000e+00 0.00000000e+00 0.00000000e+00 +22 1.00000000e+00 0.00000000e+00 1.00000000e+00 +23 1.00000000e+00 -1.00000000e+00 -1.00000000e+00 +24 1.00000000e+00 0.00000000e+00 -1.00000000e+00 +25 1.00000000e+00 1.00000000e+00 0.00000000e+00 +26 1.00000000e+00 1.00000000e+00 1.00000000e+00 +27 1.00000000e+00 1.00000000e+00 -1.00000000e+00 +1 1 hex 1 2 3 4 5 6 7 8 +2 1 hex 2 9 10 3 6 11 12 7 +3 1 hex 4 3 13 14 8 7 15 16 +4 1 hex 3 10 17 13 7 12 18 15 +5 1 hex 5 6 7 8 19 20 21 22 +6 2 hex 6 11 12 7 20 23 24 21 +7 2 hex 8 7 15 16 22 21 25 26 +8 2 hex 7 12 18 15 21 24 27 25 +1 1 quad 1 5 8 4 +2 1 quad 4 8 16 14 +3 1 quad 5 19 22 8 +4 1 quad 8 22 26 16 +5 1 quad 13 14 16 15 +6 1 quad 17 13 15 18 +7 1 quad 15 16 26 25 +8 1 quad 18 15 25 27 +9 1 quad 1 4 3 2 +10 1 quad 2 3 10 9 +11 1 quad 4 14 13 3 +12 1 quad 3 13 17 10 +13 1 quad 1 2 6 5 +14 1 quad 2 9 11 6 +15 1 quad 5 6 20 19 +16 1 quad 6 11 23 20 +17 2 quad 9 10 12 11 +18 2 quad 10 17 18 12 +19 2 quad 12 18 27 24 +20 2 quad 21 25 27 24 +21 3 quad 19 22 21 20 +22 3 quad 11 12 24 23 +23 3 quad 20 21 24 23 +24 3 quad 22 26 25 21 diff --git a/deal.II/tests/quick_tests/CMakeLists.txt b/deal.II/tests/quick_tests/CMakeLists.txt new file mode 100644 index 0000000000..e8cac30ca9 --- /dev/null +++ b/deal.II/tests/quick_tests/CMakeLists.txt @@ -0,0 +1,56 @@ +## --------------------------------------------------------------------- +## $Id$ +## +## Copyright (C) 2013 by the deal.II authors +## +## This file is part of the deal.II library. +## +## The deal.II library is free software; you can use it, redistribute +## it, and/or modify it under the terms of the GNU Lesser General +## Public License as published by the Free Software Foundation; either +## version 2.1 of the License, or (at your option) any later version. +## The full text of the license can be found in the file LICENSE at +## the top level of the deal.II distribution. 
+## +## --------------------------------------------------------------------- + +# +# A minimalistic set of tests: +# + +ENABLE_TESTING() + +FOREACH(_build ${DEAL_II_BUILD_TYPES}) + STRING(TOLOWER ${_build} _build_lowercase) + + # + # Test whether thread affinity is well behaved: + # + SET(_target affinity.${_build_lowercase}) + + ADD_EXECUTABLE(${_target} EXCLUDE_FROM_ALL affinity.cc) + DEAL_II_INSOURCE_SETUP_TARGET(${_target} ${_build}) + + ADD_CUSTOM_COMMAND(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_target}-OK + COMMAND ${_target} > ${CMAKE_CURRENT_BINARY_DIR}/${_target}-OK 2>&1 || (rm ${_target}-OK && exit 1) + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + ADD_CUSTOM_TARGET(${_target}.run DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${_target}-OK) + + ADD_TEST(NAME ${_target} + COMMAND ${CMAKE_COMMAND} -DTRGT=${_target}.run -DTEST=${_target} + -DDEAL_II_BINARY_DIR=${CMAKE_BINARY_DIR} + -P ${CMAKE_SOURCE_DIR}/cmake/scripts/run_test.cmake + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) + SET_TESTS_PROPERTIES(${_target} PROPERTIES LABEL "sanity checks") + +ENDFOREACH() + +# +# A custom test target: +# +ADD_CUSTOM_TARGET(test + COMMAND ${CMAKE_CTEST_COMMAND} --force-new-ctest-process + WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + ) diff --git a/deal.II/contrib/test_affinity/test_affinity.cc b/deal.II/tests/quick_tests/affinity.cc similarity index 78% rename from deal.II/contrib/test_affinity/test_affinity.cc rename to deal.II/tests/quick_tests/affinity.cc index 81549ae370..96a6d79fa1 100644 --- a/deal.II/contrib/test_affinity/test_affinity.cc +++ b/deal.II/tests/quick_tests/affinity.cc @@ -1,4 +1,5 @@ #include +#include #include int main () @@ -18,7 +19,9 @@ int main () return -1; } - unsigned int bits_set = CPU_COUNT(&my_set); + unsigned int bits_set = 0;//not supported on old kernels: CPU_COUNT(&my_set); + for (int i=0;i
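The affinity.cc hunk above (truncated here) works around the fact that the CPU_COUNT() macro is unavailable on older glibc/kernel headers by counting the bits of the affinity mask in a plain loop. A minimal, self-contained sketch of that technique follows; it is illustrative only, assumes a Linux/glibc environment, and its includes, messages and exact loop body are not taken from the patch.

    // Illustrative sketch, not the patch's code: count the CPUs in the
    // affinity mask by hand instead of relying on CPU_COUNT(), which is
    // missing from old glibc versions.
    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE
    #endif
    #include <sched.h>
    #include <cstdio>

    int main()
    {
      cpu_set_t my_set;
      CPU_ZERO(&my_set);

      // Ask the kernel for the affinity mask of the calling process (pid 0).
      if (sched_getaffinity(0, sizeof(my_set), &my_set) != 0)
        {
          std::printf("sched_getaffinity() failed\n");
          return -1;
        }

      // Replacement for CPU_COUNT(&my_set): walk all possible CPU slots and
      // count the ones that are set in the mask.
      unsigned int bits_set = 0;
      for (int i = 0; i < CPU_SETSIZE; ++i)
        if (CPU_ISSET(i, &my_set))
          ++bits_set;

      std::printf("%u CPU(s) in the affinity mask\n", bits_set);
      return 0;
    }

Looping up to CPU_SETSIZE with CPU_ISSET() is equivalent to CPU_COUNT() wherever cpu_set_t is available, at the cost of a small fixed number of iterations, which is why it is a safe fallback on old kernels.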