From ca88ef19f6d140deeefd42ac2387b475975c9bdc Mon Sep 17 00:00:00 2001
From: Matthias Maier
Date: Tue, 23 Apr 2013 08:32:57 +0000
Subject: [PATCH] Switch to bundled tbb version "4.1 Update 2013/04/01"

- Enable compilation with clang version 3.0 by making a cast in ASM code explicit
- Suppress warnings about pointer-to-object to pointer-to-function casts

git-svn-id: https://svn.dealii.org/trunk@29365 0785d39b-7218-0410-832d-ea1e28bc413d
---
 deal.II/bundled/CMakeLists.txt                |    4 +-
 deal.II/bundled/functionparser/CMakeLists.txt |    2 +-
 deal.II/bundled/tbb30_104oss/CHANGES          |  973 -----
 deal.II/bundled/tbb30_104oss/COPYING          |  353 --
 deal.II/bundled/tbb30_104oss/README           |   11 -
 .../bundled/tbb30_104oss/build/AIX.gcc.inc    |   85 -
 deal.II/bundled/tbb30_104oss/build/AIX.inc    |   74 -
 .../tbb30_104oss/build/FreeBSD.gcc.inc        |   93 -
 .../bundled/tbb30_104oss/build/FreeBSD.inc    |   82 -
 .../bundled/tbb30_104oss/build/Makefile.rml   |  156 -
 .../bundled/tbb30_104oss/build/Makefile.tbb   |  131 -
 .../tbb30_104oss/build/Makefile.tbbmalloc     |  200 -
 .../bundled/tbb30_104oss/build/Makefile.test  |  290 --
 .../bundled/tbb30_104oss/build/SunOS.gcc.inc  |  100 -
 deal.II/bundled/tbb30_104oss/build/SunOS.inc  |   91 -
 .../tbb30_104oss/build/SunOS.suncc.inc        |   98 -
 .../bundled/tbb30_104oss/build/codecov.txt    |    7 -
 deal.II/bundled/tbb30_104oss/build/common.inc |  108 -
 .../tbb30_104oss/build/common_rules.inc       |  134 -
 deal.II/bundled/tbb30_104oss/build/detect.js  |  129 -
 .../tbb30_104oss/build/generate_tbbvars.bat   |   74 -
 .../tbb30_104oss/build/generate_tbbvars.sh    |   76 -
 deal.II/bundled/tbb30_104oss/build/index.html |  230 --
 .../bundled/tbb30_104oss/build/linux.gcc.inc  |  109 -
 .../bundled/tbb30_104oss/build/linux.icc.inc  |  103 -
 deal.II/bundled/tbb30_104oss/build/linux.inc  |  122 -
 .../bundled/tbb30_104oss/build/macos.gcc.inc  |   90 -
 .../bundled/tbb30_104oss/build/macos.icc.inc  |   75 -
 deal.II/bundled/tbb30_104oss/build/macos.inc  |   85 -
 .../tbb30_104oss/build/suncc.map.pause        |    1 -
 .../tbb30_104oss/build/test_launcher.bat      |   36 -
 .../tbb30_104oss/build/test_launcher.sh       |   42 -
 .../tbb30_104oss/build/version_info_aix.sh    |   42 -
 .../tbb30_104oss/build/version_info_linux.sh  |   42 -
 .../tbb30_104oss/build/version_info_macos.sh  |   39 -
 .../tbb30_104oss/build/version_info_sunos.sh  |   39 -
 .../build/version_info_windows.js             |  136 -
 .../tbb30_104oss/build/vsproject/index.html   |   31 -
 .../tbb30_104oss/build/vsproject/makefile.sln |  100 -
 .../tbb30_104oss/build/vsproject/tbb.vcproj   |  506 ---
 .../build/vsproject/tbbmalloc.vcproj          |  452 ---
 .../build/vsproject/tbbmalloc_proxy.vcproj    |  206 -
 .../build/vsproject/version_string.tmp        |    1 -
 .../bundled/tbb30_104oss/build/windows.cl.inc |  123 -
 .../tbb30_104oss/build/windows.gcc.inc        |  132 -
 .../tbb30_104oss/build/windows.icl.inc        |  151 -
 .../bundled/tbb30_104oss/build/windows.inc    |  102 -
 .../bundled/tbb30_104oss/build/xbox360.cl.inc |  119 -
 .../bundled/tbb30_104oss/build/xbox360.inc    |   77 -
 .../bundled/tbb30_104oss/include/index.html   |   24 -
 .../include/tbb/_concurrent_queue_internal.h  | 1016 -----
 .../tbb/_concurrent_unordered_internal.h      | 1408 -------
 .../tbb30_104oss/include/tbb/_tbb_windef.h    |   81 -
 .../tbb30_104oss/include/tbb/aligned_space.h  |   55 -
 .../bundled/tbb30_104oss/include/tbb/atomic.h |  363 --
 .../tbb30_104oss/include/tbb/blocked_range.h  |  129 -
 .../include/tbb/blocked_range2d.h             |   97 -
 .../include/tbb/blocked_range3d.h             |  116 -
 .../include/tbb/cache_aligned_allocator.h     |  133 -
 .../tbb30_104oss/include/tbb/combinable.h     |   80 -
 .../include/tbb/compat/condition_variable     |  459 ---
 .../tbb30_104oss/include/tbb/compat/ppl.h     |   68 -
.../tbb30_104oss/include/tbb/compat/thread | 54 - .../include/tbb/concurrent_hash_map.h | 1406 ------- .../include/tbb/concurrent_queue.h | 413 -- .../include/tbb/concurrent_unordered_map.h | 241 -- .../include/tbb/concurrent_vector.h | 1060 ------ .../include/tbb/critical_section.h | 141 - .../include/tbb/enumerable_thread_specific.h | 999 ----- .../tbb30_104oss/include/tbb/index.html | 28 - .../include/tbb/machine/ibm_aix51.h | 60 - .../include/tbb/machine/linux_common.h | 91 - .../include/tbb/machine/linux_ia32.h | 216 -- .../include/tbb/machine/linux_ia64.h | 170 - .../include/tbb/machine/linux_intel64.h | 143 - .../include/tbb/machine/mac_ppc.h | 82 - .../include/tbb/machine/macos_common.h | 126 - .../include/tbb/machine/sunos_sparc.h | 228 -- .../include/tbb/machine/windows_api.h | 56 - .../include/tbb/machine/windows_ia32.h | 200 - .../include/tbb/machine/windows_intel64.h | 132 - .../include/tbb/machine/xbox360_ppc.h | 121 - .../bundled/tbb30_104oss/include/tbb/mutex.h | 240 -- .../tbb30_104oss/include/tbb/null_mutex.h | 63 - .../tbb30_104oss/include/tbb/null_rw_mutex.h | 65 - .../tbb30_104oss/include/tbb/parallel_do.h | 508 --- .../tbb30_104oss/include/tbb/parallel_for.h | 241 -- .../include/tbb/parallel_for_each.h | 77 - .../include/tbb/parallel_invoke.h | 359 -- .../include/tbb/parallel_reduce.h | 387 -- .../tbb30_104oss/include/tbb/parallel_scan.h | 351 -- .../tbb30_104oss/include/tbb/parallel_sort.h | 227 -- .../tbb30_104oss/include/tbb/parallel_while.h | 194 - .../tbb30_104oss/include/tbb/partitioner.h | 228 -- .../tbb30_104oss/include/tbb/pipeline.h | 559 --- .../tbb30_104oss/include/tbb/queuing_mutex.h | 131 - .../include/tbb/queuing_rw_mutex.h | 173 - .../include/tbb/reader_writer_lock.h | 240 -- .../include/tbb/recursive_mutex.h | 240 -- .../include/tbb/scalable_allocator.h | 205 - .../tbb30_104oss/include/tbb/spin_mutex.h | 192 - .../tbb30_104oss/include/tbb/spin_rw_mutex.h | 228 -- .../bundled/tbb30_104oss/include/tbb/task.h | 838 ----- .../tbb30_104oss/include/tbb/task_group.h | 248 -- .../include/tbb/task_scheduler_init.h | 106 - .../include/tbb/task_scheduler_observer.h | 74 - .../bundled/tbb30_104oss/include/tbb/tbb.h | 80 - .../tbb30_104oss/include/tbb/tbb_allocator.h | 214 -- .../tbb30_104oss/include/tbb/tbb_config.h | 208 - .../tbb30_104oss/include/tbb/tbb_exception.h | 362 -- .../tbb30_104oss/include/tbb/tbb_machine.h | 691 ---- .../tbb30_104oss/include/tbb/tbb_profiling.h | 105 - .../tbb30_104oss/include/tbb/tbb_stddef.h | 334 -- .../tbb30_104oss/include/tbb/tbb_thread.h | 293 -- .../include/tbb/tbbmalloc_proxy.h | 74 - .../tbb30_104oss/include/tbb/tick_count.h | 155 - deal.II/bundled/tbb30_104oss/index.html | 44 - .../bundled/tbb30_104oss/src/CMakeLists.txt | 78 - deal.II/bundled/tbb30_104oss/src/index.html | 77 - .../src/old/concurrent_queue_v2.cpp | 382 -- .../src/old/concurrent_queue_v2.h | 328 -- .../src/old/concurrent_vector_v2.cpp | 277 -- .../src/old/concurrent_vector_v2.h | 522 --- .../tbb30_104oss/src/old/spin_rw_mutex_v2.cpp | 166 - .../tbb30_104oss/src/old/spin_rw_mutex_v2.h | 185 - .../bundled/tbb30_104oss/src/old/task_v2.cpp | 46 - .../src/old/test_concurrent_queue_v2.cpp | 356 -- .../src/old/test_concurrent_vector_v2.cpp | 565 --- .../tbb30_104oss/src/old/test_mutex_v2.cpp | 268 -- .../src/perf/fibonacci_cutoff.cpp | 134 - .../src/perf/fibonacci_impl_tbb.cpp | 86 - .../bundled/tbb30_104oss/src/perf/perf.cpp | 859 ----- deal.II/bundled/tbb30_104oss/src/perf/perf.h | 265 -- .../tbb30_104oss/src/perf/perf_sched.cpp | 423 --- 
.../tbb30_104oss/src/perf/run_statistics.sh | 40 - .../tbb30_104oss/src/perf/statistics.cpp | 452 --- .../tbb30_104oss/src/perf/statistics.h | 194 - .../tbb30_104oss/src/perf/statistics_xml.h | 208 - .../tbb30_104oss/src/perf/time_framework.h | 359 -- .../tbb30_104oss/src/perf/time_hash_map.cpp | 268 -- .../src/perf/time_hash_map_fill.cpp | 170 - .../src/perf/time_locked_work.cpp | 174 - .../tbb30_104oss/src/perf/time_sandbox.h | 179 - .../tbb30_104oss/src/perf/time_vector.cpp | 257 -- .../tbb30_104oss/src/rml/client/index.html | 43 - .../src/rml/client/library_assert.h | 41 - .../src/rml/client/omp_dynamic_link.cpp | 32 - .../src/rml/client/omp_dynamic_link.h | 37 - .../tbb30_104oss/src/rml/client/rml_factory.h | 111 - .../tbb30_104oss/src/rml/client/rml_omp.cpp | 44 - .../tbb30_104oss/src/rml/client/rml_tbb.cpp | 46 - .../tbb30_104oss/src/rml/include/index.html | 30 - .../tbb30_104oss/src/rml/include/rml_base.h | 196 - .../tbb30_104oss/src/rml/include/rml_omp.h | 138 - .../tbb30_104oss/src/rml/include/rml_tbb.h | 108 - .../bundled/tbb30_104oss/src/rml/index.html | 32 - .../src/rml/perfor/omp_nested.cpp | 152 - .../src/rml/perfor/omp_simple.cpp | 168 - .../src/rml/perfor/tbb_multi_omp.cpp | 194 - .../src/rml/perfor/tbb_simple.cpp | 199 - .../src/rml/perfor/thread_level.h | 142 - .../tbb30_104oss/src/rml/server/index.html | 19 - .../tbb30_104oss/src/rml/server/irml.rc | 126 - .../src/rml/server/job_automaton.h | 153 - .../src/rml/server/lin-rml-export.def | 38 - .../src/rml/server/rml_server.cpp | 3330 ----------------- .../src/rml/server/thread_monitor.h | 256 -- .../src/rml/server/wait_counter.h | 81 - .../src/rml/server/win32-rml-export.def | 35 - .../src/rml/server/win64-rml-export.def | 35 - .../src/rml/test/rml_omp_stub.cpp | 71 - .../src/rml/test/test_job_automaton.cpp | 153 - .../src/rml/test/test_rml_mixed.cpp | 253 -- .../src/rml/test/test_rml_omp.cpp | 196 - .../src/rml/test/test_rml_omp_c_linkage.c | 34 - .../src/rml/test/test_rml_tbb.cpp | 201 - .../tbb30_104oss/src/rml/test/test_server.h | 452 --- .../src/rml/test/test_thread_monitor.cpp | 118 - .../bundled/tbb30_104oss/src/tbb/arena.cpp | 442 --- deal.II/bundled/tbb30_104oss/src/tbb/arena.h | 504 --- .../src/tbb/cache_aligned_allocator.cpp | 277 -- .../tbb30_104oss/src/tbb/cilk-tbb-interop.h | 120 - .../src/tbb/concurrent_hash_map.cpp | 66 - .../src/tbb/concurrent_monitor.cpp | 109 - .../tbb30_104oss/src/tbb/concurrent_monitor.h | 203 - .../tbb30_104oss/src/tbb/concurrent_queue.cpp | 613 --- .../src/tbb/concurrent_vector.cpp | 603 --- .../src/tbb/condition_variable.cpp | 213 -- .../tbb30_104oss/src/tbb/critical_section.cpp | 39 - .../tbb30_104oss/src/tbb/custom_scheduler.h | 485 --- .../tbb30_104oss/src/tbb/dynamic_link.cpp | 138 - .../tbb30_104oss/src/tbb/dynamic_link.h | 102 - .../bundled/tbb30_104oss/src/tbb/governor.cpp | 340 -- .../bundled/tbb30_104oss/src/tbb/governor.h | 197 - .../src/tbb/ia32-masm/atomic_support.asm | 196 - .../src/tbb/ia32-masm/lock_byte.asm | 46 - .../src/tbb/ia64-gas/atomic_support.s | 678 ---- .../tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s | 35 - .../tbb30_104oss/src/tbb/ia64-gas/lock_byte.s | 54 - .../tbb30_104oss/src/tbb/ia64-gas/log2.s | 67 - .../tbb30_104oss/src/tbb/ia64-gas/pause.s | 41 - .../src/tbb/ibm_aix51/atomic_support.c | 55 - .../bundled/tbb30_104oss/src/tbb/index.html | 32 - .../src/tbb/intel64-masm/atomic_support.asm | 80 - .../tbb30_104oss/src/tbb/intrusive_list.h | 255 -- .../tbb30_104oss/src/tbb/itt_notify.cpp | 80 - .../bundled/tbb30_104oss/src/tbb/itt_notify.h | 126 - 
.../tbb30_104oss/src/tbb/lin32-tbb-export.def | 375 -- .../tbb30_104oss/src/tbb/lin64-tbb-export.def | 357 -- .../src/tbb/lin64ipf-tbb-export.def | 401 -- .../tbb30_104oss/src/tbb/mac32-tbb-export.def | 343 -- .../tbb30_104oss/src/tbb/mac64-tbb-export.def | 339 -- .../bundled/tbb30_104oss/src/tbb/mailbox.h | 191 - .../bundled/tbb30_104oss/src/tbb/market.cpp | 304 -- deal.II/bundled/tbb30_104oss/src/tbb/market.h | 210 -- .../bundled/tbb30_104oss/src/tbb/mutex.cpp | 148 - .../tbb30_104oss/src/tbb/observer_proxy.cpp | 237 -- .../tbb30_104oss/src/tbb/observer_proxy.h | 69 - .../bundled/tbb30_104oss/src/tbb/pipeline.cpp | 748 ---- .../tbb30_104oss/src/tbb/private_server.cpp | 388 -- .../tbb30_104oss/src/tbb/queuing_mutex.cpp | 117 - .../tbb30_104oss/src/tbb/queuing_rw_mutex.cpp | 505 --- .../src/tbb/reader_writer_lock.cpp | 356 -- .../tbb30_104oss/src/tbb/recursive_mutex.cpp | 143 - .../tbb30_104oss/src/tbb/scheduler.cpp | 1176 ------ .../bundled/tbb30_104oss/src/tbb/scheduler.h | 556 --- .../tbb30_104oss/src/tbb/scheduler_common.h | 192 - .../tbb30_104oss/src/tbb/scheduler_utility.h | 141 - .../bundled/tbb30_104oss/src/tbb/semaphore.h | 132 - .../tbb30_104oss/src/tbb/spin_mutex.cpp | 68 - .../tbb30_104oss/src/tbb/spin_rw_mutex.cpp | 174 - deal.II/bundled/tbb30_104oss/src/tbb/task.cpp | 278 -- .../src/tbb/task_group_context.cpp | 279 -- .../tbb30_104oss/src/tbb/task_stream.h | 170 - .../tbb30_104oss/src/tbb/tbb_assert_impl.h | 101 - .../bundled/tbb30_104oss/src/tbb/tbb_main.cpp | 253 -- .../bundled/tbb30_104oss/src/tbb/tbb_main.h | 106 - .../bundled/tbb30_104oss/src/tbb/tbb_misc.cpp | 230 -- .../bundled/tbb30_104oss/src/tbb/tbb_misc.h | 161 - .../tbb30_104oss/src/tbb/tbb_resource.rc | 126 - .../tbb30_104oss/src/tbb/tbb_statistics.cpp | 174 - .../tbb30_104oss/src/tbb/tbb_statistics.h | 204 - .../tbb30_104oss/src/tbb/tbb_thread.cpp | 172 - .../tbb30_104oss/src/tbb/tbb_version.h | 101 - deal.II/bundled/tbb30_104oss/src/tbb/tls.h | 119 - .../src/tbb/tools_api/disable_warnings.h | 47 - .../src/tbb/tools_api/internal/ittnotify.h | 661 ---- .../src/tbb/tools_api/ittnotify.h | 1409 ------- .../src/tbb/tools_api/ittnotify_config.h | 105 - .../src/tbb/tools_api/ittnotify_static.c | 640 ---- .../src/tbb/tools_api/ittnotify_static.h | 231 -- .../src/tbb/tools_api/ittnotify_types.h | 75 - .../src/tbb/tools_api/legacy/ittnotify.h | 817 ---- .../src/tbb/tools_api/prototype/ittnotify.h | 148 - .../tbb30_104oss/src/tbb/win32-tbb-export.def | 297 -- .../src/tbb/win64-gcc-tbb-export.def | 365 -- .../tbb30_104oss/src/tbb/win64-tbb-export.def | 293 -- .../src/tbb/xbox360-tbb-export.def | 234 -- .../tbb30_104oss/src/tbbmalloc/Customize.h | 128 - .../tbb30_104oss/src/tbbmalloc/LifoList.h | 106 - .../tbb30_104oss/src/tbbmalloc/MapMemory.h | 101 - .../tbb30_104oss/src/tbbmalloc/Statistics.h | 137 - .../src/tbbmalloc/TypeDefinitions.h | 105 - .../tbb30_104oss/src/tbbmalloc/backend.cpp | 275 -- .../tbb30_104oss/src/tbbmalloc/backref.cpp | 240 -- .../tbb30_104oss/src/tbbmalloc/frontend.cpp | 2057 ---------- .../src/tbbmalloc/large_objects.cpp | 272 -- .../src/tbbmalloc/lin-tbbmalloc-export.def | 70 - .../src/tbbmalloc/lin32-proxy-export.def | 59 - .../src/tbbmalloc/lin64-proxy-export.def | 59 - .../src/tbbmalloc/lin64ipf-proxy-export.def | 59 - .../src/tbbmalloc/mac32-tbbmalloc-export.def | 36 - .../src/tbbmalloc/mac64-tbbmalloc-export.def | 36 - .../tbb30_104oss/src/tbbmalloc/proxy.cpp | 472 --- .../tbb30_104oss/src/tbbmalloc/proxy.h | 72 - .../tbbmalloc/tbb_function_replacement.cpp | 476 --- 
.../src/tbbmalloc/tbb_function_replacement.h | 84 - .../tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp | 221 -- .../tbb30_104oss/src/tbbmalloc/tbbmalloc.rc | 129 - .../src/tbbmalloc/tbbmalloc_internal.h | 279 -- .../tbbmalloc/win-gcc-tbbmalloc-export.def | 45 - .../src/tbbmalloc/win32-tbbmalloc-export.def | 42 - .../src/tbbmalloc/win64-tbbmalloc-export.def | 42 - .../tbbmalloc/xbox360-tbbmalloc-export.def | 42 - .../tbb41_20130401oss/src/CMakeLists.txt | 8 +- .../src/tbb/tools_api/ittnotify_config.h | 2 +- 286 files changed, 11 insertions(+), 67079 deletions(-) delete mode 100644 deal.II/bundled/tbb30_104oss/CHANGES delete mode 100644 deal.II/bundled/tbb30_104oss/COPYING delete mode 100644 deal.II/bundled/tbb30_104oss/README delete mode 100644 deal.II/bundled/tbb30_104oss/build/AIX.gcc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/AIX.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/FreeBSD.gcc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/FreeBSD.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/Makefile.rml delete mode 100644 deal.II/bundled/tbb30_104oss/build/Makefile.tbb delete mode 100644 deal.II/bundled/tbb30_104oss/build/Makefile.tbbmalloc delete mode 100644 deal.II/bundled/tbb30_104oss/build/Makefile.test delete mode 100644 deal.II/bundled/tbb30_104oss/build/SunOS.gcc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/SunOS.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/SunOS.suncc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/codecov.txt delete mode 100644 deal.II/bundled/tbb30_104oss/build/common.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/common_rules.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/detect.js delete mode 100644 deal.II/bundled/tbb30_104oss/build/generate_tbbvars.bat delete mode 100644 deal.II/bundled/tbb30_104oss/build/generate_tbbvars.sh delete mode 100644 deal.II/bundled/tbb30_104oss/build/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/build/linux.gcc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/linux.icc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/linux.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/macos.gcc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/macos.icc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/macos.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/suncc.map.pause delete mode 100644 deal.II/bundled/tbb30_104oss/build/test_launcher.bat delete mode 100644 deal.II/bundled/tbb30_104oss/build/test_launcher.sh delete mode 100644 deal.II/bundled/tbb30_104oss/build/version_info_aix.sh delete mode 100644 deal.II/bundled/tbb30_104oss/build/version_info_linux.sh delete mode 100644 deal.II/bundled/tbb30_104oss/build/version_info_macos.sh delete mode 100644 deal.II/bundled/tbb30_104oss/build/version_info_sunos.sh delete mode 100644 deal.II/bundled/tbb30_104oss/build/version_info_windows.js delete mode 100644 deal.II/bundled/tbb30_104oss/build/vsproject/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/build/vsproject/makefile.sln delete mode 100644 deal.II/bundled/tbb30_104oss/build/vsproject/tbb.vcproj delete mode 100644 deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc.vcproj delete mode 100644 deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc_proxy.vcproj delete mode 100644 deal.II/bundled/tbb30_104oss/build/vsproject/version_string.tmp delete mode 100644 deal.II/bundled/tbb30_104oss/build/windows.cl.inc delete mode 100644 
deal.II/bundled/tbb30_104oss/build/windows.gcc.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/windows.icl.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/windows.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/xbox360.cl.inc delete mode 100644 deal.II/bundled/tbb30_104oss/build/xbox360.inc delete mode 100644 deal.II/bundled/tbb30_104oss/include/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_queue_internal.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_unordered_internal.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/_tbb_windef.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/aligned_space.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/atomic.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/blocked_range.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/blocked_range2d.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/blocked_range3d.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/cache_aligned_allocator.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/combinable.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/compat/condition_variable delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/compat/ppl.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/compat/thread delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/concurrent_hash_map.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/concurrent_queue.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/concurrent_unordered_map.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/concurrent_vector.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/critical_section.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/enumerable_thread_specific.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/ibm_aix51.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_common.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia32.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia64.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_intel64.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/mac_ppc.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/macos_common.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/sunos_sparc.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_api.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_ia32.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_intel64.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/machine/xbox360_ppc.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/null_mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/null_rw_mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/parallel_do.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/parallel_for.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/parallel_for_each.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/parallel_invoke.h delete mode 100644 
deal.II/bundled/tbb30_104oss/include/tbb/parallel_reduce.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/parallel_scan.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/parallel_sort.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/parallel_while.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/partitioner.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/pipeline.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/queuing_mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/queuing_rw_mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/reader_writer_lock.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/recursive_mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/scalable_allocator.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/spin_mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/spin_rw_mutex.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/task.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/task_group.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_init.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_observer.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb_allocator.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb_config.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb_exception.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb_machine.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb_profiling.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb_stddef.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbb_thread.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tbbmalloc_proxy.h delete mode 100644 deal.II/bundled/tbb30_104oss/include/tbb/tick_count.h delete mode 100644 deal.II/bundled/tbb30_104oss/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/src/CMakeLists.txt delete mode 100644 deal.II/bundled/tbb30_104oss/src/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/task_v2.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/test_concurrent_queue_v2.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/test_concurrent_vector_v2.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/old/test_mutex_v2.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/fibonacci_cutoff.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/fibonacci_impl_tbb.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/perf.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/perf.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/perf_sched.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/run_statistics.sh delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/statistics.cpp delete mode 100644 
deal.II/bundled/tbb30_104oss/src/perf/statistics.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/statistics_xml.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/time_framework.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/time_hash_map.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/time_hash_map_fill.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/time_locked_work.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/time_sandbox.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/perf/time_vector.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/client/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/client/library_assert.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/client/rml_factory.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/client/rml_omp.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/client/rml_tbb.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/include/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/include/rml_base.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/include/rml_omp.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/include/rml_tbb.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_nested.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_simple.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_multi_omp.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_simple.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/perfor/thread_level.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/irml.rc delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/job_automaton.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/lin-rml-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/rml_server.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/thread_monitor.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/wait_counter.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/win32-rml-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/server/win64-rml-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/rml_omp_stub.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/test_job_automaton.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_mixed.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp_c_linkage.c delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_tbb.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/test_server.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/rml/test/test_thread_monitor.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/arena.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/arena.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/cache_aligned_allocator.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/cilk-tbb-interop.h delete mode 100644 
deal.II/bundled/tbb30_104oss/src/tbb/concurrent_hash_map.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/concurrent_queue.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/concurrent_vector.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/condition_variable.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/critical_section.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/custom_scheduler.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/governor.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/governor.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/atomic_support.asm delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/lock_byte.asm delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/atomic_support.s delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/lock_byte.s delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/log2.s delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/pause.s delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/ibm_aix51/atomic_support.c delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/index.html delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/intel64-masm/atomic_support.asm delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/intrusive_list.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/lin32-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/lin64-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/lin64ipf-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/mac32-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/mac64-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/mailbox.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/market.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/market.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/mutex.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/pipeline.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/private_server.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/queuing_mutex.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/queuing_rw_mutex.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/reader_writer_lock.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/recursive_mutex.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/scheduler.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/scheduler.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/scheduler_common.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/scheduler_utility.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/semaphore.h delete mode 100644 
deal.II/bundled/tbb30_104oss/src/tbb/spin_mutex.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/spin_rw_mutex.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/task.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/task_group_context.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/task_stream.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_assert_impl.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_resource.rc delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_thread.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tbb_version.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tls.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/disable_warnings.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/internal/ittnotify.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_config.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.c delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_types.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/legacy/ittnotify.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/tools_api/prototype/ittnotify.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/win32-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/win64-gcc-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/win64-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbb/xbox360-tbb-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/Customize.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/LifoList.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/MapMemory.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/Statistics.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/TypeDefinitions.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/backend.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/backref.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/frontend.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/large_objects.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin-tbbmalloc-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin32-proxy-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64-proxy-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64ipf-proxy-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac32-tbbmalloc-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac64-tbbmalloc-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.h delete mode 100644 
deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.rc delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc_internal.h delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/win-gcc-tbbmalloc-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/win32-tbbmalloc-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/win64-tbbmalloc-export.def delete mode 100644 deal.II/bundled/tbb30_104oss/src/tbbmalloc/xbox360-tbbmalloc-export.def diff --git a/deal.II/bundled/CMakeLists.txt b/deal.II/bundled/CMakeLists.txt index dde445cb75..44418f02d4 100644 --- a/deal.II/bundled/CMakeLists.txt +++ b/deal.II/bundled/CMakeLists.txt @@ -68,13 +68,13 @@ ENDIF() # IF(NOT CMAKE_SYSTEM_NAME MATCHES "CYGWIN") # - # Cygwin is currently unsupported by tbb + # Cygwin is unsupported by tbb # SET(FEATURE_THREADS_HAVE_BUNDLED TRUE) OPTION(DEAL_II_FORCE_BUNDLED_THREADS "Always use the bundled tbb library instead of an external one." OFF) - SET(TBB_FOLDER "${CMAKE_SOURCE_DIR}/bundled/tbb30_104oss") + SET(TBB_FOLDER "${CMAKE_SOURCE_DIR}/bundled/tbb41_20130401oss") ENDIF() diff --git a/deal.II/bundled/functionparser/CMakeLists.txt b/deal.II/bundled/functionparser/CMakeLists.txt index ed9c017b31..45afe46683 100644 --- a/deal.II/bundled/functionparser/CMakeLists.txt +++ b/deal.II/bundled/functionparser/CMakeLists.txt @@ -14,7 +14,7 @@ # # Remove -Wall and -pedantic from CMAKE_CXX_FLAGS (in directory scope) to -# aboid some annoying warnings... +# avoid some annoying warnings... # STRIP_FLAG(CMAKE_CXX_FLAGS "-Wall") STRIP_FLAG(CMAKE_CXX_FLAGS "-pedantic") diff --git a/deal.II/bundled/tbb30_104oss/CHANGES b/deal.II/bundled/tbb30_104oss/CHANGES deleted file mode 100644 index 4a17d934fb..0000000000 --- a/deal.II/bundled/tbb30_104oss/CHANGES +++ /dev/null @@ -1,973 +0,0 @@ -TBB 3.0 Update 3 commercial-aligned release - -Changes (w.r.t. TBB 3.0 Update 2 commercial-aligned release): - -- cache_aligned_allocator class reworked to use scalable_aligned_malloc. -- Improved performance of count() and equal_range() methods - in concurrent_unordered_map. -- Improved implementation of 64-bit atomic loads and stores on 32-bit - platforms, including compilation with VC 7.1. -- Added implementation of atomic operations on top of OSAtomic API - provided by Mac OS* X. -- Fixed a data race in task scheduler destruction that on rare occasion - could result in memory corruption. -- Removed gratuitous try/catch blocks surrounding thread function calls - in tbb_thread. -- Xcode* projects were added for sudoku and game_of_life examples. -- Xcode* projects were updated to work without TBB framework. - -Open-source contributions integrated: - -- MinGW-64 basic support by brsomoza (partially). -- Patch for atomic.h by Andrey Semashev. -- Support for AIX & GCC on PowerPC by Giannis Papadopoulos. -- Various improvements by Raf Schietekat. - ------------------------------------------------------------------------- -TBB 3.0 Update 2 commercial-aligned release - -Changes (w.r.t. TBB 3.0 Update 1 commercial-aligned release): - -- Destructor of tbb::task_group class throws missing_wait exception - if there are tasks running when it is invoked. -- Cilk-TBB interop layer added to protect TBB TLS in case of - "Cilk-TBB-Cilk nesting" usage model. 
-- Compilation fix for dependent template names in concurrent_queue. -- Memory allocator code refactored to ease development and maintenance. - -Bug Fixes: -- Improved interoperability with other Intel software tools on Linux in - case of dynamic replacement of memory allocator (1700) -- Fixed install issues that prevented installation on - Mac OS* X 10.6.4 (1711). - ------------------------------------------------------------------------- -TBB 3.0 Update 1 commercial-aligned release - -Changes (w.r.t. TBB 3.0 commercial-aligned release): - -- Decreased memory fragmentation by allocations bigger than 8K. -- Lazily allocate worker threads, to avoid creating unnecessary stacks. - -Bugs fixed: - -- TBB allocator used much more memory than malloc (1703) - see above. -- Deadlocks happened in some specific initialization scenarios - of the TBB allocator (1701, 1704). -- Regression in enumerable_thread_specific: excessive requirements - for object constructors. -- A bug in construction of parallel_pipeline filters when body instance - was a temporary object. -- Incorrect usage of memory fences on PowerPC and XBOX360 platforms. -- A subtle issue in task group context binding that could result - in cancelation signal being missed by nested task groups. -- Incorrect construction of concurrent_unordered_map if specified - number of buckets is not power of two. -- Broken count() and equal_range() of concurrent_unordered_map. -- Return type of postfix form of operator++ for hash map's iterators. - ------------------------------------------------------------------------- -TBB 3.0 commercial-aligned release - -Changes (w.r.t. TBB 2.2 Update 3 commercial-aligned release): - -- All open-source-release changes down to TBB 2.2 U3 below - were incorporated into this release. - ------------------------------------------------------------------------- -20100406 open-source release - -Changes (w.r.t. 20100310 open-source release): - -- Added support for Microsoft* Visual Studio* 2010, including binaries. -- Added a PDF file with recommended Design Patterns for TBB. -- Added parallel_pipeline function and companion classes and functions - that provide a strongly typed lambda-friendly pipeline interface. -- Reworked enumerable_thread_specific to use a custom implementation of - hash map that is more efficient for ETS usage models. -- Added example for class task_group; see examples/task_group/sudoku. -- Removed two examples, as they were long outdated and superceded: - pipeline/text_filter (use pipeline/square); - parallel_while/parallel_preorder (use parallel_do/parallel_preorder). -- PDF documentation updated. -- Other fixes and changes in code, tests, and examples. - -Bugs fixed: - -- Eliminated build errors with MinGW32. -- Fixed post-build step and other issues in VS projects for examples. -- Fixed discrepancy between scalable_realloc and scalable_msize that - caused crashes with malloc replacement on Windows. - ------------------------------------------------------------------------- -20100310 open-source release - -Changes (w.r.t. TBB 2.2 Update 3 commercial-aligned release): - -- Version macros changed in anticipation of a future release. -- Directory structure aligned with Intel(R) C++ Compiler; - now TBB binaries reside in //[bin|lib] - (in TBB 2.x, it was [bin|lib]//). -- Visual Studio projects changed for examples: instead of separate set - of files for each VS version, now there is single 'msvs' directory - that contains workspaces for MS C++ compiler (_cl.sln) and - Intel C++ compiler (_icl.sln). 
Works with VS 2005 and above. -- The name versioning scheme for backward compatibility was improved; - now compatibility-breaking changes are done in a separate namespace. -- Added concurrent_unordered_map implementation based on a prototype - developed in Microsoft for a future version of PPL. -- Added PPL-compatible writer-preference RW lock (reader_writer_lock). -- Added TBB_IMPLEMENT_CPP0X macro to control injection of C++0x names - implemented in TBB into namespace std. -- Added almost-C++0x-compatible std::condition_variable, plus a bunch - of other C++0x classes required by condition_variable. -- With TBB_IMPLEMENT_CPP0X, tbb_thread can be also used as std::thread. -- task.cpp was split into several translation units to structure - TBB scheduler sources layout. Static data layout and library - initialization logic were also updated. -- TBB scheduler reworked to prevent master threads from stealing - work belonging to other masters. -- Class task was extended with enqueue() method, and slightly changed - semantics of methods spawn() and destroy(). For exact semantics, - refer to TBB Reference manual. -- task_group_context now allows for destruction by non-owner threads. -- Added TBB_USE_EXCEPTIONS macro to control use of exceptions in TBB - headers. It turns off (i.e. sets to 0) automatically if specified - compiler options disable exception handling. -- TBB is enabled to run on top of Microsoft's Concurrency Runtime - on Windows* 7 (via our worker dispatcher known as RML). -- Removed old unused busy-waiting code in concurrent_queue. -- Described the advanced build & test options in src/index.html. -- Warning level for GCC raised with -Wextra and a few other options. -- Multiple fixes and improvements in code, tests, examples, and docs. - -Open-source contributions integrated: - -- Xbox support by Roman Lut (Deep Shadows), though further changes are - required to make it working; e.g. post-2.1 entry points are missing. -- "Eventcount" by Dmitry Vyukov evolved into concurrent_monitor, - an internal class used in the implementation of concurrent_queue. - ------------------------------------------------------------------------- -TBB 2.2 Update 3 commercial-aligned release - -Changes (w.r.t. TBB 2.2 Update 2 commercial-aligned release): - -- PDF documentation updated. - -Bugs fixed: - -- concurrent_hash_map compatibility issue exposed on Linux in case - two versions of the container were used by different modules. -- enforce 16 byte stack alignment for consistence with GCC; required - to work correctly with 128-bit variables processed by SSE. -- construct() methods of allocator classes now use global operator new. - ------------------------------------------------------------------------- -TBB 2.2 Update 2 commercial-aligned release - -Changes (w.r.t. TBB 2.2 Update 1 commercial-aligned release): - -- parallel_invoke and parallel_for_each now take function objects - by const reference, not by value. -- Building TBB with /MT is supported, to avoid dependency on particular - versions of Visual C++* runtime DLLs. TBB DLLs built with /MT - are located in vc_mt directory. -- Class critical_section introduced. -- Improvements in exception support: new exception classes introduced, - all exceptions are thrown via an out-of-line internal method. -- Improvements and fixes in the TBB allocator and malloc replacement, - including robust memory identification, and more reliable dynamic - function substitution on Windows*. -- Method swap() added to class tbb_thread. 
-- Methods rehash() and bucket_count() added to concurrent_hash_map. -- Added support for Visual Studio* 2010 Beta2. No special binaries - provided, but CRT-independent DLLs (vc_mt) should work. -- Other fixes and improvements in code, tests, examples, and docs. - -Open-source contributions integrated: - -- The fix to build 32-bit TBB on Mac OS* X 10.6. -- GCC-based port for SPARC Solaris by Michailo Matijkiw, with use of - earlier work by Raf Schietekat. - -Bugs fixed: - -- 159 - TBB build for PowerPC* running Mac OS* X. -- 160 - IBM* Java segfault if used with TBB allocator. -- crash in concurrent_queue (1616). - ------------------------------------------------------------------------- -TBB 2.2 Update 1 commercial-aligned release - -Changes (w.r.t. TBB 2.2 commercial-aligned release): - -- Incorporates all changes from open-source releases below. -- Documentation was updated. -- TBB scheduler auto-initialization now covers all possible use cases. -- concurrent_queue: made argument types of sizeof used in paddings - consistent with those actually used. -- Memory allocator was improved: supported corner case of user's malloc - calling scalable_malloc (non-Windows), corrected processing of - memory allocation requests during tbb memory allocator startup - (Linux). -- Windows malloc replacement has got better support for static objects. -- In pipeline setups that do not allow actual parallelism, execution - by a single thread is guaranteed, idle spinning eliminated, and - performance improved. -- RML refactoring and clean-up. -- New constructor for concurrent_hash_map allows reserving space for - a number of items. -- Operator delete() added to the TBB exception classes. -- Lambda support was improved in parallel_reduce. -- gcc 4.3 warnings were fixed for concurrent_queue. -- Fixed possible initialization deadlock in modules using TBB entities - during construction of global static objects. -- Copy constructor in concurrent_hash_map was fixed. -- Fixed a couple of rare crashes in the scheduler possible before - in very specific use cases. -- Fixed a rare crash in the TBB allocator running out of memory. -- New tests were implemented, including test_lambda.cpp that checks - support for lambda expressions. -- A few other small changes in code, tests, and documentation. - ------------------------------------------------------------------------- -20090809 open-source release - -Changes (w.r.t. TBB 2.2 commercial-aligned release): - -- Fixed known exception safety issues in concurrent_vector. -- Better concurrency of simultaneous grow requests in concurrent_vector. -- TBB allocator further improves performance of large object allocation. -- Problem with source of text relocations was fixed on Linux -- Fixed bugs related to malloc replacement under Windows -- A few other small changes in code and documentation. - ------------------------------------------------------------------------- -TBB 2.2 commercial-aligned release - -Changes (w.r.t. TBB 2.1 U4 commercial-aligned release): - -- Incorporates all changes from open-source releases below. -- Architecture folders renamed from em64t to intel64 and from itanium - to ia64. -- Major Interface version changed from 3 to 4. Deprecated interfaces - might be removed in future releases. -- Parallel algorithms that use partitioners have switched to use - the auto_partitioner by default. -- Improved memory allocator performance for allocations bigger than 8K. -- Added new thread-bound filters functionality for pipeline. 
-- New implementation of concurrent_hash_map that improves performance - significantly. -- A few other small changes in code and documentation. - ------------------------------------------------------------------------- -20090511 open-source release - -Changes (w.r.t. previous open-source release): - -- Basic support for MinGW32 development kit. -- Added tbb::zero_allocator class that initializes memory with zeros. - It can be used as an adaptor to any STL-compatible allocator class. -- Added tbb::parallel_for_each template function as alias to parallel_do. -- Added more overloads for tbb::parallel_for. -- Added support for exact exception propagation (can only be used with - compilers that support C++0x std::exception_ptr). -- tbb::atomic template class can be used with enumerations. -- mutex, recursive_mutex, spin_mutex, spin_rw_mutex classes extended - with explicit lock/unlock methods. -- Fixed size() and grow_to_at_least() methods of tbb::concurrent_vector - to provide space allocation guarantees. More methods added for - compatibility with std::vector, including some from C++0x. -- Preview of a lambda-friendly interface for low-level use of tasks. -- scalable_msize function added to the scalable allocator (Windows only). -- Rationalized internal auxiliary functions for spin-waiting and backoff. -- Several tests undergo decent refactoring. - -Changes affecting backward compatibility: - -- Improvements in concurrent_queue, including limited API changes. - The previous version is deprecated; its functionality is accessible - via methods of the new tbb::concurrent_bounded_queue class. -- grow* and push_back methods of concurrent_vector changed to return - iterators; old semantics is deprecated. - ------------------------------------------------------------------------- -TBB 2.1 Update 4 commercial-aligned release - -Changes (w.r.t. TBB 2.1 U3 commercial-aligned release): - -- Added tests for aligned memory allocations and malloc replacement. -- Several improvements for better bundling with Intel(R) C++ Compiler. -- A few other small changes in code and documentaion. - -Bugs fixed: - -- 150 - request to build TBB examples with debug info in release mode. -- backward compatibility issue with concurrent_queue on Windows. -- dependency on VS 2005 SP1 runtime libraries removed. -- compilation of GUI examples under Xcode* 3.1 (1577). -- On Windows, TBB allocator classes can be instantiated with const types - for compatibility with MS implementation of STL containers (1566). - ------------------------------------------------------------------------- -20090313 open-source release - -Changes (w.r.t. 20081109 open-source release): - -- Includes all changes introduced in TBB 2.1 Update 2 & Update 3 - commercial-aligned releases (see below for details). -- Added tbb::parallel_invoke template function. It runs up to 10 - user-defined functions in parallel and waits for them to complete. -- Added a special library providing ability to replace the standard - memory allocation routines in Microsoft* C/C++ RTL (malloc/free, - global new/delete, etc.) with the TBB memory allocator. - Usage details are described in include/tbb/tbbmalloc_proxy.h file. -- Task scheduler switched to use new implementation of its core - functionality (deque based task pool, new structure of arena slots). -- Preview of Microsoft* Visual Studio* 2005 project files for - building the library is available in build/vsproject folder. -- Added tests for aligned memory allocations and malloc replacement. 
-- Added parallel_for/game_of_life.net example (for Windows only) - showing TBB usage in a .NET application. -- A number of other fixes and improvements to code, tests, makefiles, - examples and documents. - -Bugs fixed: - -- The same list as in TBB 2.1 Update 4 right above. - ------------------------------------------------------------------------- -TBB 2.1 Update 3 commercial-aligned release - -Changes (w.r.t. TBB 2.1 U2 commercial-aligned release): - -- Added support for aligned allocations to the TBB memory allocator. -- Added a special library to use with LD_PRELOAD on Linux* in order to - replace the standard memory allocation routines in C/C++ with the - TBB memory allocator. -- Added null_mutex and null_rw_mutex: no-op classes interface-compliant - to other TBB mutexes. -- Improved performance of parallel_sort, to close most of the serial gap - with std::sort, and beat it on 2 and more cores. -- A few other small changes. - -Bugs fixed: - -- the problem where parallel_for hanged after exception throw - if affinity_partitioner was used (1556). -- get rid of VS warnings about mbstowcs deprecation (1560), - as well as some other warnings. -- operator== for concurrent_vector::iterator fixed to work correctly - with different vector instances. - ------------------------------------------------------------------------- -TBB 2.1 Update 2 commercial-aligned release - -Changes (w.r.t. TBB 2.1 U1 commercial-aligned release): - -- Incorporates all open-source-release changes down to TBB 2.1 U1, - except for: - - 20081019 addition of enumerable_thread_specific; -- Warning level for Microsoft* Visual C++* compiler raised to /W4 /Wp64; - warnings found on this level were cleaned or suppressed. -- Added TBB_runtime_interface_version API function. -- Added new example: pipeline/square. -- Added exception handling and cancellation support - for parallel_do and pipeline. -- Added copy constructor and [begin,end) constructor to concurrent_queue. -- Added some support for beta version of Intel(R) Parallel Amplifier. -- Added scripts to set environment for cross-compilation of 32-bit - applications on 64-bit Linux with Intel(R) C++ Compiler. -- Fixed semantics of concurrent_vector::clear() to not deallocate - internal arrays. Fixed compact() to perform such deallocation later. -- Fixed the issue with atomic when T is incomplete type. -- Improved support for PowerPC* Macintosh*, including the fix - for a bug in masked compare-and-swap reported by a customer. -- As usual, a number of other improvements everywhere. - ------------------------------------------------------------------------- -20081109 open-source release - -Changes (w.r.t. previous open-source release): - -- Added new serial out of order filter for tbb::pipeline. -- Fixed the issue with atomic::operator= reported at the forum. -- Fixed the issue with using tbb::task::self() in task destructor - reported at the forum. -- A number of other improvements to code, tests, makefiles, examples - and documents. - -Open-source contributions integrated: -- Changes in the memory allocator were partially integrated. - ------------------------------------------------------------------------- -20081019 open-source release - -Changes (w.r.t. previous open-source release): - -- Introduced enumerable_thread_specific. This new class provides a - wrapper around native thread local storage as well as iterators and - ranges for accessing the thread local copies (1533). -- Improved support for Intel(R) Threading Analysis Tools - on Intel(R) 64 architecture. 
-- Dependency from Microsoft* CRT was integrated to the libraries using - manifests, to avoid issues if called from code that uses different - version of Visual C++* runtime than the library. -- Introduced new defines TBB_USE_ASSERT, TBB_USE_DEBUG, - TBB_USE_PERFORMANCE_WARNINGS, TBB_USE_THREADING_TOOLS. -- A number of other improvements to code, tests, makefiles, examples - and documents. - -Open-source contributions integrated: - -- linker optimization: /incremental:no . - ------------------------------------------------------------------------- -20080925 open-source release - -Changes (w.r.t. previous open-source release): - -- Same fix for a memory leak in the memory allocator as in TBB 2.1 U1. -- Improved support for lambda functions. -- Fixed more concurrent_queue issues reported at the forum. -- A number of other improvements to code, tests, makefiles, examples - and documents. - ------------------------------------------------------------------------- -TBB 2.1 Update 1 commercial-aligned release - -Changes (w.r.t. TBB 2.1 commercial-aligned release): - -- Fixed small memory leak in the memory allocator. -- Incorporates all open-source-release changes since TBB 2.1, except for: - - 20080825 changes for parallel_do; - ------------------------------------------------------------------------- -20080825 open-source release - -Changes (w.r.t. previous open-source release): - -- Added exception handling and cancellation support for parallel_do. -- Added default HashCompare template argument for concurrent_hash_map. -- Fixed concurrent_queue.clear() issues due to incorrect assumption - about clear() being private method. -- Added the possibility to use TBB in applications that change - default calling conventions (Windows* only). -- Many improvements to code, tests, examples, makefiles and documents. - -Bugs fixed: - -- 120, 130 - memset declaration missed in concurrent_hash_map.h - ------------------------------------------------------------------------- -20080724 open-source release - -Changes (w.r.t. previous open-source release): - -- Inline assembly for atomic operations improved for gcc 4.3 -- A few more improvements to the code. - ------------------------------------------------------------------------- -20080709 open-source release - -Changes (w.r.t. previous open-source release): - -- operator=() was added to the tbb_thread class according to - the current working draft for std::thread. -- Recognizing SPARC* in makefiles for Linux* and Sun Solaris*. - -Bugs fixed: - -- 127 - concurrent_hash_map::range fixed to split correctly. - -Open-source contributions integrated: - -- fix_set_midpoint.diff by jyasskin -- SPARC* support in makefiles by Raf Schietekat - ------------------------------------------------------------------------- -20080622 open-source release - -Changes (w.r.t. previous open-source release): - -- Fixed a hang that rarely happened on Linux - during deinitialization of the TBB scheduler. -- Improved support for Intel(R) Thread Checker. -- A few more improvements to the code. - ------------------------------------------------------------------------- -TBB 2.1 commercial-aligned release - -Changes (w.r.t. TBB 2.0 U3 commercial-aligned release): - -- All open-source-release changes down to, and including, TBB 2.0 below, - were incorporated into this release. - ------------------------------------------------------------------------- -20080605 open-source release - -Changes (w.r.t. 
previous open-source release): - -- Explicit control of exported symbols by version scripts added on Linux. -- Interfaces polished for exception handling & algorithm cancellation. -- Cache behavior improvements in the scalable allocator. -- Improvements in text_filter, polygon_overlay, and other examples. -- A lot of other stability improvements in code, tests, and makefiles. -- First release where binary packages include headers/docs/examples, so - binary packages are now self-sufficient for using TBB. - -Open-source contributions integrated: - -- atomics patch (partially). -- tick_count warning patch. - -Bugs fixed: - -- 118 - fix for boost compatibility. -- 123 - fix for tbb_machine.h. - ------------------------------------------------------------------------- -20080512 open-source release - -Changes (w.r.t. previous open-source release): - -- Fixed a problem with backward binary compatibility - of debug Linux builds. -- Sun* Studio* support added. -- soname support added on Linux via linker script. To restore backward - binary compatibility, *.so -> *.so.2 softlinks should be created. -- concurrent_hash_map improvements - added few new forms of insert() - method and fixed precondition and guarantees of erase() methods. - Added runtime warning reporting about bad hash function used for - the container. Various improvements for performance and concurrency. -- Cancellation mechanism reworked so that it does not hurt scalability. -- Algorithm parallel_do reworked. Requirement for Body::argument_type - definition removed, and work item argument type can be arbitrarily - cv-qualified. -- polygon_overlay example added. -- A few more improvements to code, tests, examples and Makefiles. - -Open-source contributions integrated: - -- Soname support patch for Bugzilla #112. - -Bugs fixed: - -- 112 - fix for soname support. - ------------------------------------------------------------------------- -TBB 2.0 U3 commercial-aligned release (package 017, April 20, 2008) - -Corresponds to commercial 019 (for Linux*, 020; for Mac OS* X, 018) -packages. - -Changes (w.r.t. TBB 2.0 U2 commercial-aligned release): - -- Does not contain open-source-release changes below; this release is - only a minor update of TBB 2.0 U2. -- Removed spin-waiting in pipeline and concurrent_queue. -- A few more small bug fixes from open-source releases below. - ------------------------------------------------------------------------- -20080408 open-source release - -Changes (w.r.t. previous open-source release): - -- count_strings example reworked: new word generator implemented, hash - function replaced, and tbb_allocator is used with std::string class. -- Static methods of spin_rw_mutex were replaced by normal member - functions, and the class name was versioned. -- tacheon example was renamed to tachyon. -- Improved support for Intel(R) Thread Checker. -- A few more minor improvements. - -Open-source contributions integrated: - -- Two sets of Sun patches for IA Solaris support. - ------------------------------------------------------------------------- -20080402 open-source release - -Changes (w.r.t. previous open-source release): - -- Exception handling and cancellation support for tasks and algorithms - fully enabled. -- Exception safety guaranties defined and fixed for all concurrent - containers. -- User-defined memory allocator support added to all concurrent - containers. -- Performance improvement of concurrent_hash_map, spin_rw_mutex. 
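[Editorial note, not part of the removed CHANGES file: an illustrative sketch of the concurrent_hash_map accessor/insert interface touched by the 20080512 entry; WordTable, bump() and seen() are hypothetical names for the example.]

#include <tbb/concurrent_hash_map.h>
#include <string>

// Hypothetical table type for the example.
typedef tbb::concurrent_hash_map<std::string, int> WordTable;

void bump(WordTable& table, const std::string& word) {
    WordTable::accessor a;          // grants exclusive access to one element
    table.insert(a, word);          // inserts (word, int()) if the key is absent
    a->second += 1;                 // safe update while the accessor is held
}

bool seen(const WordTable& table, const std::string& word) {
    WordTable::const_accessor a;    // shared (read) access
    return table.find(a, word);     // true if the key is present
}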
-- Critical fix for a rare race condition during scheduler - initialization/de-initialization. -- New methods added for concurrent containers to be closer to STL, - as well as automatic filters removal from pipeline - and __TBB_AtomicAND function. -- The volatile keyword dropped from where it is not really needed. -- A few more minor improvements. - ------------------------------------------------------------------------- -20080319 open-source release - -Changes (w.r.t. previous open-source release): - -- Support for gcc version 4.3 was added. -- tbb_thread class, near compatible with std::thread expected in C++0x, - was added. - -Bugs fixed: - -- 116 - fix for compilation issues with gcc version 4.2.1. -- 120 - fix for compilation issues with gcc version 4.3. - ------------------------------------------------------------------------- -20080311 open-source release - -Changes (w.r.t. previous open-source release): - -- An enumerator added for pipeline filter types (serial vs. parallel). -- New task_scheduler_observer class introduced, to observe when - threads start and finish interacting with the TBB task scheduler. -- task_scheduler_init reverted to not use internal versioned class; - binary compatibility guaranteed with stable releases only. -- Various improvements to code, tests, examples and Makefiles. - ------------------------------------------------------------------------- -20080304 open-source release - -Changes (w.r.t. previous open-source release): - -- Task-to-thread affinity support, previously kept under a macro, - now fully legalized. -- Work-in-progress on cache_aligned_allocator improvements. -- Pipeline really supports parallel input stage; it's no more serialized. -- Various improvements to code, tests, examples and Makefiles. - -Bugs fixed: - -- 119 - fix for scalable_malloc sometimes failing to return a big block. -- TR575 - fixed a deadlock occurring on Windows in startup/shutdown - under some conditions. - ------------------------------------------------------------------------- -20080226 open-source release - -Changes (w.r.t. previous open-source release): - -- Introduced tbb_allocator to select between standard allocator and - tbb::scalable_allocator when available. -- Removed spin-waiting in pipeline and concurrent_queue. -- Improved performance of concurrent_hash_map by using tbb_allocator. -- Improved support for Intel(R) Thread Checker. -- Various improvements to code, tests, examples and Makefiles. - ------------------------------------------------------------------------- -TBB 2.0 U2 commercial-aligned release (package 017, February 14, 2008) - -Corresponds to commercial 017 (for Linux*, 018; for Mac OS* X, 016) -packages. - -Changes (w.r.t. TBB 2.0 U1 commercial-aligned release): - -- Does not contain open-source-release changes below; this release is - only a minor update of TBB 2.0 U1. -- Add support for Microsoft* Visual Studio* 2008, including binary - libraries and VS2008 projects for examples. -- Use SwitchToThread() not Sleep() to yield threads on Windows*. -- Enhancements to Doxygen-readable comments in source code. -- A few more small bug fixes from open-source releases below. - -Bugs fixed: - -- TR569 - Memory leak in concurrent_queue. - ------------------------------------------------------------------------- -20080207 open-source release - -Changes (w.r.t. previous open-source release): - -- Improvements and minor fixes in VS2008 projects for examples. 
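[Editorial note, not taken from the sources being removed: the task_scheduler_observer class introduced in the 20080311 entry is typically subclassed as sketched below; ThreadLogger is a hypothetical name.]

#include <tbb/task_scheduler_observer.h>
#include <cstdio>

class ThreadLogger : public tbb::task_scheduler_observer {
public:
    ThreadLogger()  { observe(true);  }   // start receiving callbacks
    ~ThreadLogger() { observe(false); }   // stop before destruction

    // Called on each thread when it starts participating in TBB scheduling.
    /*virtual*/ void on_scheduler_entry(bool is_worker) {
        std::printf("scheduler entry, worker=%d\n", int(is_worker));
    }
    // Called when the thread stops interacting with the scheduler.
    /*virtual*/ void on_scheduler_exit(bool is_worker) {
        std::printf("scheduler exit, worker=%d\n", int(is_worker));
    }
};

int main() {
    ThreadLogger logger;   // callbacks fire for threads scheduled while 'logger' observes
    // ... run parallel algorithms here ...
    return 0;
}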
-- Improvements in code for gating worker threads that wait for work, - previously consolidated under #if IMPROVED_GATING, now legalized. -- Cosmetic changes in code, examples, tests. - -Bugs fixed: - -- 113 - Iterators and ranges should be convertible to their const - counterparts. -- TR569 - Memory leak in concurrent_queue. - ------------------------------------------------------------------------- -20080122 open-source release - -Changes (w.r.t. previous open-source release): - -- Updated examples/parallel_for/seismic to improve the visuals and to - use the affinity_partitioner (20071127 and forward) for better - performance. -- Minor improvements to unittests and performance tests. - ------------------------------------------------------------------------- -20080115 open-source release - -Changes (w.r.t. previous open-source release): - -- Cleanup, simplifications and enhancements to the Makefiles for - building the libraries (see build/index.html for high-level - changes) and the examples. -- Use SwitchToThread() not Sleep() to yield threads on Windows*. -- Engineering work-in-progress on exception safety/support. -- Engineering work-in-progress on affinity_partitioner for - parallel_reduce. -- Engineering work-in-progress on improved gating for worker threads - (idle workers now block in the OS instead of spinning). -- Enhancements to Doxygen-readable comments in source code. - -Bugs fixed: - -- 102 - Support for parallel build with gmake -j -- 114 - /Wp64 build warning on Windows*. - ------------------------------------------------------------------------- -20071218 open-source release - -Changes (w.r.t. previous open-source release): - -- Full support for Microsoft* Visual Studio* 2008 in open-source. - Binaries for vc9/ will be available in future stable releases. -- New recursive_mutex class. -- Full support for 32-bit PowerMac including export files for builds. -- Improvements to parallel_do. - ------------------------------------------------------------------------- -20071206 open-source release - -Changes (w.r.t. previous open-source release): - -- Support for Microsoft* Visual Studio* 2008 in building libraries - from source as well as in vc9/ projects for examples. -- Small fixes to the affinity_partitioner first introduced in 20071127. -- Small fixes to the thread-stack size hook first introduced in 20071127. -- Engineering work in progress on concurrent_vector. -- Engineering work in progress on exception behavior. -- Unittest improvements. - ------------------------------------------------------------------------- -20071127 open-source release - -Changes (w.r.t. previous open-source release): - -- Task-to-thread affinity support (affinity partitioner) first appears. -- More work on concurrent_vector. -- New parallel_do algorithm (function-style version of parallel while) - and parallel_do/parallel_preorder example. -- New task_scheduler_init() hooks for getting default_num_threads() and - for setting thread stack size. -- Support for weak memory consistency models in the code base. -- Futex usage in the task scheduler (Linux). -- Started adding 32-bit PowerMac support. -- Intel(R) 9.1 compilers are now the base supported Intel(R) compiler - version. -- TBB libraries added to link line automatically on Microsoft Windows* - systems via #pragma comment linker directives. - -Open-source contributions integrated: - -- FreeBSD platform support patches. -- AIX weak memory model patch. - -Bugs fixed: - -- 108 - Removed broken affinity.h reference. 
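[Editorial note, illustrative only and assuming lambda support; relax(), run() and the data layout are hypothetical: the affinity_partitioner that first appears in the 20071127 entry, and is used by the seismic example in 20080122, is kept alive across repeated sweeps so that the same subranges are replayed on the same threads.]

#include <tbb/parallel_for.h>
#include <tbb/blocked_range.h>
#include <tbb/partitioner.h>
#include <vector>

void relax(std::vector<float>& field, tbb::affinity_partitioner& ap) {
    // Passing the same partitioner object on every sweep lets TBB replay
    // the previous range-to-thread mapping and reuse warm caches.
    tbb::parallel_for(tbb::blocked_range<size_t>(0, field.size()),
        [&](const tbb::blocked_range<size_t>& r) {
            for (size_t i = r.begin(); i != r.end(); ++i)
                field[i] = 0.5f * field[i];
        },
        ap);
}

void run(std::vector<float>& field, int sweeps) {
    tbb::affinity_partitioner ap;        // must outlive the repeated calls
    for (int s = 0; s < sweeps; ++s)
        relax(field, ap);
}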
-- 101 - Does not build on Debian Lenny (replaced arch with uname -m). - ------------------------------------------------------------------------- -20071030 open-source release - -Changes (w.r.t. previous open-source release): - -- More work on concurrent_vector. -- Better support for building with -Wall -Werror (or not) as desired. -- A few fixes to eliminate extraneous warnings. -- Begin introduction of versioning hooks so that the internal/API - version is tracked via TBB_INTERFACE_VERSION. The newest binary - libraries should always work with previously-compiled code when- - ever possible. -- Engineering work in progress on using futex inside the mutexes (Linux). -- Engineering work in progress on exception behavior. -- Engineering work in progress on a new parallel_do algorithm. -- Unittest improvements. - ------------------------------------------------------------------------- -20070927 open-source release - -Changes (w.r.t. TBB 2.0 U1 commercial-aligned release): - -- Minor update to TBB 2.0 U1 below. -- Begin introduction of new concurrent_vector interfaces not released - with TBB 2.0 U1. - ------------------------------------------------------------------------- -TBB 2.0 U1 commercial-aligned release (package 014, October 1, 2007) - -Corresponds to commercial 014 (for Linux*, 016) packages. - -Changes (w.r.t. TBB 2.0 commercial-aligned release): - -- All open-source-release changes down to, and including, TBB 2.0 below, - were incorporated into this release. -- Made a number of changes to the officially supported OS list: - Added Linux* OSs: - Asianux* 3, Debian* 4.0, Fedora Core* 6, Fedora* 7, - Turbo Linux* 11, Ubuntu* 7.04; - Dropped Linux* OSs: - Asianux* 2, Fedora Core* 4, Haansoft* Linux 2006 Server, - Mandriva/Mandrake* 10.1, Miracle Linux* 4.0, - Red Flag* DC Server 5.0; - Only Mac OS* X 10.4.9 (and forward) and Xcode* tool suite 2.4.1 (and - forward) are now supported. -- Commercial installers on Linux* fixed to recommend the correct - binaries to use in more cases, with less unnecessary warnings. -- Changes to eliminate spurious build warnings. - -Open-source contributions integrated: - -- Two small header guard macro patches; it also fixed bug #94. -- New blocked_range3d class. - -Bugs fixed: - -- 93 - Removed misleading comments in task.h. -- 94 - See above. - ------------------------------------------------------------------------- -20070815 open-source release - -Changes: - -- Changes to eliminate spurious build warnings. -- Engineering work in progress on concurrent_vector allocator behavior. -- Added hooks to use the Intel(R) compiler code coverage tools. - -Open-source contributions integrated: - -- Mac OS* X build warning patch. - -Bugs fixed: - -- 88 - Fixed TBB compilation errors if both VS2005 and Windows SDK are - installed. - ------------------------------------------------------------------------- -20070719 open-source release - -Changes: - -- Minor update to TBB 2.0 commercial-aligned release below. -- Changes to eliminate spurious build warnings. - ------------------------------------------------------------------------- -TBB 2.0 commercial-aligned release (package 010, July 19, 2007) - -Corresponds to commercial 010 (for Linux*, 012) packages. - -- TBB open-source debut release. - ------------------------------------------------------------------------- -TBB 1.1 commercial release (April 10, 2007) - -Changes (w.r.t. 
TBB 1.0 commercial release): - -- auto_partitioner which offered an automatic alternative to specifying - a grain size parameter to estimate the best granularity for tasks. -- The release was added to the Intel(R) C++ Compiler 10.0 Pro. - ------------------------------------------------------------------------- -TBB 1.0 Update 2 commercial release - -Changes (w.r.t. TBB 1.0 Update 1 commercial release): - -- Mac OS* X 64-bit support added. -- Source packages for commercial releases introduced. - ------------------------------------------------------------------------- -TBB 1.0 Update 1 commercial-aligned release - -Changes (w.r.t. TBB 1.0 commercial release): - -- Fix for critical package issue on Mac OS* X. - ------------------------------------------------------------------------- -TBB 1.0 commercial release (August 29, 2006) - -Changes (w.r.t. TBB 1.0 beta commercial release): - -- New namespace (and compatibility headers for old namespace). - Namespaces are tbb and tbb::internal and all classes are in the - underscore_style not the WindowsStyle. -- New class: scalable_allocator (and cache_aligned_allocator using that - if it exists). -- Added parallel_for/tacheon example. -- Removed C-style casts from headers for better C++ compliance. -- Bug fixes. -- Documentation improvements. -- Improved performance of the concurrent_hash_map class. -- Upgraded parallel_sort() to support STL-style random-access iterators - instead of just pointers. -- The Windows vs7_1 directories renamed to vs7.1 in examples. -- New class: spin version of reader-writer lock. -- Added push_back() interface to concurrent_vector(). - ------------------------------------------------------------------------- -TBB 1.0 beta commercial release - -Initial release. - -Features / APIs: - -- Concurrent containers: ConcurrentHashTable, ConcurrentVector, - ConcurrentQueue. -- Parallel algorithms: ParallelFor, ParallelReduce, ParallelScan, - ParallelWhile, Pipeline, ParallelSort. -- Support: AlignedSpace, BlockedRange (i.e., 1D), BlockedRange2D -- Task scheduler with multi-master support. -- Atomics: read, write, fetch-and-store, fetch-and-add, compare-and-swap. -- Locks: spin, reader-writer, queuing, OS-wrapper. -- Memory allocation: STL-style memory allocator that avoids false - sharing. -- Timers. - -Tools Support: -- Thread Checker 3.0. -- Thread Profiler 3.0. - -Documentation: -- First Use Documents: README.txt, INSTALL.txt, Release_Notes.txt, - Doc_Index.html, Getting_Started.pdf, Tutorial.pdf, Reference.pdf. -- Class hierarchy HTML pages (Doxygen). -- Tree of index.html pages for navigating the installed package, esp. - for the examples. - -Examples: -- One for each of these TBB features: ConcurrentHashTable, ParallelFor, - ParallelReduce, ParallelWhile, Pipeline, Task. -- Live copies of examples from Getting_Started.pdf. -- TestAll example that exercises every class and header in the package - (i.e., a "liveness test"). -- Compilers: see Release_Notes.txt. -- APIs: OpenMP, WinThreads, Pthreads. - -Packaging: -- Package for Windows installs IA-32 and EM64T bits. -- Package for Linux installs IA-32, EM64T and IPF bits. -- Package for Mac OS* X installs IA-32 bits. -- All packages support Intel(R) software setup assistant (ISSA) and - install-time FLEXlm license checking. -- ISSA support allows license file to be specified directly in case of - no Internet connection or problems with IRC or serial #s. -- Linux installer allows root or non-root, RPM or non-RPM installs. 
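[Editorial note, not from the packages listed above: a short sketch of parallel_sort() as upgraded in TBB 1.0 to accept STL-style random-access iterators, with an optional comparator.]

#include <tbb/parallel_sort.h>
#include <tbb/task_scheduler_init.h>
#include <vector>
#include <functional>

int main() {
    tbb::task_scheduler_init init;        // explicit init; implicit in TBB 2.2 and later

    std::vector<int> v;
    for (int i = 0; i < 100000; ++i)
        v.push_back((i * 37) % 1000);     // arbitrary test data

    // Iterator-based interface: any random-access iterator works.
    tbb::parallel_sort(v.begin(), v.end());

    // Optional comparator, here sorting in descending order.
    tbb::parallel_sort(v.begin(), v.end(), std::greater<int>());
    return 0;
}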
-- FLEXlm license servers (for those who need floating/counted licenses) - are provided separately on Intel(R) Premier. - ------------------------------------------------------------------------- -* Other names and brands may be claimed as the property of others. diff --git a/deal.II/bundled/tbb30_104oss/COPYING b/deal.II/bundled/tbb30_104oss/COPYING deleted file mode 100644 index 5af6ed874d..0000000000 --- a/deal.II/bundled/tbb30_104oss/COPYING +++ /dev/null @@ -1,353 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. 
This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. 
- -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. 
Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. 
If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. 
- -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. ----------------- END OF Gnu General Public License ---------------- - -The source code of Threading Building Blocks is distributed under version 2 -of the GNU General Public License, with the so-called "runtime exception," -as follows (or see any header or implementation file): - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. diff --git a/deal.II/bundled/tbb30_104oss/README b/deal.II/bundled/tbb30_104oss/README deleted file mode 100644 index 67ab8ad2e0..0000000000 --- a/deal.II/bundled/tbb30_104oss/README +++ /dev/null @@ -1,11 +0,0 @@ -Threading Building Blocks - README - -See index.html for directions and documentation. - -If source is present (./Makefile and src/ directories), -type 'gmake' in this directory to build and test. - -See examples/index.html for runnable examples and directions. - -See http://threadingbuildingblocks.org for full documentation -and software information. diff --git a/deal.II/bundled/tbb30_104oss/build/AIX.gcc.inc b/deal.II/bundled/tbb30_104oss/build/AIX.gcc.inc deleted file mode 100644 index c248205ac7..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/AIX.gcc.inc +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -DYLIB_KEY = -shared -LIBDL = -ldl - -TBB_NOSTRICT = 1 - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = -shared -LIBS = -lpthread -ldl -C_FLAGS = $(CPLUS_FLAGS) -x c - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -DUSE_PTHREAD -pthread -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD -pthread -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (powerpc,$(arch)) - CPLUS_FLAGS += -maix64 -Wl,-G - LIB_LINK_FLAGS += -maix64 -Wl,-b64 -Wl,-brtl -Wl,-G -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ - -ASSEMBLY_SOURCE=ibm_aix51 -ifeq (powerpc,$(arch)) - TBB_ASM.OBJ = atomic_support.o -endif - -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2 - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/AIX.inc b/deal.II/bundled/tbb30_104oss/build/AIX.inc deleted file mode 100644 index 0d1c561baa..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/AIX.inc +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -ifndef arch - arch:=$(shell uname -p) - export arch -endif - -ifndef runtime - gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//') - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) -endif - -native_compiler := gcc -export compiler ?= gcc -debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_aix.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -ifdef LIBPATH - export LIBPATH := .:$(LIBPATH) -else - export LIBPATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so - -TBB.DEF = -TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) - -TBB_NOSTRICT=1 - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh diff --git a/deal.II/bundled/tbb30_104oss/build/FreeBSD.gcc.inc b/deal.II/bundled/tbb30_104oss/build/FreeBSD.gcc.inc deleted file mode 100644 index 7c65a71f0b..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/FreeBSD.gcc.inc +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -DYLIB_KEY = -shared - -TBB_NOSTRICT = 1 - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = -shared -LIBS = -lpthread -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (ia64,$(arch)) -# Position-independent code (PIC) is a must on IA-64, even for regular (not shared) executables - CPLUS_FLAGS += $(PIC_KEY) -endif - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM=as - TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2 - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/FreeBSD.inc b/deal.II/bundled/tbb30_104oss/build/FreeBSD.inc deleted file mode 100644 index 8f2d8c89cb..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/FreeBSD.inc +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -ifndef arch - ifeq ($(shell uname -m),i386) - export arch:=ia32 - endif - ifeq ($(shell uname -m),ia64) - export arch:=ia64 - endif - ifeq ($(shell uname -m),amd64) - export arch:=intel64 - endif -endif - -ifndef runtime - gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//') - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) -endif - -native_compiler := gcc -export compiler ?= gcc -debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -ifdef LD_LIBRARY_PATH - export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) -else - export LD_LIBRARY_PATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so -LIBEXT=so - -TBB.DEF = -TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) - -TBB_NOSTRICT=1 - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh diff --git a/deal.II/bundled/tbb30_104oss/build/Makefile.rml b/deal.II/bundled/tbb30_104oss/build/Makefile.rml deleted file mode 100644 index aa211e98b0..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/Makefile.rml +++ /dev/null @@ -1,156 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -tbb_root ?= $(TBB30_INSTALL_DIR) -BUILDING_PHASE=1 -TEST_RESOURCE = $(RML.RES) -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,_$(cfg)) - -# default target -default_rml: rml rml_test - -RML_ROOT ?= $(tbb_root)/src/rml -RML_SERVER_ROOT = $(RML_ROOT)/server - -VPATH = $(tbb_root)/src/tbb $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) -VPATH += $(RML_ROOT)/server $(RML_ROOT)/client $(RML_ROOT)/test $(tbb_root)/src/test - -include $(tbb_root)/build/common_rules.inc - -#-------------------------------------------------------------------------- -# Define rules for making the RML server shared library and client objects. -#-------------------------------------------------------------------------- - -# Object files that make up RML server -RML_SERVER.OBJ = rml_server.$(OBJ) - -# Object files that RML clients need -RML_TBB_CLIENT.OBJ = rml_tbb.$(OBJ) dynamic_link.$(OBJ) -RML_OMP_CLIENT.OBJ = rml_omp.$(OBJ) omp_dynamic_link.$(OBJ) - -RML.OBJ = $(RML_SERVER.OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) -ifeq (windows,$(tbb_os)) -RML_ASM.OBJ = $(if $(findstring intel64,$(arch)),$(TBB_ASM.OBJ)) -endif -ifeq (linux,$(tbb_os)) -RML_ASM.OBJ = $(if $(findstring ia64,$(arch)),$(TBB_ASM.OBJ)) -endif - -RML_TBB_DEP= cache_aligned_allocator_rml.$(OBJ) dynamic_link_rml.$(OBJ) concurrent_vector_rml.$(OBJ) tbb_misc_rml.$(OBJ) -TBB_DEP_NON_RML_TEST= cache_aligned_allocator_rml.$(OBJ) dynamic_link_rml.$(OBJ) $(RML_ASM.OBJ) tbb_misc_rml.$(OBJ) -TBB_DEP_RML_TEST= $(RML_ASM.OBJ) -ifeq ($(cfg),debug) -RML_TBB_DEP+= spin_mutex_rml.$(OBJ) -TBB_DEP_RML_TEST+= tbb_misc_rml.$(OBJ) -endif -LIBS += $(LIBDL) - -INCLUDES += $(INCLUDE_KEY)$(RML_ROOT)/include $(INCLUDE_KEY). -T_INCLUDES = $(INCLUDES) $(INCLUDE_KEY)$(tbb_root)/src/test $(INCLUDE_KEY)$(RML_SERVER_ROOT) - -# Suppress superfluous warnings for RML compilation -R_CPLUS_FLAGS = $(subst DO_ITT_NOTIFY,DO_ITT_NOTIFY=0,$(CPLUS_FLAGS)) $(WARNING_SUPPRESS) \ - $(DEFINE_KEY)TBB_USE_THREADING_TOOLS=0 $(DEFINE_KEY)__TBB_RML_STATIC=1 $(DEFINE_KEY)__TBB_NO_IMPLICIT_LINKAGE=1 - -%.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(R_CPLUS_FLAGS) $(PIC_KEY) $(INCLUDES) $< - -tbb_misc_rml.$(OBJ): version_string.tmp - -RML_TEST.OBJ = test_job_automaton.$(OBJ) test_thread_monitor.$(OBJ) test_rml_tbb.$(OBJ) test_rml_omp.$(OBJ) test_rml_mixed.$(OBJ) - -$(RML_TBB_DEP): %_rml.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(R_CPLUS_FLAGS) $(PIC_KEY) $(INCLUDES) $< - -$(RML_TEST.OBJ): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(R_CPLUS_FLAGS) $(PIC_KEY) $(T_INCLUDES) $< - -ifneq (,$(RML.DEF)) -rml.def: $(RML.DEF) - $(CMD) "$(CPLUS) $(PREPROC_ONLY) $(RML.DEF) $(filter $(DEFINE_KEY)%,$(CPLUS_FLAGS)) >rml.def 2>$(NUL) || exit 0" - -LIB_LINK_FLAGS += $(EXPORT_KEY)rml.def -$(RML.DLL): rml.def -endif - -$(RML.DLL): BUILDING_LIBRARY = $(RML.DLL) -$(RML.DLL): $(RML_TBB_DEP) $(RML_SERVER.OBJ) $(RML.RES) $(RML_NO_VERSION.DLL) $(RML_ASM.OBJ) - $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(RML.DLL) $(RML_SERVER.OBJ) $(RML_TBB_DEP) $(RML_ASM.OBJ) $(RML.RES) $(LIB_LINK_LIBS) $(LIB_LINK_FLAGS) - -ifneq (,$(RML_NO_VERSION.DLL)) -$(RML_NO_VERSION.DLL): - echo "INPUT ($(RML.DLL))" > $(RML_NO_VERSION.DLL) -endif - -rml: $(RML.DLL) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) - -#------------------------------------------------------ -# End of rules for making the RML server shared library -#------------------------------------------------------ - -#------------------------------------------------------ -# Define rules for making the RML unit tests 
-#------------------------------------------------------ - -add_debug=$(basename $(1))_debug$(suffix $(1)) -cross_suffix=$(if $(crosstest),$(if $(DEBUG_SUFFIX),$(subst _debug,,$(1)),$(call add_debug,$(1))),$(1)) - -RML_TESTS = test_job_automaton.$(TEST_EXT) test_thread_monitor.$(TEST_EXT) test_rml_tbb.$(TEST_EXT) test_rml_omp.$(TEST_EXT) test_rml_mixed.$(TEST_EXT) test_rml_omp_c_linkage.$(TEST_EXT) - -test_rml_tbb.$(TEST_EXT): test_rml_tbb.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(TBB_DEP_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_tbb.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(TBB_DEP_RML_TEST) $(LIBS) $(LINK_FLAGS) - -test_rml_omp.$(TEST_EXT): test_rml_omp.$(OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_NON_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_omp.$(OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_NON_RML_TEST) $(LIBS) $(LINK_FLAGS) - -test_rml_mixed.$(TEST_EXT): test_rml_mixed.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_rml_mixed.$(OBJ) $(RML_TBB_CLIENT.OBJ) $(RML_OMP_CLIENT.OBJ) $(TBB_DEP_RML_TEST) $(LIBS) $(LINK_FLAGS) - -rml_omp_stub.$(OBJ): rml_omp_stub.cpp - $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(WARNING_SUPPRESS) $(T_INCLUDES) $(PIC_KEY) $< - -test_rml_omp_c_linkage.$(TEST_EXT): test_rml_omp_c_linkage.$(OBJ) rml_omp_stub.$(OBJ) - $(CONLY) $(C_FLAGS) $(OUTPUT_KEY)$@ test_rml_omp_c_linkage.$(OBJ) rml_omp_stub.$(OBJ) $(LIBS) $(LINK_FLAGS) - -test_%.$(TEST_EXT): test_%.$(OBJ) $(TBB_DEP_NON_RML_TEST) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(TBB_DEP_NON_RML_TEST) $(LIBS) $(LINK_FLAGS) - -### run_cmd is usually empty -rml_test: $(call cross_suffix,$(RML.DLL)) $(TEST_PREREQUISITE) $(RML_TESTS) - $(run_cmd) ./test_job_automaton.$(TEST_EXT) $(args) - $(run_cmd) ./test_thread_monitor.$(TEST_EXT) $(args) - $(run_cmd) ./test_rml_tbb.$(TEST_EXT) $(args) - $(run_cmd) ./test_rml_omp.$(TEST_EXT) $(args) - $(run_cmd) ./test_rml_mixed.$(TEST_EXT) $(args) - $(run_cmd) ./test_rml_omp_c_linkage.$(TEST_EXT) $(args) - -#------------------------------------------------------ -# End of rules for making the TBBMalloc unit tests -#------------------------------------------------------ - -# Include automatically generated dependences --include *.d diff --git a/deal.II/bundled/tbb30_104oss/build/Makefile.tbb b/deal.II/bundled/tbb30_104oss/build/Makefile.tbb deleted file mode 100644 index 942731a989..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/Makefile.tbb +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. 
Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define rules for making the TBB shared library. -#------------------------------------------------------------------------------ - -tbb_root ?= "$(TBB30_INSTALL_DIR)" -BUILDING_PHASE=1 -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,_$(cfg)) - -#------------------------------------------------------------ -# Define static pattern rules dealing with .cpp source files -#------------------------------------------------------------ -# $(warning CONFIG: cfg=$(cfg) arch=$(arch) compiler=$(compiler) os=$(tbb_os) runtime=$(runtime)) - -default_tbb: $(TBB.DLL) -.PHONY: default_tbb tbbvars clean -.PRECIOUS: %.$(OBJ) - -VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/old $(tbb_root)/src/rml/client - -CPLUS_FLAGS += $(PIC_KEY) $(DEFINE_KEY)__TBB_BUILD=1 - -# A template to switch off strict-ansi for certain compilation units -# ifeq (1,$(TBB_NOSTRICT)) -# KNOWN_NOSTRICT = -# endif - -# Object files (that were compiled from C++ code) that gmake up TBB -TBB_CPLUS.OBJ = concurrent_hash_map.$(OBJ) \ - concurrent_queue.$(OBJ) \ - concurrent_vector.$(OBJ) \ - dynamic_link.$(OBJ) \ - itt_notify.$(OBJ) \ - cache_aligned_allocator.$(OBJ) \ - pipeline.$(OBJ) \ - queuing_mutex.$(OBJ) \ - queuing_rw_mutex.$(OBJ) \ - reader_writer_lock.$(OBJ) \ - spin_rw_mutex.$(OBJ) \ - spin_mutex.$(OBJ) \ - critical_section.$(OBJ) \ - task.$(OBJ) \ - tbb_misc.$(OBJ) \ - mutex.$(OBJ) \ - recursive_mutex.$(OBJ) \ - condition_variable.$(OBJ) \ - tbb_thread.$(OBJ) \ - concurrent_monitor.$(OBJ) \ - private_server.$(OBJ) \ - rml_tbb.$(OBJ) \ - task_group_context.$(OBJ) \ - governor.$(OBJ) \ - market.$(OBJ) \ - arena.$(OBJ) \ - scheduler.$(OBJ) \ - observer_proxy.$(OBJ) \ - tbb_statistics.$(OBJ) \ - tbb_main.$(OBJ) - -# OLD/Legacy object files for backward binary compatibility -ifeq (,$(findstring $(DEFINE_KEY)TBB_NO_LEGACY,$(CPLUS_FLAGS))) -TBB_CPLUS_OLD.OBJ = \ - concurrent_vector_v2.$(OBJ) \ - concurrent_queue_v2.$(OBJ) \ - spin_rw_mutex_v2.$(OBJ) \ - task_v2.$(OBJ) -endif - -# Object files that gmake up TBB (TBB_ASM.OBJ is platform-specific) -TBB.OBJ = $(TBB_CPLUS.OBJ) $(TBB_CPLUS_OLD.OBJ) $(TBB_ASM.OBJ) - -# Suppress superfluous warnings for TBB compilation -WARNING_KEY += $(WARNING_SUPPRESS) - -include $(tbb_root)/build/common_rules.inc - -ifneq (,$(TBB.DEF)) -tbb.def: $(TBB.DEF) - @echo "=====tbb=========================MT== $(cfg)/$@" - @$(CMD) "$(CPLUS) $(PREPROC_ONLY) $(TBB.DEF) $(INCLUDES) $(CPLUS_FLAGS) >tbb.def 2>$(NUL) || exit 0" - -LIB_LINK_FLAGS += $(EXPORT_KEY)tbb.def -$(TBB.DLL): tbb.def -endif - -tbbvars.sh: - @echo "=====tbb=========================MT== $(cfg)/$@" - @$(MAKE_TBBVARS) - -$(TBB.DLL): BUILDING_LIBRARY = $(TBB.DLL) -$(TBB.DLL): $(TBB.OBJ) $(TBB.RES) tbbvars.sh $(TBB_NO_VERSION.DLL) - @echo "=====tbb=========================MT== $(cfg)/$@" - @$(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(TBB.DLL) $(TBB.OBJ) $(TBB.RES) $(LIB_LINK_LIBS) $(LIB_LINK_FLAGS) - -ifneq (,$(TBB_NO_VERSION.DLL)) -$(TBB_NO_VERSION.DLL): - @echo 
"=====tbb=========================MT== $(cfg)/$@" - @echo "INPUT ($(TBB.DLL))" > $(TBB_NO_VERSION.DLL) -endif - -#clean: -# $(RM) *.$(OBJ) *.$(DLL) *.res *.map *.ilk *.pdb *.exp *.manifest *.tmp *.d core core.*[0-9][0-9] - -# Include automatically generated dependences --include *.d diff --git a/deal.II/bundled/tbb30_104oss/build/Makefile.tbbmalloc b/deal.II/bundled/tbb30_104oss/build/Makefile.tbbmalloc deleted file mode 100644 index 19b85f5a6b..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/Makefile.tbbmalloc +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -# default target -default_malloc: malloc malloc_test - -tbb_root ?= $(TBB30_INSTALL_DIR) -BUILDING_PHASE=1 -TEST_RESOURCE = $(MALLOC.RES) -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,_$(cfg)) - -MALLOC_ROOT ?= $(tbb_root)/src/tbbmalloc -MALLOC_SOURCE_ROOT ?= $(MALLOC_ROOT) - -VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/test -VPATH += $(MALLOC_ROOT) $(MALLOC_SOURCE_ROOT) - -CPLUS_FLAGS += $(if $(crosstest),$(DEFINE_KEY)__TBBMALLOC_NO_IMPLICIT_LINKAGE=1) - -include $(tbb_root)/build/common_rules.inc - -#------------------------------------------------------ -# Define rules for making the TBBMalloc shared library. 
-#------------------------------------------------------ - -# Object files that make up TBBMalloc -MALLOC_CPLUS.OBJ = tbbmalloc.$(OBJ) dynamic_link.$(OBJ) -MALLOC_CUSTOM.OBJ += tbb_misc_malloc.$(OBJ) -MALLOC_ASM.OBJ = $(TBB_ASM.OBJ) - -# MALLOC_CPLUS.OBJ is built in two steps due to Intel Compiler Tracker # C69574 -MALLOC_CPLUS.OBJ += frontend.$(OBJ) backend.$(OBJ) large_objects.$(OBJ) backref.$(OBJ) -MALLOC.OBJ := $(MALLOC_CPLUS.OBJ) $(MALLOC_ASM.OBJ) $(MALLOC_CUSTOM.OBJ) itt_notify.$(OBJ) -PROXY.OBJ := proxy.$(OBJ) tbb_function_replacement.$(OBJ) -M_CPLUS_FLAGS := $(subst $(WARNING_KEY),,$(M_CPLUS_FLAGS)) $(DEFINE_KEY)__TBB_BUILD=1 -M_INCLUDES = $(INCLUDES) $(INCLUDE_KEY)$(MALLOC_ROOT) $(INCLUDE_KEY)$(MALLOC_SOURCE_ROOT) - -# Suppress superfluous warnings for TBBmalloc compilation -$(MALLOC.OBJ): M_CPLUS_FLAGS += $(WARNING_SUPPRESS) - -itt_notify.$(OBJ): CPLUS_FLAGS += $(PIC_KEY) - -$(PROXY.OBJ): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(PIC_KEY) $(M_INCLUDES) $< - -$(MALLOC_CPLUS.OBJ): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(PIC_KEY) $(M_INCLUDES) $< - -tbb_misc_malloc.$(OBJ): tbb_misc.cpp version_string.tmp - $(CPLUS) $(COMPILE_ONLY) $(M_CPLUS_FLAGS) $(PIC_KEY) $(OUTPUTOBJ_KEY)$@ $(INCLUDE_KEY). $(INCLUDES) $< - -MALLOC_LINK_FLAGS = $(LIB_LINK_FLAGS) -PROXY_LINK_FLAGS = $(LIB_LINK_FLAGS) - -ifneq (,$(MALLOC.DEF)) -tbbmalloc.def: $(MALLOC.DEF) - $(CMD) "$(CPLUS) $(PREPROC_ONLY) $(MALLOC.DEF) $(CPLUS_FLAGS) >tbbmalloc.def 2>$(NUL) || exit 0" - -MALLOC_LINK_FLAGS += $(EXPORT_KEY)tbbmalloc.def -$(MALLOC.DLL): tbbmalloc.def -endif - -$(MALLOC.DLL): BUILDING_LIBRARY = $(MALLOC.DLL) -$(MALLOC.DLL): $(MALLOC.OBJ) $(MALLOC.RES) $(MALLOC_NO_VERSION.DLL) - $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(MALLOC.DLL) $(MALLOC.OBJ) $(MALLOC.RES) $(LIB_LINK_LIBS) $(MALLOC_LINK_FLAGS) - -ifneq (,$(MALLOCPROXY.DEF)) -tbbmallocproxy.def: $(MALLOCPROXY.DEF) - $(CMD) "$(CPLUS) $(PREPROC_ONLY) $(MALLOCPROXY.DEF) $(CPLUS_FLAGS) >tbbmallocproxy.def 2>$(NUL) || exit 0" - -PROXY_LINK_FLAGS += $(EXPORT_KEY)tbbmallocproxy.def -$(MALLOCPROXY.DLL): tbbmallocproxy.def -endif - -ifneq (,$(MALLOCPROXY.DLL)) -$(MALLOCPROXY.DLL): BUILDING_LIBRARY = $(MALLOCPROXY.DLL) -$(MALLOCPROXY.DLL): $(PROXY.OBJ) $(MALLOCPROXY_NO_VERSION.DLL) $(MALLOC.DLL) $(MALLOC.RES) - $(LIB_LINK_CMD) $(LIB_OUTPUT_KEY)$(MALLOCPROXY.DLL) $(PROXY.OBJ) $(MALLOC.RES) $(LIB_LINK_LIBS) $(LINK_MALLOC.LIB) $(PROXY_LINK_FLAGS) - -malloc: $(MALLOCPROXY.DLL) -endif - -ifneq (,$(MALLOC_NO_VERSION.DLL)) -$(MALLOC_NO_VERSION.DLL): - echo "INPUT ($(MALLOC.DLL))" > $(MALLOC_NO_VERSION.DLL) -endif - -ifneq (,$(MALLOCPROXY_NO_VERSION.DLL)) -$(MALLOCPROXY_NO_VERSION.DLL): - echo "INPUT ($(MALLOCPROXY.DLL))" > $(MALLOCPROXY_NO_VERSION.DLL) -endif - -malloc: $(MALLOC.DLL) $(MALLOCPROXY.DLL) - -malloc_dll: $(MALLOC.DLL) - -malloc_proxy_dll: $(MALLOCPROXY.DLL) - -.PHONY: malloc malloc_dll malloc_proxy_dll - -#------------------------------------------------------ -# End of rules for making the TBBMalloc shared library -#------------------------------------------------------ - -#------------------------------------------------------ -# Define rules for making the TBBMalloc unit tests -#------------------------------------------------------ - -add_debug=$(basename $(1))_debug$(suffix $(1)) -cross_suffix=$(if $(crosstest),$(if $(DEBUG_SUFFIX),$(subst _debug,,$(1)),$(call add_debug,$(1))),$(1)) - -MALLOC_MAIN_TESTS = test_ScalableAllocator.$(TEST_EXT) \ - test_ScalableAllocator_STL.$(TEST_EXT) \ - test_malloc_compliance.$(TEST_EXT) \ - 
test_malloc_regression.$(TEST_EXT) \ - test_malloc_init_shutdown.$(TEST_EXT) -MALLOC_OVERLOAD_TESTS = test_malloc_overload.$(TEST_EXT) test_malloc_overload_proxy.$(TEST_EXT) test_malloc_atexit.$(TEST_EXT) - -MALLOC_LIB = $(call cross_suffix,$(MALLOC.LIB)) -MALLOC_PROXY_LIB = $(call cross_suffix,$(MALLOCPROXY.LIB)) - -ifeq (windows.gcc,$(tbb_os).$(compiler)) -test_malloc_overload.$(TEST_EXT): LIBS += $(MALLOC_PROXY_LIB) -endif - -test_malloc_overload.$(TEST_EXT): test_malloc_overload.cpp - $(CPLUS) $(OUTPUT_KEY)$@ $(subst /MT,/MD,$(M_CPLUS_FLAGS)) $(M_INCLUDES) $< $(LIBDL) $(LIBS) $(LINK_FLAGS) -test_malloc_overload_proxy.$(TEST_EXT): test_malloc_overload.cpp $(MALLOC_PROXY_LIB) - $(CPLUS) $(OUTPUT_KEY)$@ $(subst /MT,/MD,$(M_CPLUS_FLAGS)) $(M_INCLUDES) $< $(LIBDL) $(MALLOC_PROXY_LIB) $(LIBS) $(LINK_FLAGS) - -test_malloc_whitebox.$(TEST_EXT): test_malloc_whitebox.cpp $(MALLOC_ASM.OBJ) tbb_misc_malloc.$(OBJ) - $(CPLUS) $(OUTPUT_KEY)$@ $(M_CPLUS_FLAGS) $(M_INCLUDES) $^ $(LIBS) $(LIBDL) $(LINK_FLAGS) - -test_malloc_lib_unload.$(TEST_EXT): test_malloc_lib_unload.cpp - $(CPLUS) $(OUTPUT_KEY)$@ $(M_CPLUS_FLAGS) $(M_INCLUDES) $^ $(LIBS) $(LIBDL) $(LINK_FLAGS) - -$(MALLOC_MAIN_TESTS): %.$(TEST_EXT): %.$(OBJ) $(MALLOC_LIB) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(MALLOC_LIB) $(LIBS) $(LINK_FLAGS) - -ifeq (,$(NO_C_TESTS)) -MALLOC_C_TESTS = test_malloc_pure_c.$(TEST_EXT) - -$(MALLOC_C_TESTS): %.$(TEST_EXT): %.$(OBJ) $(MALLOC_LIB) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $^ $(LIBS) $(LINK_FLAGS) -endif - -# Rules for generating a test DLL -%_dll.$(DLL): %_dll.$(OBJ) - $(LIB_LINK_CMD) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $(PIC_KEY) $< $(LIBS) $(DYLIB_KEY) $(LIB_LINK_FLAGS) - -test_malloc_atexit.$(TEST_EXT): test_malloc_atexit.$(OBJ) test_malloc_atexit_dll.$(DLL) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(MALLOC_PROXY_LIB) $(MALLOC_LIB) test_malloc_atexit_dll.$(LIBEXT) $(LIBS) $(LINK_FLAGS) - -MALLOC_TESTS = $(MALLOC_MAIN_TESTS) $(MALLOC_OVERLOAD_TESTS) $(MALLOC_C_TESTS) test_malloc_whitebox.$(TEST_EXT) test_malloc_lib_unload.$(TEST_EXT) -# run_cmd is usually empty -malloc_test: $(call cross_suffix,$(MALLOC.DLL)) $(TEST_PREREQUISITE) $(MALLOC_TESTS) - $(run_cmd) ./test_malloc_atexit.$(TEST_EXT) $(args) - $(run_cmd) ./test_malloc_lib_unload.$(TEST_EXT) $(args) - $(run_cmd) ./test_malloc_whitebox.$(TEST_EXT) $(args) 1:4 - $(run_cmd) $(TEST_LAUNCHER) -l $(call cross_suffix,$(MALLOCPROXY.DLL)) test_malloc_overload.$(TEST_EXT) $(args) - $(run_cmd) $(TEST_LAUNCHER) test_malloc_overload_proxy.$(TEST_EXT) $(args) - $(run_cmd) $(TEST_LAUNCHER) test_malloc_compliance.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_ScalableAllocator.$(TEST_EXT) $(args) - $(run_cmd) ./test_ScalableAllocator_STL.$(TEST_EXT) $(args) - $(run_cmd) ./test_malloc_regression.$(TEST_EXT) $(args) - $(run_cmd) ./test_malloc_init_shutdown.$(TEST_EXT) $(args) -ifeq (,$(NO_C_TESTS)) - $(run_cmd) ./test_malloc_pure_c.$(TEST_EXT) $(args) -endif - -#------------------------------------------------------ -# End of rules for making the TBBMalloc unit tests -#------------------------------------------------------ - -# Include automatically generated dependences --include *.d diff --git a/deal.II/bundled/tbb30_104oss/build/Makefile.test b/deal.II/bundled/tbb30_104oss/build/Makefile.test deleted file mode 100644 index 90f51a41d7..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/Makefile.test +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
-# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define rules for making the TBB tests. -#------------------------------------------------------------------------------ -.PHONY: default test_tbb_plain test_tbb_old clean - -default: test_tbb_plain test_tbb_old - -tbb_root ?= $(TBB30_INSTALL_DIR) -BUILDING_PHASE=1 -TEST_RESOURCE = $(TBB.RES) -include $(tbb_root)/build/common.inc -DEBUG_SUFFIX=$(findstring _debug,$(call cross_cfg,_$(cfg))) - -#------------------------------------------------------------ -# Define static pattern rules dealing with .cpp source files -#------------------------------------------------------------ - -VPATH = $(tbb_root)/src/tbb/$(ASSEMBLY_SOURCE) $(tbb_root)/src/tbb $(tbb_root)/src/rml/client $(tbb_root)/src/old $(tbb_root)/src/test $(tbb_root)/src/perf - -CPLUS_FLAGS += $(if $(crosstest),$(DEFINE_KEY)__TBB_NO_IMPLICIT_LINKAGE=1) - -include $(tbb_root)/build/common_rules.inc - -# Rule for generating executable test -%.$(TEST_EXT): %.$(OBJ) $(TBB.LIB) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(LINK_TBB.LIB) $(LIBS) $(AUX_LIBS) $(LINK_FLAGS) - -# Rules for generating a test DLL -%_dll.$(DLL): %_dll.$(OBJ) $(TBB.LIB) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $(PIC_KEY) $< $(LINK_TBB.LIB) $(LIBS) $(LINK_FLAGS) $(DYLIB_KEY) -.PRECIOUS: %_dll.$(OBJ) %_dll.$(DLL) - -# Rules for the tests, which use TBB in a dynamically loadable library -test_model_plugin.$(TEST_EXT): test_model_plugin.$(OBJ) test_model_plugin_dll.$(DLL) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $< $(LIBDL) $(LIBS) $(LINK_FLAGS) - -# tbb_misc.$(OBJ) has to be specified here (instead of harness_inject_scheduler.h) because it carries dependency on version_string.tmp -TASK_CPP_DEPENDENCIES = $(TBB_ASM.OBJ) tbb_misc.$(OBJ) -ifeq (,$(codecov)) - TASK_CPP_DEPENDENCIES += itt_notify.$(OBJ) -endif - -# These executables don't depend on the TBB library, but include core .cpp files directly -TASK_CPP_DIRECTLY_INCLUDED = test_eh_tasks.$(TEST_EXT) \ - test_task_leaks.$(TEST_EXT) \ - test_task_assertions.$(TEST_EXT) \ - test_fast_random.$(TEST_EXT) - -# Necessary to locate version_string.tmp referenced from directly included tbb_misc.cpp -INCLUDES += $(INCLUDE_KEY). 
- -$(TASK_CPP_DIRECTLY_INCLUDED): WARNING_KEY += $(WARNING_SUPPRESS) - -$(TASK_CPP_DIRECTLY_INCLUDED): %.$(TEST_EXT) : %.$(OBJ) $(TASK_CPP_DEPENDENCIES) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $^ $(LIBDL) $(LIBS) $(LINK_FLAGS) - -test_tbb_header2.$(OBJ): test_tbb_header.cpp - $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(CXX_WARN_SUPPRESS) $(INCLUDES) $(DEFINE_KEY)__TBB_TEST_SECONDARY=1 $< $(OUTPUTOBJ_KEY)$@ - -# Detecting "multiple definition" linker error using the test that covers the whole library -test_tbb_header.$(TEST_EXT): test_tbb_header.$(OBJ) test_tbb_header2.$(OBJ) $(TBB.LIB) - $(CPLUS) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) test_tbb_header.$(OBJ) test_tbb_header2.$(OBJ) $(LINK_TBB.LIB) $(LIBS) $(LINK_FLAGS) - -# The main list of TBB tests -TEST_TBB_PLAIN.EXE = test_assembly.$(TEST_EXT) \ - test_aligned_space.$(TEST_EXT) \ - test_atomic.$(TEST_EXT) \ - test_blocked_range.$(TEST_EXT) \ - test_blocked_range2d.$(TEST_EXT) \ - test_blocked_range3d.$(TEST_EXT) \ - test_concurrent_queue.$(TEST_EXT) \ - test_concurrent_vector.$(TEST_EXT) \ - test_concurrent_unordered.$(TEST_EXT) \ - test_concurrent_hash_map.$(TEST_EXT) \ - test_enumerable_thread_specific.$(TEST_EXT) \ - test_handle_perror.$(TEST_EXT) \ - test_halt.$(TEST_EXT) \ - test_lambda.$(TEST_EXT) \ - test_model_plugin.$(TEST_EXT) \ - test_mutex.$(TEST_EXT) \ - test_mutex_native_threads.$(TEST_EXT) \ - test_rwm_upgrade_downgrade.$(TEST_EXT) \ - test_cache_aligned_allocator_STL.$(TEST_EXT) \ - test_cache_aligned_allocator.$(TEST_EXT) \ - test_parallel_for.$(TEST_EXT) \ - test_parallel_reduce.$(TEST_EXT) \ - test_parallel_sort.$(TEST_EXT) \ - test_parallel_scan.$(TEST_EXT) \ - test_parallel_while.$(TEST_EXT) \ - test_parallel_do.$(TEST_EXT) \ - test_pipeline.$(TEST_EXT) \ - test_pipeline_with_tbf.$(TEST_EXT) \ - test_parallel_pipeline.$(TEST_EXT) \ - test_task_scheduler_init.$(TEST_EXT) \ - test_task_scheduler_observer.$(TEST_EXT) \ - test_task.$(TEST_EXT) \ - test_tbb_thread.$(TEST_EXT) \ - test_std_thread.$(TEST_EXT) \ - test_tick_count.$(TEST_EXT) \ - test_inits_loop.$(TEST_EXT) \ - test_yield.$(TEST_EXT) \ - test_eh_algorithms.$(TEST_EXT) \ - test_parallel_invoke.$(TEST_EXT) \ - test_task_group.$(TEST_EXT) \ - test_ittnotify.$(TEST_EXT) \ - test_parallel_for_each.$(TEST_EXT) \ - test_tbb_header.$(TEST_EXT) \ - test_combinable.$(TEST_EXT) \ - test_task_auto_init.$(TEST_EXT) \ - test_concurrent_monitor.$(TEST_EXT) \ - test_critical_section.$(TEST_EXT) \ - test_semaphore.$(TEST_EXT) \ - test_reader_writer_lock.$(TEST_EXT) \ - test_tbb_condition_variable.$(TEST_EXT) \ - test_intrusive_list.$(TEST_EXT) \ - test_cilk_interop.$(TEST_EXT) \ - test_tbb_version.$(TEST_EXT) # insert new files right above - -ifdef OPENMP_FLAG - TEST_TBB_PLAIN.EXE += test_tbb_openmp -test_openmp.$(TEST_EXT): test_openmp.cpp - $(CPLUS) $(OPENMP_FLAG) $(OUTPUT_KEY)$@ $(CPLUS_FLAGS) $(INCLUDES) $< $(LIBS) $(LINK_TBB.LIB) $(LINK_FLAGS) -.PHONY: test_tbb_openmp -test_tbb_openmp: $(TEST_PREREQUISITE) test_openmp.$(TEST_EXT) - $(run_cmd) ./test_openmp.$(TEST_EXT) 1:4 - -endif - -$(TEST_TBB_PLAIN.EXE): WARNING_KEY += $(TEST_WARNING_KEY) - -# Run tests that are in TASK_CPP_DIRECTLY_INCLUDED and TEST_TBB_PLAIN.EXE -# The test are ordered so that simpler components are tested first. -# If a component Y uses component X, then tests for Y should come after tests for X. 
-# Note that usually run_cmd is empty, and tests run directly -test_tbb_plain: $(TEST_PREREQUISITE) $(TASK_CPP_DIRECTLY_INCLUDED) $(TEST_TBB_PLAIN.EXE) - $(run_cmd) ./test_tbb_version.$(TEST_EXT) $(args) - # Checking TBB version first to make sure the following testing has anything in it - $(run_cmd) ./test_assembly.$(TEST_EXT) $(args) - $(run_cmd) ./test_atomic.$(TEST_EXT) $(args) - # Yes, 4:8 is intended on the next line. - $(run_cmd) ./test_yield.$(TEST_EXT) $(args) 4:8 - $(run_cmd) ./test_handle_perror.$(TEST_EXT) $(args) - $(run_cmd) ./test_task_auto_init.$(TEST_EXT) $(args) - $(run_cmd) ./test_task_scheduler_init.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_scheduler_observer.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_assertions.$(TEST_EXT) $(args) - $(run_cmd) ./test_task.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_leaks.$(TEST_EXT) $(args) - $(run_cmd) ./test_cache_aligned_allocator.$(TEST_EXT) $(args) - $(run_cmd) ./test_cache_aligned_allocator_STL.$(TEST_EXT) $(args) - $(run_cmd) ./test_blocked_range.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_blocked_range2d.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_blocked_range3d.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_for.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_sort.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_aligned_space.$(TEST_EXT) $(args) - $(run_cmd) ./test_parallel_reduce.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_scan.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_while.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_do.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_inits_loop.$(TEST_EXT) $(args) - $(run_cmd) ./test_lambda.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_mutex.$(TEST_EXT) $(args) 1:3 - $(run_cmd) ./test_mutex_native_threads.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_rwm_upgrade_downgrade.$(TEST_EXT) $(args) 4 - # Yes, 4:8 is intended on the next line. 
- $(run_cmd) ./test_halt.$(TEST_EXT) $(args) 4:8 - $(run_cmd) ./test_pipeline.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_pipeline_with_tbf.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_parallel_pipeline.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_tick_count.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_queue.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_vector.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_unordered.$(TEST_EXT) $(args) - $(run_cmd) ./test_concurrent_hash_map.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_enumerable_thread_specific.$(TEST_EXT) $(args) 0:4 - $(run_cmd) ./test_combinable.$(TEST_EXT) $(args) 0:4 - # $(run_cmd) ./test_model_plugin.$(TEST_EXT) $(args) 4 - $(run_cmd) ./test_eh_tasks.$(TEST_EXT) $(args) 2:4 - $(run_cmd) ./test_eh_algorithms.$(TEST_EXT) $(args) 2:4 - $(run_cmd) ./test_tbb_thread.$(TEST_EXT) $(args) - $(run_cmd) ./test_std_thread.$(TEST_EXT) $(args) - $(run_cmd) ./test_parallel_invoke.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_task_group.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_ittnotify.$(TEST_EXT) $(args) 2:2 - $(run_cmd) ./test_parallel_for_each.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_tbb_header.$(TEST_EXT) $(args) - $(run_cmd) ./test_concurrent_monitor.$(TEST_EXT) $(args) 6:8 - $(run_cmd) ./test_critical_section.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_semaphore.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_reader_writer_lock.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_tbb_condition_variable.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_intrusive_list.$(TEST_EXT) $(args) - $(run_cmd) ./test_cilk_interop.$(TEST_EXT) $(args) - $(run_cmd) ./test_fast_random.$(TEST_EXT) $(args) 1:16 - -CPLUS_FLAGS_DEPRECATED = $(DEFINE_KEY)TBB_DEPRECATED=1 $(subst $(WARNING_KEY),,$(CPLUS_FLAGS)) $(WARNING_SUPPRESS) - -TEST_TBB_OLD.OBJ = test_concurrent_vector_v2.$(OBJ) test_concurrent_queue_v2.$(OBJ) test_mutex_v2.$(OBJ) - -TEST_TBB_DEPRECATED.OBJ = test_concurrent_queue_deprecated.$(OBJ) \ - test_concurrent_vector_deprecated.$(OBJ) \ - - -# For deprecated files, we don't mind warnings etc., thus compilation rules are most relaxed -$(TEST_TBB_OLD.OBJ): %.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(CPLUS_FLAGS_DEPRECATED) $(INCLUDES) $< - -%_deprecated.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(CPLUS_FLAGS_DEPRECATED) $(INCLUDES) $< - -TEST_TBB_OLD.EXE = $(subst .$(OBJ),.$(TEST_EXT),$(TEST_TBB_OLD.OBJ) $(TEST_TBB_DEPRECATED.OBJ)) - -ifeq (,$(NO_LEGACY_TESTS)) -test_tbb_old: $(TEST_PREREQUISITE) $(TEST_TBB_OLD.EXE) - $(run_cmd) ./test_concurrent_vector_v2.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_vector_deprecated.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_queue_v2.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_concurrent_queue_deprecated.$(TEST_EXT) $(args) 1:4 - $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 1 - $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 2 - $(run_cmd) ./test_mutex_v2.$(TEST_EXT) $(args) 4 -else -test_tbb_old: - @echo Legacy tests skipped -endif - -ifneq (,$(codecov)) -codecov_gen: - profmerge - codecov $(if $(findstring -,$(codecov)),$(codecov),) -demang -comp $(tbb_root)/build/codecov.txt -endif - -test_% debug_%: test_%.$(TEST_EXT) $(TEST_PREREQUISITE) -ifeq (,$(repeat)) - $(run_cmd) ./$< $(args) -else -ifeq (windows,$(tbb_os)) - for /L %%i in (1,1,$(repeat)) do echo %%i of $(repeat): && $(run_cmd) $< $(args) -else - for ((i=1;i<=$(repeat);++i)); do echo $$i of $(repeat): && $(run_cmd) ./$< $(args); done -endif -endif # repeat -ifneq (,$(codecov)) - profmerge - codecov $(if 
$(findstring -,$(codecov)),$(codecov),) -demang -comp $(tbb_root)/build/codecov.txt -endif - -time_%: time_%.$(TEST_EXT) $(TEST_PREREQUISITE) - $(run_cmd) ./$< $(args) - - -perf_%: AUX_LIBS = perf_dll.$(LIBEXT) -perf_%: perf_dll.$(DLL) perf_%.$(TEST_EXT) - $(run_cmd) ./$@.$(TEST_EXT) $(args) - -clean_%: - $(RM) $*.$(OBJ) $*.exe $*.$(DLL) $*.$(LIBEXT) $*.res $*.map $*.ilk $*.pdb $*.exp $*.*manifest $*.tmp $*.d - -clean: - $(RM) *.$(OBJ) *.exe *.$(DLL) *.$(LIBEXT) *.res *.map *.ilk *.pdb *.exp *.manifest *.tmp *.d pgopti.* *.dyn core core.*[0-9][0-9] - -# Include automatically generated dependences --include *.d diff --git a/deal.II/bundled/tbb30_104oss/build/SunOS.gcc.inc b/deal.II/bundled/tbb30_104oss/build/SunOS.gcc.inc deleted file mode 100644 index 83e612e17b..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/SunOS.gcc.inc +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -DYLIB_KEY = -shared -LIBDL = -ldl - -TBB_NOSTRICT = 1 - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = -shared -LIBS = -lpthread -lrt -ldl -C_FLAGS = $(CPLUS_FLAGS) -x c - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG -g -O0 -DUSE_PTHREAD -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (ia64,$(arch)) -# Position-independent code (PIC) is a must for IA-64 - CPLUS_FLAGS += $(PIC_KEY) -endif - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -# for some gcc versions on Solaris, -m64 may imply V9, but perhaps not everywhere (TODO: verify) -ifeq (sparc,$(arch)) - CPLUS_FLAGS += -mcpu=v9 -m64 - LIB_LINK_FLAGS += -mcpu=v9 -m64 -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. 
-#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM=ias - TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2 - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/SunOS.inc b/deal.II/bundled/tbb30_104oss/build/SunOS.inc deleted file mode 100644 index 60cc44f3c3..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/SunOS.inc +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -ifndef arch - arch:=$(shell uname -p) - ifeq ($(arch),i386) - ifeq ($(shell isainfo -b),64) - arch:=intel64 - else - arch:=ia32 - endif - endif - export arch -# For non-IA systems running Sun OS, 'arch' will contain whatever is printed by uname -p. -# In particular, for SPARC architecture it will contain "sparc". 
-endif - -ifndef runtime - gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//') - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_kernel$(os_kernel_version) -endif - -ifeq ($(arch),sparc) - native_compiler := gcc - export compiler ?= gcc -else - native_compiler := suncc - export compiler ?= suncc -endif -# debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=bash $(tbb_root)/build/version_info_sunos.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp -MAKE_TBBVARS=bash $(tbb_root)/build/generate_tbbvars.sh - -ifdef LD_LIBRARY_PATH - export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) -else - export LD_LIBRARY_PATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so -LIBEXT=so - -TBB.DEF = -TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) - -MALLOCPROXY.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) - -TBB_NOSTRICT=1 - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh diff --git a/deal.II/bundled/tbb30_104oss/build/SunOS.suncc.inc b/deal.II/bundled/tbb30_104oss/build/SunOS.suncc.inc deleted file mode 100644 index dab47f1cba..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/SunOS.suncc.inc +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -COMPILE_ONLY = -c -xMMD -errtags -PREPROC_ONLY = -E -xMMD -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -KPIC -DYLIB_KEY = -G -LIBDL = -ldl -# WARNING_AS_ERROR_KEY = -errwarn=%all -WARNING_AS_ERROR_KEY = Warning as error -WARNING_SUPPRESS = -erroff=unassigned,attrskipunsup,badargtype2w,badbinaryopw,wbadasg,wvarhidemem -tbb_strict=0 - -TBB_NOSTRICT = 1 - -CPLUS = CC -CONLY = cc -LIB_LINK_FLAGS = -G -R . -M$(tbb_root)/build/suncc.map.pause -LINK_FLAGS += -M$(tbb_root)/build/suncc.map.pause -LIBS = -lpthread -lrt -R . 
-C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -mt -xO2 -library=stlport4 -DUSE_PTHREAD $(WARNING_SUPPRESS) -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -mt -DTBB_USE_DEBUG -g -library=stlport4 -DUSE_PTHREAD $(WARNING_SUPPRESS) -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - ASM_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -# TODO: verify whether -m64 implies V9 on relevant Sun Studio versions -# (those that handle gcc assembler syntax) -ifeq (sparc,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -export TBB_CUSTOM_VARS_SH=export CXXFLAGS="-I$${TBB30_INSTALL_DIR}/include -library=stlport4 $(CXXFLAGS) -M$${TBB30_INSTALL_DIR}/build/suncc.map.pause" -export TBB_CUSTOM_VARS_CSH=setenv CXXFLAGS "-I$${TBB30_INSTALL_DIR}/include -library=stlport4 $(CXXFLAGS) -M$${TBB30_INSTALL_DIR}/build/suncc.map.pause" - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-fbe -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -M_INCLUDES = $(INCLUDES) -I$(MALLOC_ROOT) -I$(MALLOC_SOURCE_ROOT) -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/codecov.txt b/deal.II/bundled/tbb30_104oss/build/codecov.txt deleted file mode 100644 index e22f8059a2..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/codecov.txt +++ /dev/null @@ -1,7 +0,0 @@ -src/tbb -src/tbbmalloc -include/tbb -src/rml/server -src/rml/client -src/rml/include -source/malloc diff --git a/deal.II/bundled/tbb30_104oss/build/common.inc b/deal.II/bundled/tbb30_104oss/build/common.inc deleted file mode 100644 index c9e6b233c8..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/common.inc +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. 
Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -ifndef tbb_os - - # Windows sets environment variable OS; for other systems, ask uname - ifeq ($(OS),) - OS:=$(shell uname) - ifeq ($(OS),) - $(error "Cannot detect operating system") - endif - export tbb_os=$(OS) - endif - - ifeq ($(OS), Windows_NT) - export tbb_os=windows - endif - ifeq ($(OS), Linux) - export tbb_os=linux - endif - ifeq ($(OS), Darwin) - export tbb_os=macos - endif - -endif # !tbb_os - -ifeq ($(wildcard $(tbb_root)/build/$(tbb_os).inc),) - $(error "$(tbb_os)" is not supported. Add build/$(tbb_os).inc file with os-specific settings ) -endif - -# detect arch and runtime versions, provide common os-specific definitions -include $(tbb_root)/build/$(tbb_os).inc - -ifeq ($(arch),) - $(error Architecture not detected) -endif -ifeq ($(runtime),) - $(error Runtime version not detected) -endif -ifeq ($(wildcard $(tbb_root)/build/$(tbb_os).$(compiler).inc),) - $(error Compiler "$(compiler)" is not supported on $(tbb_os). Add build/$(tbb_os).$(compiler).inc file with compiler-specific settings ) -endif - -ifdef target - ifeq ($(wildcard $(tbb_root)/build/$(target).inc),) - $(error "$(target)" is not supported. Add build/$(target).inc file) - endif - include $(tbb_root)/build/$(target).inc -endif - -# Support for running debug tests to release library and vice versa -flip_cfg=$(subst _flipcfg,_release,$(subst _release,_debug,$(subst _debug,_flipcfg,$(1)))) -cross_cfg = $(if $(crosstest),$(call flip_cfg,$(1)),$(1)) - -ifdef BUILDING_PHASE - # Setting default configuration to release - cfg?=release - # No lambas or other C++0x extensions by default for compilers that implement them as experimental features - lambdas ?= 0 - cpp0x ?= 0 - # include compiler-specific build configurations - -include $(tbb_root)/build/$(tbb_os).$(compiler).inc - ifdef extra_inc - -include $(tbb_root)/build/$(extra_inc) - endif -endif -ifneq ($(BUILDING_PHASE),1) - # definitions for top-level Makefiles - origin_build_dir:=$(origin tbb_build_dir) - tbb_build_dir?=$(tbb_root)$(SLASH)build - tbb_build_prefix?=$(tbb_os)_$(arch)_$(compiler)_$(runtime) - work_dir=$(tbb_build_dir)$(SLASH)$(tbb_build_prefix) - ifneq ($(BUILDING_PHASE),0) - work_dir:=$(work_dir) - # assign new value for tbb_root if path is not absolute (the filter keeps only /* paths) - ifeq ($(filter /% $(SLASH)%, $(subst :, ,$(tbb_root)) ),) - ifeq ($(origin_build_dir),undefined) - override tbb_root:=../.. - else - override tbb_root:=$(CWD)/$(tbb_root) - endif - endif - export tbb_root - endif # BUILDING_PHASE != 0 -endif # BUILDING_PHASE != 1 diff --git a/deal.II/bundled/tbb30_104oss/build/common_rules.inc b/deal.II/bundled/tbb30_104oss/build/common_rules.inc deleted file mode 100644 index 07ec7b3f31..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/common_rules.inc +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
-# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -ifeq ($(tbb_strict),1) - ifeq ($(WARNING_AS_ERROR_KEY),) - $(error WARNING_AS_ERROR_KEY is empty) - endif - # Do not remove line below! - WARNING_KEY += $(WARNING_AS_ERROR_KEY) -endif - -ifndef TEST_EXT - TEST_EXT = exe -endif - -.PRECIOUS: %.$(OBJ) %.$(TEST_EXT) %.res - -INCLUDES += $(INCLUDE_KEY)$(tbb_root)/src $(INCLUDE_KEY)$(tbb_root)/src/rml/include $(INCLUDE_KEY)$(tbb_root)/include - -CPLUS_FLAGS += $(WARNING_KEY) $(CXXFLAGS) -LINK_FLAGS += $(LDFLAGS) -LIB_LINK_FLAGS += $(LDFLAGS) -CPLUS_FLAGS_NOSTRICT = $(subst -strict-ansi,-ansi,$(CPLUS_FLAGS)) - -LIB_LINK_CMD ?= $(CPLUS) $(PIC_KEY) -ifeq ($(origin LIB_OUTPUT_KEY), undefined) - LIB_OUTPUT_KEY = $(OUTPUT_KEY) -endif -ifeq ($(origin LIB_LINK_LIBS), undefined) - LIB_LINK_LIBS = $(LIBDL) $(LIBS) -endif - -CONLY ?= $(CPLUS) - -# The most generic rules -%.$(OBJ): %.cpp - @echo "=====tbb=========================MT== $(cfg)/$($*.tmp - $(ASM) $(ASM_FLAGS) -o $@ $*.tmp - rm $*.tmp - -# Rule for generating .E file if needed for visual inspection -%.E: %.cpp - $(CPLUS) $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $(PREPROC_ONLY) $< >$@ - -# TODO Rule for generating .asm file if needed for visual inspection -%.asm: %.cpp - $(CPLUS) /c /Fa $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $< - -# TODO Rule for generating .s file if needed for visual inspection -%.s: %.cpp - $(CPLUS) -S $(CPLUS_FLAGS) $(CXX_ONLY_FLAGS) $(INCLUDES) $< - -# Customizations - -ifeq (1,$(TBB_NOSTRICT)) -# GNU 3.2.3 headers have a ISO syntax that is rejected by Intel compiler in -strict-ansi mode. -# The Mac uses gcc, so the list is empty for that platform. 
-# The files below need the -strict-ansi flag downgraded to -ansi to compile - -$(KNOWN_NOSTRICT): %.$(OBJ): %.cpp - @echo "=====tbb=========================MT== $(cfg)/$($@ - -%.res: %.rc version_string.tmp $(TBB.MANIFEST) - rc /Fo$@ $(INCLUDES) $(filter /D%,$(CPLUS_FLAGS)) $< - -ifneq (,$(TBB.MANIFEST)) -$(TBB.MANIFEST): - cmd /C "echo #include ^ >tbbmanifest.c" - cmd /C "echo int main(){return 0;} >>tbbmanifest.c" - cl /nologo $(C_FLAGS) tbbmanifest.c - -version_string.tmp: $(TBB.MANIFEST) - @echo "=====tbb=========================MT== $(cfg)/$@" - @$(MAKE_VERSIONS) - @cmd /C "echo #define TBB_MANIFEST 1 >> version_string.tmp" - -else -version_string.tmp: - @echo "=====tbb=========================MT== $(cfg)/$@" - @$(MAKE_VERSIONS) -endif - - -# Rules for generating a test DLL -%_dll.$(OBJ): %.cpp - $(CPLUS) $(COMPILE_ONLY) $(OUTPUTOBJ_KEY)$@ $(CPLUS_FLAGS) $(PIC_KEY) $(DEFINE_KEY)_USRDLL $(INCLUDES) $< diff --git a/deal.II/bundled/tbb30_104oss/build/detect.js b/deal.II/bundled/tbb30_104oss/build/detect.js deleted file mode 100644 index 8e90dc0ae9..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/detect.js +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. 
- -function doWork() { - var WshShell = WScript.CreateObject("WScript.Shell"); - - var fso = new ActiveXObject("Scripting.FileSystemObject"); - - var tmpExec; - - if ( WScript.Arguments.Count() > 1 && WScript.Arguments(1) == "gcc" ) { - if ( WScript.Arguments(0) == "/arch" ) { - WScript.Echo( "ia32" ); - } - else if ( WScript.Arguments(0) == "/runtime" ) { - WScript.Echo( "mingw" ); - } - return; - } - - //Compile binary - tmpExec = WshShell.Exec("cmd /c echo int main(){return 0;} >detect.c"); - while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); - } - - tmpExec = WshShell.Exec("cl /MD detect.c /link /MAP"); - while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); - } - - if ( WScript.Arguments(0) == "/arch" ) { - //read compiler banner - var clVersion = tmpExec.StdErr.ReadAll(); - - //detect target architecture - var intel64=/AMD64|EM64T|x64/mgi; - var ia64=/IA-64|Itanium/mgi; - var ia32=/80x86/mgi; - if ( clVersion.match(intel64) ) { - WScript.Echo( "intel64" ); - } else if ( clVersion.match(ia64) ) { - WScript.Echo( "ia64" ); - } else if ( clVersion.match(ia32) ) { - WScript.Echo( "ia32" ); - } else { - WScript.Echo( "unknown" ); - } - } - - if ( WScript.Arguments(0) == "/runtime" ) { - //read map-file - var map = fso.OpenTextFile("detect.map", 1, 0); - var mapContext = map.readAll(); - map.Close(); - - //detect runtime - var vc71=/MSVCR71\.DLL/mgi; - var vc80=/MSVCR80\.DLL/mgi; - var vc90=/MSVCR90\.DLL/mgi; - var vc100=/MSVCR100\.DLL/mgi; - var psdk=/MSVCRT\.DLL/mgi; - if ( mapContext.match(vc71) ) { - WScript.Echo( "vc7.1" ); - } else if ( mapContext.match(vc80) ) { - WScript.Echo( "vc8" ); - } else if ( mapContext.match(vc90) ) { - WScript.Echo( "vc9" ); - } else if ( mapContext.match(vc100) ) { - WScript.Echo( "vc10" ); - } else if ( mapContext.match(psdk) ) { - // Our current naming convention assumes vc7.1 for 64-bit Windows PSDK - WScript.Echo( "vc7.1" ); - } else { - WScript.Echo( "unknown" ); - } - } - - // delete intermediate files - if ( fso.FileExists("detect.c") ) - fso.DeleteFile ("detect.c", false); - if ( fso.FileExists("detect.obj") ) - fso.DeleteFile ("detect.obj", false); - if ( fso.FileExists("detect.map") ) - fso.DeleteFile ("detect.map", false); - if ( fso.FileExists("detect.exe") ) - fso.DeleteFile ("detect.exe", false); - if ( fso.FileExists("detect.exe.manifest") ) - fso.DeleteFile ("detect.exe.manifest", false); -} - -if ( WScript.Arguments.Count() > 0 ) { - - try { - doWork(); - } catch( error ) - { - WScript.Echo( "unknown" ); - WScript.Quit( 0 ); - } - -} else { - - WScript.Echo( "/arch or /runtime should be set" ); -} - diff --git a/deal.II/bundled/tbb30_104oss/build/generate_tbbvars.bat b/deal.II/bundled/tbb30_104oss/build/generate_tbbvars.bat deleted file mode 100644 index 9fd3a51c43..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/generate_tbbvars.bat +++ /dev/null @@ -1,74 +0,0 @@ -@echo off -REM -REM Copyright 2005-2010 Intel Corporation. All Rights Reserved. -REM -REM This file is part of Threading Building Blocks. -REM -REM Threading Building Blocks is free software; you can redistribute it -REM and/or modify it under the terms of the GNU General Public License -REM version 2 as published by the Free Software Foundation. -REM -REM Threading Building Blocks is distributed in the hope that it will be -REM useful, but WITHOUT ANY WARRANTY; without even the implied warranty -REM of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -REM GNU General Public License for more details. 
-REM -REM You should have received a copy of the GNU General Public License -REM along with Threading Building Blocks; if not, write to the Free Software -REM Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -REM -REM As a special exception, you may use this file as part of a free software -REM library without restriction. Specifically, if other files instantiate -REM templates or use macros or inline functions from this file, or you compile -REM this file and link it with other files to produce an executable, this -REM file does not by itself cause the resulting executable to be covered by -REM the GNU General Public License. This exception does not however -REM invalidate any other reasons why the executable file might be covered by -REM the GNU General Public License. -REM -setlocal -for %%D in ("%tbb_root%") do set actual_root=%%~fD -set fslash_root=%actual_root:\=/% -set bin_dir=%CD% -set fslash_bin_dir=%bin_dir:\=/% -set _INCLUDE=INCLUDE& set _LIB=LIB -if not x%UNIXMODE%==x set _INCLUDE=CPATH& set _LIB=LIBRARY_PATH - -if exist tbbvars.bat goto skipbat -echo Generating local tbbvars.bat -echo @echo off>tbbvars.bat -echo SET TBB30_INSTALL_DIR=%actual_root%>>tbbvars.bat -echo SET TBB_ARCH_PLATFORM=%arch%\%runtime%>>tbbvars.bat -echo SET TBB_TARGET_ARCH=%arch%>>tbbvars.bat -echo SET %_INCLUDE%=%%TBB30_INSTALL_DIR%%\include;%%%_INCLUDE%%%>>tbbvars.bat -echo SET %_LIB%=%bin_dir%;%%%_LIB%%%>>tbbvars.bat -echo SET PATH=%bin_dir%;%%PATH%%>>tbbvars.bat -if not x%UNIXMODE%==x echo SET LD_LIBRARY_PATH=%bin_dir%;%%LD_LIBRARY_PATH%%>>tbbvars.bat -:skipbat - -if exist tbbvars.sh goto skipsh -echo Generating local tbbvars.sh -echo #!/bin/sh>tbbvars.sh -echo export TBB30_INSTALL_DIR="%fslash_root%">>tbbvars.sh -echo export TBB_ARCH_PLATFORM="%arch%\%runtime%">>tbbvars.sh -echo export TBB_TARGET_ARCH="%arch%">>tbbvars.sh -echo export %_INCLUDE%="${TBB30_INSTALL_DIR}/include;$%_INCLUDE%">>tbbvars.sh -echo export %_LIB%="%fslash_bin_dir%;$%_LIB%">>tbbvars.sh -echo export PATH="%fslash_bin_dir%;$PATH">>tbbvars.sh -if not x%UNIXMODE%==x echo export LD_LIBRARY_PATH="%fslash_bin_dir%;$LD_LIBRARY_PATH">>tbbvars.sh -:skipsh - -if exist tbbvars.csh goto skipcsh -echo Generating local tbbvars.csh -echo #!/bin/csh>tbbvars.csh -echo setenv TBB30_INSTALL_DIR "%actual_root%">>tbbvars.csh -echo setenv TBB_ARCH_PLATFORM "%arch%\%runtime%">>tbbvars.csh -echo setenv TBB_TARGET_ARCH "%arch%">>tbbvars.csh -echo setenv %_INCLUDE% "${TBB30_INSTALL_DIR}\include;$%_INCLUDE%">>tbbvars.csh -echo setenv %_LIB% "%bin_dir%;$%_LIB%">>tbbvars.csh -echo setenv PATH "%bin_dir%;$PATH">>tbbvars.csh -if not x%UNIXMODE%==x echo setenv LD_LIBRARY_PATH "%bin_dir%;$LD_LIBRARY_PATH">>tbbvars.csh -:skipcsh - -endlocal -exit diff --git a/deal.II/bundled/tbb30_104oss/build/generate_tbbvars.sh b/deal.II/bundled/tbb30_104oss/build/generate_tbbvars.sh deleted file mode 100644 index 484719f172..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/generate_tbbvars.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. 
-# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -# Script used to generate tbbvars.[c]sh scripts -bin_dir="$PWD" # -cd "$tbb_root" # keep this comments here -tbb_root="$PWD" # to make it unsensible -cd "$bin_dir" # to EOL encoding -[ "`uname`" = "Darwin" ] && dll_path="DYLD_LIBRARY_PATH" || dll_path="LD_LIBRARY_PATH" # -[ -f ./tbbvars.sh ] || cat >./tbbvars.sh <./tbbvars.csh < - - -

Overview

This directory contains the internal Makefile infrastructure for Threading Building Blocks.

See below for how to build TBB and how to port TBB to a new platform, operating system or architecture.

Files

The files here are not intended to be used directly. See below for usage.

Makefile.tbb
    Main Makefile to build the TBB library. Invoked via 'make tbb' from top-level Makefile.
Makefile.tbbmalloc
    Main Makefile to build the TBB scalable memory allocator library as well as its tests.
    Invoked via 'make tbbmalloc' from top-level Makefile.
Makefile.test
    Main Makefile to build and run the tests for the TBB library. Invoked via 'make test' from top-level Makefile.
common.inc
    Main common included Makefile that includes OS-specific and compiler-specific Makefiles.
<os>.inc
    OS-specific Makefile for a particular <os>.
<os>.<compiler>.inc
    Compiler-specific Makefile for a particular <os> / <compiler> combination.
*.sh
    Infrastructure utilities for Linux*, Mac OS* X, and UNIX*-related systems.
*.js, *.bat
    Infrastructure utilities for Windows* systems.
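As a quick illustration of that indirection, a minimal session drives everything from the top-level directory rather than from build/ (a sketch only; the directory name is a placeholder, and 'make' may be 'gmake' on your system):

    cd tbb30_104oss    # top-level directory of the installed software (placeholder name)
    make tbb           # delegates to build/Makefile.tbb
    make tbbmalloc     # delegates to build/Makefile.tbbmalloc
    make test          # delegates to build/Makefile.test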

To Build

To port TBB to a new platform, operating system or architecture, see the porting directions below.

Software prerequisites:

  1. C++ compiler for the platform, operating system and architecture of interest.
     Either the native compiler for your system, or, optionally, the appropriate Intel® C++ compiler, may be used.
  2. GNU make utility. On Windows*, if a UNIX* emulator is used to run GNU make,
     it should be able to run Windows* utilities and commands. On Linux*, Mac OS* X, etc.,
     shell commands issued by GNU make should execute in a Bourne or BASH compatible shell.

-TBB libraries can be built by performing the following steps. -On systems that support only one ABI (e.g., 32-bit), these steps build the libraries for that ABI. -On systems that support both 64-bit and 32-bit libraries, these steps build the 64-bit libraries -(Linux*, Mac OS* X, and related systems) or whichever ABI is selected in the development environment (Windows* systems). -

-
    -
  1. Change to the top-level directory of the installed software. -
  2. If using the Intel® C++ compiler, make sure the appropriate compiler is available in your PATH - (e.g., by sourcing the appropriate iccvars script for the compiler to be used). -
  3. Invoke GNU make using no arguments, for example, 'gmake' (a sketch of these steps follows the list). -
- -
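As a minimal illustration of the three steps above (a sketch only; the installation path and the iccvars location are placeholders, and step 2 applies only when the Intel® C++ compiler is used):

    # Hypothetical shell session for the default build.
    cd /path/to/tbb30_104oss        # 1. top-level directory of the installed software
    # . /path/to/iccvars.sh         # 2. only if the Intel® C++ compiler is to be used
    gmake                           # 3. builds the release and debug TBB libraries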

-To build TBB libraries for other than the default ABI (e.g., to build 32-bit libraries on Linux*, Mac OS* X, -or related systems that support both 64-bit and 32-bit libraries), perform the following steps. -

-
    -
  1. Change to the top-level directory of the installed software. -
  2. If using the Intel® C++ compiler, make sure the appropriate compiler is available in your PATH - (e.g., by sourcing the appropriate iccvars script for the compiler to be used). -
  3. Invoke GNU make as follows, 'gmake arch=ia32' (see the example after this list). -
- -
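For example, on a 64-bit Linux* host the 32-bit libraries could be built as sketched below (assuming a 32-bit-capable toolchain and runtime are installed; the path is a placeholder):

    cd /path/to/tbb30_104oss
    gmake arch=ia32                 # build 32-bit release and debug libraries instead of the default ABI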

The default make target will build the release and debug versions of the TBB library.

-

Other targets are available in the top-level Makefile. You might find the following targets useful: -

    -
  • 'make test' will build and run TBB unit-tests; -
  • 'make examples' will build and run TBB examples; -
  • 'make all' will do all of the above. -
-See also the list of other targets below. -

- -

-By default, the libraries will be built in sub-directories within the build/ directory. -The sub-directories are named according to the operating system, architecture, compiler and software environment used -(the sub-directory names also distinguish release vs. debug libraries). On Linux*, the software environment comprises -the GCC, libc and kernel version used. On Mac OS* X, the software environment comprises the GCC and OS version used. -On Windows, the software environment comprises the Microsoft* Visual Studio* version used. -See below for how to change the default build directory. -
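For illustration only (the sub-directory name below is hypothetical; the actual name is derived from your OS, architecture, compiler and software environment as described above), a release build on a Linux* system might land in a location such as:

    # Hypothetical location of the built release libraries.
    ls build/linux_intel64_gcc_cc4.4.3_libc2.11.1_kernel2.6.32_release/
    # The tbb and tbbmalloc shared libraries, test binaries and generated tbbvars scripts are placed here.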

- -

-To perform different build and/or test operations, use the following steps. -

-
    -
  1. Change to the top-level directory of the installed software. -
  2. If using the Intel® C++ compiler, make sure the appropriate compiler is available in your PATH - (e.g., by sourcing the appropriate iccvars script for the compiler to be used). -
  3. Invoke GNU make by using one or more of the following commands (a combined example follows this list). -
    -
    make -
    Default build. Equivalent to 'make tbb tbbmalloc'. -
    make all -
    Equivalent to 'make tbb tbbmalloc test examples'. -
    cd src;make release -
    Build and test release libraries only. -
    cd src;make debug -
    Build and test debug libraries only. -
    make tbb -
    Make TBB release and debug libraries. -
    make tbbmalloc -
    Make TBB scalable memory allocator libraries. -
    make test -
    Compile and run the unit-tests. -
    make examples -
    Build libraries and run all examples, equivalent to running 'make debug clean release' from - the general example Makefile. -
    make compiler={icl, icc} [(above options or targets)] -
    Build and run as above, but use Intel® compilers instead of default, native compilers - (e.g., icl instead of cl.exe on Windows* systems, or icc instead of g++ on Linux* or Mac OS* X systems). -
    make arch={ia32, intel64, ia64} [(above options or targets)] -
    Build and run as above, but build libraries for the selected ABI. - Might be useful for cross-compilation; ensure proper environment is set before running this command. -
    make tbb_root={(TBB directory)} [(above options or targets)] -
    Build and run as above; for use when invoking 'make' from a directory other than - the top-level directory. -
    make tbb_build_dir={(build directory)} [(above options or targets)] -
    Build and run as above, but place the built libraries in the specified directory, rather than in the default - sub-directory within the build/ directory. This command may fail if the sources are - installed in a directory whose path contains spaces. -
    make tbb_build_prefix={(build sub-directory)} [(above options or targets)] -
    Build and run as above, but place the built libraries in the specified sub-directory within the build/ directory, - rather than using the default sub-directory name. -
    make [(above options)] clean -
    Remove any executables or intermediate files produced by the above commands. - Includes build directories, object files, libraries and test executables. -
    -
- -
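A combined sketch of several invocations from the list above, as they might be issued from a shell (paths and the build prefix are placeholders):

    cd /path/to/tbb30_104oss
    make                                # default build: equivalent to 'make tbb tbbmalloc'
    make test                           # compile and run the unit-tests
    make compiler=icc arch=intel64      # use the Intel® compiler and build 64-bit libraries
    make tbb_build_prefix=mybuild       # place results under build/mybuild_release and build/mybuild_debug
    make clean                          # remove executables and intermediate files produced above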

To Port

-

-This section provides information on how to port TBB to a new platform, operating system or architecture. -A subset or a superset of these steps may be required for porting to a given platform. -

- -

To port the TBB source code:

-
    -
  1. If porting to a new architecture, create a file that describes the architecture-specific details for that architecture (a shell sketch of this workflow follows the list). -
      -
    • Create a <os>_<architecture>.h file in the include/tbb/machine directory - that describes these details. -
        -
      • The <os>_<architecture>.h is named after the operating system and architecture as recognized by - include/tbb/tbb_machine.h and the Makefile infrastructure. -
      • This file defines the implementations of synchronization operations, and also the - scheduler yield function, for the operating system and architecture. -
      • Several examples of <os>_<architecture>.h files can be found in the - include/tbb/machine directory. -
          -
        • A minimal implementation defines the 4-byte and 8-byte compare-and-swap operations, - and the scheduler yield function. See include/tbb/machine/mac_ppc.h - for an example of a minimal implementation. -
        • More complex implementation examples can also be found in the - include/tbb/machine directory - that implement all the individual variants of synchronization operations that TBB uses. - Such implementations are more verbose but may achieve better performance on a given architecture. -
        • In a given implementation, any synchronization operation that is not defined is implemented, by default, - in terms of 4-byte or 8-byte compare-and-swap. More operations can thus be added incrementally to increase - the performance of an implementation. -
        • In most cases, synchronization operations are implemented as inline assembly code; examples also exist, - (e.g., for Intel® Itanium® processors) that use out-of-line assembly code in *.s or *.asm files - (see the assembly code sub-directories in the src/tbb directory). -
        -
      -
    • Modify include/tbb/tbb_machine.h, if needed, to invoke the appropriate - <os>_<architecture>.h file in the include/tbb/machine directory. -
    -
  2. Add an implementation of DetectNumberOfWorkers() in src/tbb/tbb_misc.h, - if needed, that returns the number of cores found on the system. This is used to determine the default - number of threads for the TBB task scheduler. -
  3. Either properly define FillDynamicLinks for use in - src/tbb/cache_aligned_allocator.cpp, - or hardcode the allocator to be used. -
  4. Additional types might be required in the union defined in - include/tbb/aligned_space.h - to ensure proper alignment on your platform. -
  5. Changes may be required in include/tbb/tick_count.h - for systems that do not provide gettimeofday. -
- -
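A hedged sketch of the file-level workflow behind step 1 above, starting from the minimal machine header mentioned in the text (the 'myos_myarch' names are hypothetical placeholders for the new port):

    # Start the new machine header from the minimal example shipped with TBB.
    cp include/tbb/machine/mac_ppc.h include/tbb/machine/myos_myarch.h
    # Implement the 4-byte and 8-byte compare-and-swap operations and the yield function there.
    ${EDITOR:-vi} include/tbb/machine/myos_myarch.h
    # Make tbb_machine.h include the new header for the new OS/architecture combination.
    ${EDITOR:-vi} include/tbb/tbb_machine.h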

To port the Makefile infrastructure:

-Modify the appropriate files in the Makefile infrastructure to add a new platform, operating system or architecture as needed. -See the Makefile infrastructure files for examples; a sketch of a typical workflow follows the list below. -
    -
  1. The top-level Makefile includes common.inc to determine the operating system. -
      -
    • To add a new operating system, add the appropriate test to common.inc, - and create the needed <os>.inc and <os>.<compiler>.inc files (see below). -
    -
  2. The <os>.inc file makes OS-specific settings for a particular <os>. -
      -
    • For example, linux.inc makes settings specific to Linux* systems. -
    • This file performs OS-dependent tests to determine the specific platform and/or architecture, - and sets other platform-dependent values. -
    • Add a new <os>.inc file for each new operating system added. -
    -
  3. The <os>.<compiler>.inc file makes compiler-specific settings for a particular - <os> / <compiler> combination. -
      -
    • For example, linux.gcc.inc makes specific settings for using GCC on Linux* systems, - and linux.icc.inc makes specific settings for using the Intel® C++ compiler on Linux* systems. -
    • This file sets particular compiler, assembler and linker options required when using a particular - <os> / <compiler> combination. -
    • Add a new <os>.<compiler>.inc file for each new <os> / <compiler> combination added. -
    -
- -
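Correspondingly, a sketch of the Makefile-infrastructure side of a port, using the Linux* files named above as templates ('myos' is a placeholder for the new operating system):

    # Use the existing Linux* files as starting points.
    cp build/linux.inc     build/myos.inc         # OS-specific settings and platform detection
    cp build/linux.gcc.inc build/myos.gcc.inc     # compiler-specific settings for GCC on the new OS
    # Add a detection test for the new OS to common.inc so the new .inc files are selected.
    ${EDITOR:-vi} build/common.inc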
-Up to parent directory -

-Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -

-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -

-* Other names and brands may be claimed as the property of others. - - diff --git a/deal.II/bundled/tbb30_104oss/build/linux.gcc.inc b/deal.II/bundled/tbb30_104oss/build/linux.gcc.inc deleted file mode 100644 index e3accbb599..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/linux.gcc.inc +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor $(if $(findstring cc4., $(runtime)),-Wextra) - -WARNING_SUPPRESS = -Wno-parentheses -Wno-non-virtual-dtor -DYLIB_KEY = -shared -EXPORT_KEY = -Wl,--version-script, -LIBDL = -ldl - -TBB_NOSTRICT = 1 - -CPLUS = g++ -CONLY = gcc -LIB_LINK_FLAGS = -shared -Wl,-soname=$(BUILDING_LIBRARY) -LIBS = -lpthread -lrt -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -DDO_ITT_NOTIFY -O2 -DUSE_PTHREAD -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -DTBB_USE_DEBUG -DDO_ITT_NOTIFY -g -O0 -DUSE_PTHREAD -endif - -ifneq (0,$(cpp0x)) - CXX_ONLY_FLAGS = -std=c++0x -endif - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (ia64,$(arch)) -# Position-independent code (PIC) is a must on IA-64, even for regular (not shared) executables - CPLUS_FLAGS += $(PIC_KEY) -endif - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 -march=pentium4 - LIB_LINK_FLAGS += -m32 -endif - -# for some gcc versions on Solaris, -m64 may imply V9, but perhaps not everywhere (TODO: verify) -ifeq (sparc,$(arch)) - CPLUS_FLAGS += -mcpu=v9 -m64 - LIB_LINK_FLAGS += -mcpu=v9 -m64 -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. 
-#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM=as - ASM_FLAGS += -xexplicit - TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o ia64_misc.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2 - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/linux.icc.inc b/deal.II/bundled/tbb30_104oss/build/linux.icc.inc deleted file mode 100644 index a6bd81ab07..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/linux.icc.inc +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -w1 -DYLIB_KEY = -shared -EXPORT_KEY = -Wl,--version-script, -LIBDL = -ldl -export COMPILER_VERSION := ICC: $(shell icc -V &1 | grep 'Version') -#TODO: autodetection of arch from COMPILER_VERSION!! 
- -TBB_NOSTRICT = 1 - -CPLUS = icpc -CONLY = icc - -ifeq (release,$(cfg)) -CPLUS_FLAGS = -O2 -strict-ansi -DUSE_PTHREAD -else -CPLUS_FLAGS = -O0 -g -strict-ansi -DUSE_PTHREAD -DTBB_USE_DEBUG -endif - -ifneq (,$(codecov)) - CPLUS_FLAGS += -prof-genx -else - CPLUS_FLAGS += -DDO_ITT_NOTIFY -endif - -OPENMP_FLAG = -openmp -LIB_LINK_FLAGS = -shared -i-static -Wl,-soname=$(BUILDING_LIBRARY) -LIBS = -lpthread -lrt -C_FLAGS = $(CPLUS_FLAGS) - -ASM= -ASM_FLAGS= - -TBB_ASM.OBJ= - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -falign-stack=maintain-16-byte -endif - -ifeq (ia64,$(arch)) -# Position-independent code (PIC) is a must on IA-64, even for regular (not shared) executables - CPLUS_FLAGS += $(PIC_KEY) -endif - -ifneq (00,$(lambdas)$(cpp0x)) - CPLUS_FLAGS += -std=c++0x -D_TBB_CPP0X -endif - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-gas -ifeq (ia64,$(arch)) - ASM=ias - TBB_ASM.OBJ = atomic_support.o lock_byte.o log2.o pause.o ia64_misc.o -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - diff --git a/deal.II/bundled/tbb30_104oss/build/linux.inc b/deal.II/bundled/tbb30_104oss/build/linux.inc deleted file mode 100644 index 99fbff7f2c..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/linux.inc +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -ifndef arch - uname_m:=$(shell uname -m) - ifeq ($(uname_m),i686) - export arch:=ia32 - endif - ifeq ($(uname_m),ia64) - export arch:=ia64 - endif - ifeq ($(uname_m),x86_64) - export arch:=intel64 - endif - ifeq ($(uname_m),sparc64) - export arch:=sparc - endif - ifndef arch - export arch:=$(uname_m) - endif -endif - -ifndef runtime - #gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc --version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//') - gcc_version_full=$(shell gcc --version | grep 'gcc'| egrep -o ' [0-9]+\.[0-9]+\.[0-9]+.*' | sed -e 's/^\ //') - gcc_version=$(shell echo "$(gcc_version_full)" | egrep -o '^[0-9]+\.[0-9]+\.[0-9]+\s*' | head -n 1 | sed -e 's/ *//g') - os_version:=$(shell uname -r) - os_kernel_version:=$(shell uname -r | sed -e 's/-.*$$//') - export os_glibc_version_full:=$(shell getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //') - os_glibc_version:=$(shell echo "$(os_glibc_version_full)" | sed -e '2,$$d' -e 's/-.*$$//') - export runtime:=cc$(gcc_version)_libc$(os_glibc_version)_kernel$(os_kernel_version) -endif - -native_compiler := gcc -export compiler ?= gcc -debugger ?= gdb - -CMD=sh -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_linux.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -ifdef LD_LIBRARY_PATH - export LD_LIBRARY_PATH := .:$(LD_LIBRARY_PATH) -else - export LD_LIBRARY_PATH := . -endif - -####### Build settings ######################################################## - -OBJ = o -DLL = so -LIBEXT = so -SONAME_SUFFIX =$(shell grep TBB_COMPATIBLE_INTERFACE_VERSION $(tbb_root)/include/tbb/tbb_stddef.h | egrep -o [0-9.]+) - -ifeq ($(arch),ia64) - def_prefix = lin64ipf -endif -ifeq ($(arch),sparc) - def_prefix = lin64 -endif -ifeq (,$(def_prefix)) - ifeq (64,$(findstring 64,$(arch))) - def_prefix = lin64 - else - def_prefix = lin32 - endif -endif -TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def - -TBB.DLL = $(TBB_NO_VERSION.DLL).$(SONAME_SUFFIX) -TBB.LIB = $(TBB.DLL) -TBB_NO_VERSION.DLL=libtbb$(DEBUG_SUFFIX).$(DLL) -LINK_TBB.LIB = $(TBB_NO_VERSION.DLL) - -MALLOC_NO_VERSION.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.DEF = $(MALLOC_ROOT)/lin-tbbmalloc-export.def -MALLOC.DLL = $(MALLOC_NO_VERSION.DLL).$(SONAME_SUFFIX) -MALLOC.LIB = $(MALLOC_NO_VERSION.DLL) -LINK_MALLOC.LIB = $(MALLOC_NO_VERSION.DLL) - -MALLOCPROXY_NO_VERSION.DLL = libtbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) -MALLOCPROXY.DEF = $(MALLOC_ROOT)/$(def_prefix)-proxy-export.def -MALLOCPROXY.DLL = $(MALLOCPROXY_NO_VERSION.DLL).$(SONAME_SUFFIX) -MALLOCPROXY.LIB = $(MALLOCPROXY_NO_VERSION.DLL) - -RML_NO_VERSION.DLL = libirml$(DEBUG_SUFFIX).$(DLL) -RML.DEF = $(RML_SERVER_ROOT)/lin-rml-export.def -RML.DLL = $(RML_NO_VERSION.DLL).1 -RML.LIB = $(RML_NO_VERSION.DLL) - -TBB_NOSTRICT=1 - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh diff --git a/deal.II/bundled/tbb30_104oss/build/macos.gcc.inc b/deal.II/bundled/tbb30_104oss/build/macos.gcc.inc deleted file mode 100644 index 4ac4731ae1..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/macos.gcc.inc +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. 
-# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -CPLUS = g++ -CONLY = gcc -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -TEST_WARNING_KEY = -Wextra -Wshadow -Wcast-qual -Woverloaded-virtual -Wnon-virtual-dtor -WARNING_SUPPRESS = -Wno-non-virtual-dtor -DYLIB_KEY = -dynamiclib -EXPORT_KEY = -Wl,-exported_symbols_list, -LIBDL = -ldl - -LIBS = -lpthread -LINK_FLAGS = -LIB_LINK_FLAGS = -dynamiclib -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -else - CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG -endif - -CPLUS_FLAGS += -DUSE_PTHREAD - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LINK_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LINK_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -ifeq (ppc64,$(arch)) - CPLUS_FLAGS += -arch ppc64 - LINK_FLAGS += -arch ppc64 - LIB_LINK_FLAGS += -arch ppc64 -endif - -ifeq (ppc32,$(arch)) - CPLUS_FLAGS += -arch ppc - LINK_FLAGS += -arch ppc - LIB_LINK_FLAGS += -arch ppc -endif - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions -fno-schedule-insns2 - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - diff --git a/deal.II/bundled/tbb30_104oss/build/macos.icc.inc b/deal.II/bundled/tbb30_104oss/build/macos.icc.inc deleted file mode 100644 index fbca6d9f50..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/macos.icc.inc +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -CPLUS = icpc -CONLY = icc -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -fPIC -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -w1 -DYLIB_KEY = -dynamiclib -EXPORT_KEY = -Wl,-exported_symbols_list, -LIBDL = -ldl -export COMPILER_VERSION := $(shell icc -V &1 | grep 'Version') -#TODO: autodetection of arch from COMPILER_VERSION!! - -OPENMP_FLAG = -openmp -LIBS = -lpthread -LINK_FLAGS = -LIB_LINK_FLAGS = -dynamiclib -i-static -C_FLAGS = $(CPLUS_FLAGS) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -fno-omit-frame-pointer -else - CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG -endif - -CPLUS_FLAGS += -DUSE_PTHREAD - -ifneq (,$(codecov)) - CPLUS_FLAGS += -prof-genx -endif - -ifneq (00,$(lambdas)$(cpp0x)) - CPLUS_FLAGS += -std=c++0x -D_TBB_CPP0X -endif - - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/macos.inc b/deal.II/bundled/tbb30_104oss/build/macos.inc deleted file mode 100644 index 15c9e1d535..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/macos.inc +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. 
This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -####### Detections and Commands ############################################### -ifndef arch - ifeq ($(shell /usr/sbin/sysctl -n hw.machine),Power Macintosh) - ifeq ($(shell /usr/sbin/sysctl -n hw.optional.64bitops),1) - export arch:=ppc64 - else - export arch:=ppc32 - endif - else - ifeq ($(shell /usr/sbin/sysctl -n hw.optional.x86_64 2>/dev/null),1) - export arch:=intel64 - else - export arch:=ia32 - endif - endif -endif - -ifndef runtime - #gcc_version:=$(shell gcc -v 2>&1 | grep 'gcc version' | sed -e 's/^gcc version //' | sed -e 's/ .*$$//' ) - gcc_version_full=$(shell gcc --version | grep 'gcc'| egrep -o ' [0-9]+\.[0-9]+\.[0-9]+.*' | sed -e 's/^\ //') - gcc_version=$(shell echo "$(gcc_version_full)" | egrep -o '^[0-9]+\.[0-9]+\.[0-9]+\s*' | head -n 1 | sed -e 's/ *//g') - os_version:=$(shell /usr/bin/sw_vers -productVersion) - export runtime:=cc$(gcc_version)_os$(os_version) -endif - -native_compiler := gcc -export compiler ?= gcc -debugger ?= gdb - -CMD=$(SHELL) -c -CWD=$(shell pwd) -RM?=rm -f -RD?=rmdir -MD?=mkdir -p -NUL= /dev/null -SLASH=/ -MAKE_VERSIONS=sh $(tbb_root)/build/version_info_macos.sh $(CPLUS) $(CPLUS_FLAGS) $(INCLUDES) >version_string.tmp -MAKE_TBBVARS=sh $(tbb_root)/build/generate_tbbvars.sh - -####### Build settings ######################################################## - -OBJ=o -DLL=dylib -LIBEXT=dylib - -def_prefix = $(if $(findstring 64,$(arch)),mac64,mac32) - -TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def -TBB.DLL = libtbb$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = $(TBB.DLL) -LINK_TBB.LIB = $(TBB.LIB) - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = libtbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = $(MALLOC.DLL) - -TBB_NOSTRICT=1 - -TEST_LAUNCHER=sh $(tbb_root)/build/test_launcher.sh diff --git a/deal.II/bundled/tbb30_104oss/build/suncc.map.pause b/deal.II/bundled/tbb30_104oss/build/suncc.map.pause deleted file mode 100644 index a92d08eb19..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/suncc.map.pause +++ /dev/null @@ -1 +0,0 @@ -hwcap_1 = OVERRIDE; \ No newline at end of file diff --git a/deal.II/bundled/tbb30_104oss/build/test_launcher.bat b/deal.II/bundled/tbb30_104oss/build/test_launcher.bat deleted file mode 100644 index a26149498c..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/test_launcher.bat +++ /dev/null @@ -1,36 +0,0 @@ -@echo off -REM -REM Copyright 2005-2010 Intel Corporation. All Rights Reserved. -REM -REM This file is part of Threading Building Blocks. -REM -REM Threading Building Blocks is free software; you can redistribute it -REM and/or modify it under the terms of the GNU General Public License -REM version 2 as published by the Free Software Foundation. -REM -REM Threading Building Blocks is distributed in the hope that it will be -REM useful, but WITHOUT ANY WARRANTY; without even the implied warranty -REM of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -REM GNU General Public License for more details. -REM -REM You should have received a copy of the GNU General Public License -REM along with Threading Building Blocks; if not, write to the Free Software -REM Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -REM -REM As a special exception, you may use this file as part of a free software -REM library without restriction. 
Specifically, if other files instantiate -REM templates or use macros or inline functions from this file, or you compile -REM this file and link it with other files to produce an executable, this -REM file does not by itself cause the resulting executable to be covered by -REM the GNU General Public License. This exception does not however -REM invalidate any other reasons why the executable file might be covered by -REM the GNU General Public License. -REM - -REM no LD_PRELOAD under Windows -if "%1"=="-l" ( - echo skip - exit -) - -%* diff --git a/deal.II/bundled/tbb30_104oss/build/test_launcher.sh b/deal.II/bundled/tbb30_104oss/build/test_launcher.sh deleted file mode 100644 index 48a382b738..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/test_launcher.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -while getopts "l:" flag # -do # - if [ `uname` != 'Linux' ] ; then # - echo 'skip' # - exit # - fi # - LD_PRELOAD=$OPTARG # - shift `expr $OPTIND - 1` # -done # -# Set stack limit -ulimit -s 10240 # -# Run the command line passed via parameters -export LD_PRELOAD # -./$* # diff --git a/deal.II/bundled/tbb30_104oss/build/version_info_aix.sh b/deal.II/bundled/tbb30_104oss/build/version_info_aix.sh deleted file mode 100644 index 11e5abf879..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/version_info_aix.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS \\" -echo '"TBB:' "BUILD_HOST\t\t"`hostname -s`" ("`uname -m`")"'" ENDL \' -# find OS name in *-release and issue* files by filtering blank lines and lsb-release content out -echo '"TBB:' "BUILD_OS\t\t"`lsb_release -sd 2>/dev/null | grep -ih '[a-z] ' - /etc/*release /etc/issue 2>/dev/null | head -1 | sed -e 's/["\\\\]//g'`'" ENDL \' -echo '"TBB:' "BUILD_KERNEL\t"`uname -srv`'" ENDL \' -echo '"TBB:' "BUILD_GCC\t\t"`g++ -v &1 | grep 'gcc.*version'`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \' -echo '"TBB:' "BUILD_GLIBC\t"`getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //'`'" ENDL \' -echo '"TBB:' "BUILD_LD\t\t"`ld -v 2>&1 | grep 'version'`'" ENDL \' -echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \' -echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/deal.II/bundled/tbb30_104oss/build/version_info_linux.sh b/deal.II/bundled/tbb30_104oss/build/version_info_linux.sh deleted file mode 100644 index 11e5abf879..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/version_info_linux.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS \\" -echo '"TBB:' "BUILD_HOST\t\t"`hostname -s`" ("`uname -m`")"'" ENDL \' -# find OS name in *-release and issue* files by filtering blank lines and lsb-release content out -echo '"TBB:' "BUILD_OS\t\t"`lsb_release -sd 2>/dev/null | grep -ih '[a-z] ' - /etc/*release /etc/issue 2>/dev/null | head -1 | sed -e 's/["\\\\]//g'`'" ENDL \' -echo '"TBB:' "BUILD_KERNEL\t"`uname -srv`'" ENDL \' -echo '"TBB:' "BUILD_GCC\t\t"`g++ -v &1 | grep 'gcc.*version'`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \' -echo '"TBB:' "BUILD_GLIBC\t"`getconf GNU_LIBC_VERSION | grep glibc | sed -e 's/^glibc //'`'" ENDL \' -echo '"TBB:' "BUILD_LD\t\t"`ld -v 2>&1 | grep 'version'`'" ENDL \' -echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \' -echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/deal.II/bundled/tbb30_104oss/build/version_info_macos.sh b/deal.II/bundled/tbb30_104oss/build/version_info_macos.sh deleted file mode 100644 index 8ba45d6370..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/version_info_macos.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS \\" -echo '"TBB:' "BUILD_HOST\t\t"`hostname -s`" ("`arch`")"'" ENDL \' -echo '"TBB:' "BUILD_OS\t\t"`sw_vers -productName`" version "`sw_vers -productVersion`'" ENDL \' -echo '"TBB:' "BUILD_KERNEL\t"`uname -v`'" ENDL \' -echo '"TBB:' "BUILD_GCC\t\t"`gcc -v &1 | grep 'version'`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \' -echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \' -echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/deal.II/bundled/tbb30_104oss/build/version_info_sunos.sh b/deal.II/bundled/tbb30_104oss/build/version_info_sunos.sh deleted file mode 100644 index 02ad49165f..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/version_info_sunos.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -# Script used to generate version info string -echo "#define __TBB_VERSION_STRINGS \\" -echo '"TBB:' "BUILD_HOST\t"`hostname`" ("`arch`")"'" ENDL \' -echo '"TBB:' "BUILD_OS\t\t"`uname`'" ENDL \' -echo '"TBB:' "BUILD_KERNEL\t"`uname -srv`'" ENDL \' -echo '"TBB:' "BUILD_SUNCC\t"`CC -V &1 | grep 'C++'`'" ENDL \' -[ -z "$COMPILER_VERSION" ] || echo '"TBB:' "BUILD_COMPILER\t"$COMPILER_VERSION'" ENDL \' -echo '"TBB:' "BUILD_TARGET\t$arch on $runtime"'" ENDL \' -echo '"TBB:' "BUILD_COMMAND\t"$*'" ENDL \' -echo "" -echo "#define __TBB_DATETIME \""`date -u`"\"" diff --git a/deal.II/bundled/tbb30_104oss/build/version_info_windows.js b/deal.II/bundled/tbb30_104oss/build/version_info_windows.js deleted file mode 100644 index c8e7634d61..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/version_info_windows.js +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. 
-// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. - -var WshShell = WScript.CreateObject("WScript.Shell"); - -var tmpExec; - -WScript.Echo("#define __TBB_VERSION_STRINGS \\"); - -//Getting BUILD_HOST -WScript.echo( "\"TBB: BUILD_HOST\\t\\t" + - WshShell.ExpandEnvironmentStrings("%COMPUTERNAME%") + - "\" ENDL \\" ); - -//Getting BUILD_OS -tmpExec = WshShell.Exec("cmd /c ver"); -while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); -} -tmpExec.StdOut.ReadLine(); - -WScript.echo( "\"TBB: BUILD_OS\\t\\t" + - tmpExec.StdOut.ReadLine() + - "\" ENDL \\" ); - -if ( WScript.Arguments(0).toLowerCase().match("gcc") ) { - tmpExec = WshShell.Exec("gcc --version"); - WScript.echo( "\"TBB: BUILD_COMPILER\\t" + - tmpExec.StdOut.ReadLine() + - "\" ENDL \\" ); - -} else { // MS / Intel compilers - //Getting BUILD_CL - tmpExec = WshShell.Exec("cmd /c echo #define 0 0>empty.cpp"); - tmpExec = WshShell.Exec("cl -c empty.cpp "); - while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); - } - var clVersion = tmpExec.StdErr.ReadLine(); - WScript.echo( "\"TBB: BUILD_CL\\t\\t" + - clVersion + - "\" ENDL \\" ); - - //Getting BUILD_COMPILER - if ( WScript.Arguments(0).toLowerCase().match("icl") ) { - tmpExec = WshShell.Exec("icl -c empty.cpp "); - while ( tmpExec.Status == 0 ) { - WScript.Sleep(100); - } - WScript.echo( "\"TBB: BUILD_COMPILER\\t" + - tmpExec.StdErr.ReadLine() + - "\" ENDL \\" ); - } else { - WScript.echo( "\"TBB: BUILD_COMPILER\\t\\t" + - clVersion + - "\" ENDL \\" ); - } - tmpExec = WshShell.Exec("cmd /c del /F /Q empty.obj empty.cpp"); -} - -//Getting BUILD_TARGET -WScript.echo( "\"TBB: BUILD_TARGET\\t" + - WScript.Arguments(1) + - "\" ENDL \\" ); - -//Getting BUILD_COMMAND -WScript.echo( "\"TBB: BUILD_COMMAND\\t" + WScript.Arguments(2) + "\" ENDL" ); - -//Getting __TBB_DATETIME and __TBB_VERSION_YMD -var date = new Date(); -WScript.echo( "#define __TBB_DATETIME \"" + date.toUTCString() + "\"" ); -WScript.echo( "#define __TBB_VERSION_YMD " + date.getUTCFullYear() + ", " + - (date.getUTCMonth() > 8 ? (date.getUTCMonth()+1):("0"+(date.getUTCMonth()+1))) + - (date.getUTCDate() > 9 ? 
date.getUTCDate():("0"+date.getUTCDate())) ); - - -/* - -Original strings - -#define __TBB_VERSION_STRINGS \ -"TBB: BUILD_HOST\t\tvpolin-mobl1 (ia32)" ENDL \ -"TBB: BUILD_OS\t\tMicrosoft Windows XP [Version 5.1.2600]" ENDL \ -"TBB: BUILD_CL\t\tMicrosoft (R) 32-bit C/C++ Optimizing Compiler Version 13.10.3077 for 80x86" ENDL \ -"TBB: BUILD_COMPILER\tIntel(R) C++ Compiler for 32-bit applications, Version 9.1 Build 20070109Z Package ID: W_CC_C_9.1.034 " ENDL \ -"TBB: BUILD_TARGET\t" ENDL \ -"TBB: BUILD_COMMAND\t" ENDL \ - -#define __TBB_DATETIME "Mon Jun 4 10:16:07 UTC 2007" -#define __TBB_VERSION_YMD 2007, 0604 - - - -# The script must be run from two directory levels below this level. -x='"TBB: ' -y='" ENDL \' -echo "#define __TBB_VERSION_STRINGS \\" -echo $x "BUILD_HOST\t\t"`hostname`" ("`../../arch.exe`")"$y -echo $x "BUILD_OS\t\t"`../../win_version.bat|grep -i 'Version'`$y -echo >empty.cpp -echo $x "BUILD_CL\t\t"`cl -c empty.cpp 2>&1 | grep -i Version`$y -echo $x "BUILD_COMPILER\t"`icl -c empty.cpp 2>&1 | grep -i Version`$y -echo $x "BUILD_TARGET\t"$TBB_ARCH$y -echo $x "BUILD_COMMAND\t"$*$y -echo "" -# A workaround for MKS 8.6 where `date -u` crashes. -date -u > date.tmp -echo "#define __TBB_DATETIME \""`cat date.tmp`"\"" -echo "#define __TBB_VERSION_YMD "`date '+%Y, %m%d'` -rm empty.cpp -rm empty.obj -rm date.tmp -*/ diff --git a/deal.II/bundled/tbb30_104oss/build/vsproject/index.html b/deal.II/bundled/tbb30_104oss/build/vsproject/index.html deleted file mode 100644 index a0753e49d0..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/vsproject/index.html +++ /dev/null @@ -1,31 +0,0 @@ - - - -

Overview

-This directory contains the Visual Studio* 2005 solution to build Threading Building Blocks. - - -

Files

-
-
makefile.sln -
Solution file. -
tbb.vcproj -
Library project file. -
tbbmalloc.vcproj -
    Scalable allocator library project file. Allocator sources are expected to be located in the ../../src/tbbmalloc folder. -
tbbmalloc_proxy.vcproj -
Standard allocator replacement project file. -
- -
-Up to parent directory -

-Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -

-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -

-* Other names and brands may be claimed as the property of others. - - diff --git a/deal.II/bundled/tbb30_104oss/build/vsproject/makefile.sln b/deal.II/bundled/tbb30_104oss/build/vsproject/makefile.sln deleted file mode 100644 index 1fd7a8d7f8..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/vsproject/makefile.sln +++ /dev/null @@ -1,100 +0,0 @@ -Microsoft Visual Studio Solution File, Format Version 9.00 -# Visual Studio 2005 -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbb", "tbb.vcproj", "{F62787DD-1327-448B-9818-030062BCFAA5}" - ProjectSection(WebsiteProperties) = preProject - Debug.AspNetCompiler.Debug = "True" - Release.AspNetCompiler.Debug = "False" - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbbmalloc", "tbbmalloc.vcproj", "{B15F131E-328A-4D42-ADC2-9FF4CA6306D8}" - ProjectSection(WebsiteProperties) = preProject - Debug.AspNetCompiler.Debug = "True" - Release.AspNetCompiler.Debug = "False" - EndProjectSection - ProjectSection(ProjectDependencies) = postProject - {F62787DD-1327-448B-9818-030062BCFAA5} = {F62787DD-1327-448B-9818-030062BCFAA5} - EndProjectSection -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{8898CE0B-0BFB-45AE-AA71-83735ED2510D}" - ProjectSection(WebsiteProperties) = preProject - Debug.AspNetCompiler.Debug = "True" - Release.AspNetCompiler.Debug = "False" - EndProjectSection - ProjectSection(SolutionItems) = preProject - index.html = index.html - EndProjectSection -EndProject -Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tbbmalloc_proxy", "tbbmalloc_proxy.vcproj", "{02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}" - ProjectSection(WebsiteProperties) = preProject - Debug.AspNetCompiler.Debug = "True" - Release.AspNetCompiler.Debug = "False" - EndProjectSection - ProjectSection(ProjectDependencies) = postProject - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8} = {B15F131E-328A-4D42-ADC2-9FF4CA6306D8} - EndProjectSection -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Win32 = Debug|Win32 - Debug|x64 = Debug|x64 - Debug-MT|Win32 = Debug-MT|Win32 - Debug-MT|x64 = Debug-MT|x64 - Release|Win32 = Release|Win32 - Release|x64 = Release|x64 - Release-MT|Win32 = Release-MT|Win32 - Release-MT|x64 = Release-MT|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|Win32.ActiveCfg = Debug|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|Win32.Build.0 = Debug|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|x64.ActiveCfg = Debug|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug|x64.Build.0 = Debug|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|Win32.Build.0 = Debug-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|x64.ActiveCfg = Debug-MT|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Debug-MT|x64.Build.0 = Debug-MT|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|Win32.ActiveCfg = Release|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|Win32.Build.0 = Release|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|x64.ActiveCfg = Release|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release|x64.Build.0 = Release|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|Win32.ActiveCfg = Release-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|Win32.Build.0 = Release-MT|Win32 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|x64.ActiveCfg = 
Release-MT|x64 - {F62787DD-1327-448B-9818-030062BCFAA5}.Release-MT|x64.Build.0 = Release-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|Win32.ActiveCfg = Debug|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|Win32.Build.0 = Debug|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|x64.ActiveCfg = Debug|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug|x64.Build.0 = Debug|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|Win32.Build.0 = Debug-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|x64.ActiveCfg = Debug-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Debug-MT|x64.Build.0 = Debug-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|Win32.ActiveCfg = Release|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|Win32.Build.0 = Release|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|x64.ActiveCfg = Release|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release|x64.Build.0 = Release|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|Win32.ActiveCfg = Release-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|Win32.Build.0 = Release-MT|Win32 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|x64.ActiveCfg = Release-MT|x64 - {B15F131E-328A-4D42-ADC2-9FF4CA6306D8}.Release-MT|x64.Build.0 = Release-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|Win32.ActiveCfg = Debug|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|Win32.Build.0 = Debug|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|x64.ActiveCfg = Debug|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug|x64.Build.0 = Debug|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|Win32.ActiveCfg = Debug-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|Win32.Build.0 = Debug-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|x64.ActiveCfg = Debug-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Debug-MT|x64.Build.0 = Debug-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|Win32.ActiveCfg = Release|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|Win32.Build.0 = Release|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|x64.ActiveCfg = Release|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release|x64.Build.0 = Release|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|Win32.ActiveCfg = Release-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|Win32.Build.0 = Release-MT|Win32 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|x64.ActiveCfg = Release-MT|x64 - {02F61511-D5B6-46E6-B4BB-DEAA96E6BCC7}.Release-MT|x64.Build.0 = Release-MT|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/deal.II/bundled/tbb30_104oss/build/vsproject/tbb.vcproj b/deal.II/bundled/tbb30_104oss/build/vsproject/tbb.vcproj deleted file mode 100644 index bafa80d113..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/vsproject/tbb.vcproj +++ /dev/null @@ -1,506 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc.vcproj b/deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc.vcproj deleted file mode 100644 index f459f5f8bd..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc.vcproj +++ /dev/null @@ -1,452 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc_proxy.vcproj b/deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc_proxy.vcproj deleted file mode 100644 index 233f247b8a..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/vsproject/tbbmalloc_proxy.vcproj +++ /dev/null @@ -1,206 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/deal.II/bundled/tbb30_104oss/build/vsproject/version_string.tmp b/deal.II/bundled/tbb30_104oss/build/vsproject/version_string.tmp deleted file mode 100644 index 2098d67595..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/vsproject/version_string.tmp +++ /dev/null @@ -1 +0,0 @@ -#define __TBB_VERSION_STRINGS "Empty" diff --git a/deal.II/bundled/tbb30_104oss/build/windows.cl.inc b/deal.II/bundled/tbb30_104oss/build/windows.cl.inc deleted file mode 100644 index 39481aa9ee..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/windows.cl.inc +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define compiler-specific variables. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting compiler flags. -#------------------------------------------------------------------------------ -CPLUS = cl /nologo -LINK_FLAGS = /link /nologo -LIB_LINK_FLAGS=/link /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO -ifeq ($(runtime), vc_mt) - MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d) -else - MS_CRT_KEY = /MD$(if $(findstring debug,$(cfg)),d) -endif -EH_FLAGS = /EHsc /GR - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t - ASM_FLAGS = -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG - ASM_FLAGS = /DUSE_FRAME_POINTER -endif - - -COMPILE_ONLY = /c -PREPROC_ONLY = /TC /EP -INCLUDE_KEY = /I -DEFINE_KEY = /D -OUTPUT_KEY = /Fe -OUTPUTOBJ_KEY = /Fo -WARNING_AS_ERROR_KEY = /WX - -ifeq ($(runtime),vc7.1) - WARNING_KEY = /W3 -else - WARNING_KEY = /W4 -endif - -DYLIB_KEY = /DLL -EXPORT_KEY = /DEF: - -ifeq ($(runtime),vc8) - OPENMP_FLAG = /openmp - WARNING_KEY += /Wp64 - CPLUS_FLAGS += /D_USE_RTM_VERSION -endif -ifeq ($(runtime),vc9) - OPENMP_FLAG = /openmp -endif -ifeq ($(runtime),vc_mt) - OPENMP_FLAG = /openmp -endif -ifeq (intel64,$(arch)) - CPLUS_FLAGS += /GS- -endif - -CPLUS_FLAGS += /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \ - /D_WIN32_WINNT=$(_WIN32_WINNT) -C_FLAGS = $(CPLUS_FLAGS) -#------------------------------------------------------------------------------ -# End of setting compiler flags. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-masm -ifeq (intel64,$(arch)) - ASM=ml64 /nologo - ASM_FLAGS += /DEM64T=1 /c /Zi - TBB_ASM.OBJ = atomic_support.obj -else - ASM=ml /nologo - ASM_FLAGS += /c /coff /Zi - TBB_ASM.OBJ = atomic_support.obj lock_byte.obj -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. 
-#------------------------------------------------------------------------------ -M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS)) -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# End of define compiler-specific variables. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/windows.gcc.inc b/deal.II/bundled/tbb30_104oss/build/windows.gcc.inc deleted file mode 100644 index b0caa894d4..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/windows.gcc.inc +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -#------------------------------------------------------------------------------ -# Overriding settings from windows.inc -#------------------------------------------------------------------------------ - -SLASH= $(strip \) -OBJ = o -LIBEXT = dll # MinGW allows linking with DLLs directly - -TBB.RES = -MALLOC.RES = -RML.RES = -TBB.MANIFEST = -MALLOC.MANIFEST = -RML.MANIFEST = - -# TODO: do better when/if mingw64 support is added -ifeq (ia32,$(arch)) - TBB.DEF = $(tbb_root)/src/tbb/lin32-tbb-export.def -else - TBB.DEF = $(tbb_root)/src/tbb/win64-gcc-tbb-export.def -endif -MALLOC.DEF = $(MALLOC_ROOT)/win-gcc-tbbmalloc-export.def -RML.DEF = $(RML_SERVER_ROOT)/lin-rml-export.def - -LINK_TBB.LIB = $(TBB.LIB) - -#------------------------------------------------------------------------------ -# End of overridden settings -#------------------------------------------------------------------------------ -# Compiler-specific variables -#------------------------------------------------------------------------------ - -CPLUS = g++ -COMPILE_ONLY = -c -MMD -PREPROC_ONLY = -E -x c -INCLUDE_KEY = -I -DEFINE_KEY = -D -OUTPUT_KEY = -o # -OUTPUTOBJ_KEY = -o # -PIC_KEY = -WARNING_AS_ERROR_KEY = -Werror -WARNING_KEY = -Wall -Wno-uninitialized -WARNING_SUPPRESS = -Wno-parentheses -DYLIB_KEY = -shared -LIBDL = -EXPORT_KEY = -Wl,--version-script, -LIBS = -lpsapi - -#------------------------------------------------------------------------------ -# End of compiler-specific variables -#------------------------------------------------------------------------------ -# Command lines -#------------------------------------------------------------------------------ - -LINK_FLAGS = -Wl,--enable-auto-import -LIB_LINK_FLAGS = $(DYLIB_KEY) - -ifeq ($(cfg), release) - CPLUS_FLAGS = -O2 -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = -g -O0 -DTBB_USE_DEBUG -endif -CPLUS_FLAGS += -DUSE_WINTHREAD - -# MinGW specific -CPLUS_FLAGS += -D__MSVCRT_VERSION__=0x0700 -msse -mthreads - -CONLY = gcc -C_FLAGS = $(CPLUS_FLAGS) - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += -m64 - LIB_LINK_FLAGS += -m64 -endif - -ifeq (ia32,$(arch)) - CPLUS_FLAGS += -m32 - LIB_LINK_FLAGS += -m32 -endif - -# For examples -export UNIXMODE = 1 - -#------------------------------------------------------------------------------ -# End of command lines -#------------------------------------------------------------------------------ -# Setting assembler data -#------------------------------------------------------------------------------ - -ASM= -ASM_FLAGS= -TBB_ASM.OBJ= -ASSEMBLY_SOURCE=$(arch)-gas - -#------------------------------------------------------------------------------ -# End of setting assembler data -#------------------------------------------------------------------------------ -# Setting tbbmalloc data -#------------------------------------------------------------------------------ - -M_CPLUS_FLAGS = $(CPLUS_FLAGS) -fno-rtti -fno-exceptions - -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/windows.icl.inc b/deal.II/bundled/tbb30_104oss/build/windows.icl.inc deleted file mode 100644 index d2a12e8006..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/windows.icl.inc +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
-# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define compiler-specific variables. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting default configuration to release. -#------------------------------------------------------------------------------ -cfg ?= release -#------------------------------------------------------------------------------ -# End of setting default configuration to release. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting compiler flags. 
-#------------------------------------------------------------------------------ -CPLUS = icl /nologo $(VCCOMPAT_FLAG) -LINK_FLAGS = /link /nologo -LIB_LINK_FLAGS= /link /nologo /DLL /MAP /DEBUG /fixed:no /INCREMENTAL:NO -ifeq ($(runtime), vc_mt) - MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d) -else - MS_CRT_KEY = /MD$(if $(findstring debug,$(cfg)),d) -endif -EH_FLAGS = /EHsc /GR - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t - ASM_FLAGS = -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope /Zc:wchar_t /DTBB_USE_DEBUG - ASM_FLAGS = /DUSE_FRAME_POINTER -endif - - -COMPILE_ONLY = /c /QMMD -PREPROC_ONLY = /EP /Tc -INCLUDE_KEY = /I -DEFINE_KEY = /D -OUTPUT_KEY = /Fe -OUTPUTOBJ_KEY = /Fo -WARNING_AS_ERROR_KEY = /WX -WARNING_KEY = /W3 -DYLIB_KEY = /DLL -EXPORT_KEY = /DEF: - -ifeq (intel64,$(arch)) - CPLUS_FLAGS += /GS- -endif - -ifneq (,$(codecov)) - CPLUS_FLAGS += /Qprof-genx -else - CPLUS_FLAGS += /DDO_ITT_NOTIFY -endif - -OPENMP_FLAG = /Qopenmp -CPLUS_FLAGS += /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \ - /D_WIN32_WINNT=$(_WIN32_WINNT) - -ifeq ($(runtime),vc8) - CPLUS_FLAGS += /D_USE_RTM_VERSION -endif - - -C_FLAGS = $(CPLUS_FLAGS) - -ifneq (00,$(lambdas)$(cpp0x)) - CPLUS_FLAGS += /Qstd=c++0x /D_TBB_CPP0X -endif - -VCVERSION:=$(runtime) -VCCOMPAT_FLAG ?= $(if $(findstring vc7.1, $(VCVERSION)),/Qvc7.1) -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc8, $(VCVERSION)),/Qvc8) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc_mt, $(VCVERSION)),/Qvc8) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc9, $(VCVERSION)),/Qvc9) -endif -ifeq ($(VCCOMPAT_FLAG),) - VCCOMPAT_FLAG := $(if $(findstring vc10, $(VCVERSION)),/Qvc10) -endif -ifeq ($(VCCOMPAT_FLAG),) - $(error VC version not detected correctly: $(VCVERSION) ) -endif -export VCCOMPAT_FLAG -#------------------------------------------------------------------------------ -# End of setting compiler flags. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-masm -ifeq (intel64,$(arch)) - ASM=ml64 /nologo - ASM_FLAGS += /DEM64T=1 /c /Zi - TBB_ASM.OBJ = atomic_support.obj -else - ASM=ml /nologo - ASM_FLAGS += /c /coff /Zi - TBB_ASM.OBJ = atomic_support.obj lock_byte.obj -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS)) -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# End of define compiler-specific variables. 
-#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/windows.inc b/deal.II/bundled/tbb30_104oss/build/windows.inc deleted file mode 100644 index fe446bbf49..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/windows.inc +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -export SHELL = cmd - -ifdef tbb_build_dir - test_dir:=$(tbb_build_dir) -else - test_dir:=. -endif - -# TODO give an error if archs doesn't match -ifndef arch - export arch:=$(shell cmd /C "cscript /nologo /E:jscript $(tbb_root)/build/detect.js /arch $(compiler)") -endif - -ifndef runtime - export runtime:=$(shell cmd /C "cscript /nologo /E:jscript $(tbb_root)/build/detect.js /runtime $(compiler)") -endif - -native_compiler := cl -export compiler ?= cl -debugger ?= devenv /debugexe - -CMD=cmd /C -CWD=$(shell cmd /C echo %CD%) -RM=cmd /C del /Q /F -RD=cmd /C rmdir -MD=cmd /c mkdir -SLASH=/ -NUL = nul - -OBJ = obj -DLL = dll -LIBEXT = lib - -def_prefix = $(if $(findstring ia32,$(arch)),win32,win64) - -# Target Windows version. Do not increase beyond 0x0500 without prior discussion! -# Used as the value for macro definition opiton in windows.cl.inc etc. 
-_WIN32_WINNT=0x0400 - -TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def -TBB.DLL = tbb$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = tbb$(DEBUG_SUFFIX).$(LIBEXT) -TBB.RES = tbb_resource.res -# On Windows, we use #pragma comment to set the proper TBB lib to link with -# But for cross-configuration testing, need to link explicitly -LINK_TBB.LIB = $(if $(crosstest),$(TBB.LIB)) -TBB.MANIFEST = -ifneq ($(filter vc8 vc9,$(runtime)),) - TBB.MANIFEST = tbbmanifest.exe.manifest -endif - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = tbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = tbbmalloc$(DEBUG_SUFFIX).$(LIBEXT) -MALLOC.RES = tbbmalloc.res -MALLOC.MANIFEST = -ifneq ($(filter vc8 vc9,$(runtime)),) -MALLOC.MANIFEST = tbbmanifest.exe.manifest -endif -LINK_MALLOC.LIB = $(MALLOC.LIB) - -MALLOCPROXY.DLL = tbbmalloc_proxy$(DEBUG_SUFFIX).$(DLL) -MALLOCPROXY.LIB = tbbmalloc_proxy$(DEBUG_SUFFIX).$(LIBEXT) - -RML.DEF = $(RML_SERVER_ROOT)/$(def_prefix)-rml-export.def -RML.DLL = irml$(DEBUG_SUFFIX).$(DLL) -RML.LIB = irml$(DEBUG_SUFFIX).$(LIBEXT) -RML.RES = irml.res -ifneq ($(filter vc8 vc9,$(runtime)),) -RML.MANIFEST = tbbmanifest.exe.manifest -endif - -MAKE_VERSIONS = cmd /C cscript /nologo /E:jscript $(subst \,/,$(tbb_root))/build/version_info_windows.js $(compiler) $(arch) $(subst \,/,"$(CPLUS) $(CPLUS_FLAGS)") > version_string.tmp -MAKE_TBBVARS = cmd /C "$(subst /,\,$(tbb_root))\build\generate_tbbvars.bat" - -TEST_LAUNCHER = $(subst /,\,$(tbb_root))\build\test_launcher.bat diff --git a/deal.II/bundled/tbb30_104oss/build/xbox360.cl.inc b/deal.II/bundled/tbb30_104oss/build/xbox360.cl.inc deleted file mode 100644 index cd04d00dd1..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/xbox360.cl.inc +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -#------------------------------------------------------------------------------ -# Define compiler-specific variables. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting compiler flags. 
-#------------------------------------------------------------------------------ -CPLUS = cl /nologo -LINK_FLAGS = /link /nologo -LIB_LINK_FLAGS=/link /nologo /DLL /MAP /DEBUG -MS_CRT_KEY = /MT$(if $(findstring debug,$(cfg)),d) -EH_FLAGS = /EHsc /GR - -ifeq ($(cfg), release) - CPLUS_FLAGS = $(MS_CRT_KEY) /O2 /Zi $(EH_FLAGS) /Zc:forScope /D_XBOX /DTBB_NO_LEGACY=1 - ASM_FLAGS = -endif -ifeq ($(cfg), debug) - CPLUS_FLAGS = $(MS_CRT_KEY) /Od /Ob0 /Zi $(EH_FLAGS) /Zc:forScope \ - /DTBB_DO_ASSERT /D_XBOX /DTBB_NO_LEGACY=1 - ASM_FLAGS = /DUSE_FRAME_POINTER -endif - - -COMPILE_ONLY = /c -PREPROC_ONLY = /TC /EP -INCLUDE_KEY = /I -DEFINE_KEY = /D -OUTPUT_KEY = /Fe -OUTPUTOBJ_KEY = /Fo -WARNING_AS_ERROR_KEY = /WX -WARNING_KEY = /W3 -DYLIB_KEY = /DLL -EXPORT_KEY = /DEF: - - OPENMP_FLAG = /openmp -ifeq ($(runtime),vc8) - OPENMP_FLAG = /openmp -endif -ifeq ($(runtime),vc9) - OPENMP_FLAG = /openmp -endif - -ifeq (em64t,$(arch)) - CPLUS_FLAGS += /GS- -endif - - - -CPLUS_FLAGS += /DDO_ITT_NOTIFY /DUSE_WINTHREAD /D_CRT_SECURE_NO_DEPRECATE \ - /D_WIN32_WINNT=$(_WIN32_WINNT) -C_FLAGS = $(CPLUS_FLAGS) /TC -#------------------------------------------------------------------------------ -# End of setting compiler flags. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting assembler data. -#------------------------------------------------------------------------------ -ASSEMBLY_SOURCE=$(arch)-masm -ifeq (XBOX360,$(arch)) -#do nothing for XBOX360 -else - -ifeq (em64t,$(arch)) - ASM=ml64 - ASM_FLAGS += /DEM64T=1 /c /Zi - TBB_ASM.OBJ = -else - ASM=ml - ASM_FLAGS += /c /coff /Zi - TBB_ASM.OBJ = -endif - -endif -#------------------------------------------------------------------------------ -# End of setting assembler data. -#------------------------------------------------------------------------------ - - -#------------------------------------------------------------------------------ -# Setting tbbmalloc data. -#------------------------------------------------------------------------------ -M_CPLUS_FLAGS = $(subst $(EH_FLAGS),/EHs-,$(CPLUS_FLAGS)) -#------------------------------------------------------------------------------ -# End of setting tbbmalloc data. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# End of define compiler-specific variables. -#------------------------------------------------------------------------------ diff --git a/deal.II/bundled/tbb30_104oss/build/xbox360.inc b/deal.II/bundled/tbb30_104oss/build/xbox360.inc deleted file mode 100644 index 8852934fe1..0000000000 --- a/deal.II/bundled/tbb30_104oss/build/xbox360.inc +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -ifdef tbb_build_dir - test_dir:=$(tbb_build_dir) -else - test_dir:=. -endif - -# TODO give an error if archs doesn't match -ifndef arch - export arch:=xbox360 -endif - -ifndef runtime - export runtime:=xdk -endif - -native_compiler := cl -export compiler ?= cl -debugger ?= devenv /debugexe - -CMD=cmd /C -CWD=$(shell cmd /C echo %CD%) -RM=cmd /C del /Q /F -RD=cmd /C rmdir -MD=cmd /c mkdir -SLASH=\\ -NUL = nul - -OBJ = obj -DLL = dll -LIBEXT = lib - -def_prefix = $(arch) - -# Target Windows version. Do not increase beyond 0x0500 without prior discussion! -# Used as the value for macro definition opiton in compiler specific inc files. -_WIN32_WINNT=0x0400 - -TBB.DEF = $(tbb_root)/src/tbb/$(def_prefix)-tbb-export.def -TBB.DLL = tbb$(DEBUG_SUFFIX).$(DLL) -TBB.LIB = tbb$(DEBUG_SUFFIX).$(LIBEXT) -TBB.RES = -#On Windows we specify appropriate tbb library using #pragma comment -LINK_TBB.LIB = - -MALLOC.DEF = $(MALLOC_ROOT)/$(def_prefix)-tbbmalloc-export.def -MALLOC.DLL = tbbmalloc$(DEBUG_SUFFIX).$(DLL) -MALLOC.LIB = tbbmalloc$(DEBUG_SUFFIX).$(LIBEXT) -MALLOC.RES = - -MAKE_VERSIONS = cmd /C cscript /nologo /E:jscript $(subst \,/,$(tbb_root))/build/version_info_windows.js $(compiler) $(arch) $(subst \,/,"$(CPLUS) $(CPLUS_FLAGS) $(INCLUDES)") > version_string.tmp -MAKE_TBBVARS = cmd /C "$(subst /,\,$(tbb_root))\build\generate_tbbvars.bat" diff --git a/deal.II/bundled/tbb30_104oss/include/index.html b/deal.II/bundled/tbb30_104oss/include/index.html deleted file mode 100644 index dddfb9dd2e..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/index.html +++ /dev/null @@ -1,24 +0,0 @@ - - - -

Overview

-Include files for Threading Building Blocks. - -

Directories

-
-
tbb -
Include files for Threading Building Blocks classes and functions. -
- -
-Up to parent directory -

-Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -

-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -

-* Other names and brands may be claimed as the property of others. - - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_queue_internal.h b/deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_queue_internal.h deleted file mode 100644 index c1ebc9f629..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_queue_internal.h +++ /dev/null @@ -1,1016 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_internal_H -#define __TBB_concurrent_queue_internal_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "atomic.h" -#include "spin_mutex.h" -#include "cache_aligned_allocator.h" -#include "tbb_exception.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - - -namespace tbb { - -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - -// forward declaration -namespace strict_ppl { -template class concurrent_queue; -} - -template class concurrent_bounded_queue; - -namespace deprecated { -template class concurrent_queue; -} -#endif - -//! For internal use only. -namespace strict_ppl { - -//! @cond INTERNAL -namespace internal { - -using namespace tbb::internal; - -typedef size_t ticket; - -template class micro_queue ; -template class micro_queue_pop_finalizer ; -template class concurrent_queue_base_v3; - -//! parts of concurrent_queue_rep that do not have references to micro_queue -/** - * For internal use only. - */ -struct concurrent_queue_rep_base : no_copy { - template friend class micro_queue; - template friend class concurrent_queue_base_v3; - -protected: - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - // must be power of 2 - static const size_t n_queue = 8; - - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - atomic head_counter; - char pad1[NFS_MaxLineSize-sizeof(atomic)]; - atomic tail_counter; - char pad2[NFS_MaxLineSize-sizeof(atomic)]; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - - //! 
number of invalid entries in the queue - atomic n_invalid_entries; - - char pad3[NFS_MaxLineSize-sizeof(size_t)-sizeof(size_t)-sizeof(atomic)]; -} ; - -inline bool is_valid_page(const concurrent_queue_rep_base::page* p) { - return uintptr_t(p)>1; -} - -//! Abstract class to define interface for page allocation/deallocation -/** - * For internal use only. - */ -class concurrent_queue_page_allocator -{ - template friend class micro_queue ; - template friend class micro_queue_pop_finalizer ; -protected: - virtual ~concurrent_queue_page_allocator() {} -private: - virtual concurrent_queue_rep_base::page* allocate_page() = 0; - virtual void deallocate_page( concurrent_queue_rep_base::page* p ) = 0; -} ; - -#if _MSC_VER && !defined(__INTEL_COMPILER) -// unary minus operator applied to unsigned type, result still unsigned -#pragma warning( push ) -#pragma warning( disable: 4146 ) -#endif - -//! A queue using simple locking. -/** For efficient, this class has no constructor. - The caller is expected to zero-initialize it. */ -template -class micro_queue : no_copy { - typedef concurrent_queue_rep_base::page page; - - //! Class used to ensure exception-safety of method "pop" - class destroyer: no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - - void copy_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( get_ref(const_cast(src),sindex) ); - } - - void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = from; - } - - void spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const ; - -public: - friend class micro_queue_pop_finalizer; - - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - - static T& get_ref( page& p, size_t index ) { - return (&static_cast(static_cast(&p))->last)[index]; - } - - atomic head_page; - atomic head_counter; - - atomic tail_page; - atomic tail_counter; - - spin_mutex page_mutex; - - void push( const void* item, ticket k, concurrent_queue_base_v3& base ) ; - - bool pop( void* dst, ticket k, concurrent_queue_base_v3& base ) ; - - micro_queue& assign( const micro_queue& src, concurrent_queue_base_v3& base ) ; - - page* make_copy( concurrent_queue_base_v3& base, const page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) ; - - void invalidate_page_and_rethrow( ticket k ) ; -}; - -template -void micro_queue::spin_wait_until_my_turn( atomic& counter, ticket k, concurrent_queue_rep_base& rb ) const { - atomic_backoff backoff; - do { - backoff.pause(); - if( counter&1 ) { - ++rb.n_invalid_entries; - throw_exception( eid_bad_last_alloc ); - } - } while( counter!=k ) ; -} - -template -void micro_queue::push( const void* item, ticket k, concurrent_queue_base_v3& base ) { - k &= -concurrent_queue_rep_base::n_queue; - page* p = NULL; - size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - if( !index ) { - __TBB_TRY { - concurrent_queue_page_allocator& pa = base; - p = pa.allocate_page(); - } __TBB_CATCH (...) 
{ - ++base.my_rep->n_invalid_entries; - invalidate_page_and_rethrow( k ); - } - p->mask = 0; - p->next = NULL; - } - - if( tail_counter!=k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep ); - - if( p ) { - spin_mutex::scoped_lock lock( page_mutex ); - page* q = tail_page; - if( is_valid_page(q) ) - q->next = p; - else - head_page = p; - tail_page = p; - } else { - p = tail_page; - } - - __TBB_TRY { - copy_item( *p, index, item ); - // If no exception was thrown, mark item as present. - p->mask |= uintptr_t(1)<n_invalid_entries; - tail_counter += concurrent_queue_rep_base::n_queue; - __TBB_RETHROW(); - } -} - -template -bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base_v3& base ) { - k &= -concurrent_queue_rep_base::n_queue; - if( head_counter!=k ) spin_wait_until_eq( head_counter, k ); - if( tail_counter==k ) spin_wait_while_eq( tail_counter, k ); - page& p = *head_page; - __TBB_ASSERT( &p, NULL ); - size_t index = k/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - bool success = false; - { - micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? &p : NULL ); - if( p.mask & uintptr_t(1)<n_invalid_entries; - } - } - return success; -} - -template -micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base_v3& base ) { - head_counter = src.head_counter; - tail_counter = src.tail_counter; - page_mutex = src.page_mutex; - - const page* srcp = src.head_page; - if( is_valid_page(srcp) ) { - ticket g_index = head_counter; - __TBB_TRY { - size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue; - size_t index = head_counter/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - size_t end_in_first_page = (index+n_itemsitems_per_page)?(index+n_items):base.my_rep->items_per_page; - - head_page = make_copy( base, srcp, index, end_in_first_page, g_index ); - page* cur_page = head_page; - - if( srcp != src.tail_page ) { - for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) { - cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index ); - cur_page = cur_page->next; - } - - __TBB_ASSERT( srcp==src.tail_page, NULL ); - size_t last_index = tail_counter/concurrent_queue_rep_base::n_queue & (base.my_rep->items_per_page-1); - if( last_index==0 ) last_index = base.my_rep->items_per_page; - - cur_page->next = make_copy( base, srcp, 0, last_index, g_index ); - cur_page = cur_page->next; - } - tail_page = cur_page; - } __TBB_CATCH (...) { - invalidate_page_and_rethrow( g_index ); - } - } else { - head_page = tail_page = NULL; - } - return *this; -} - -template -void micro_queue::invalidate_page_and_rethrow( ticket k ) { - // Append an invalid page at address 1 so that no more pushes are allowed. 
- page* invalid_page = (page*)uintptr_t(1); - { - spin_mutex::scoped_lock lock( page_mutex ); - tail_counter = k+concurrent_queue_rep_base::n_queue+1; - page* q = tail_page; - if( is_valid_page(q) ) - q->next = invalid_page; - else - head_page = invalid_page; - tail_page = invalid_page; - } - __TBB_RETHROW(); -} - -template -concurrent_queue_rep_base::page* micro_queue::make_copy( concurrent_queue_base_v3& base, const concurrent_queue_rep_base::page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) { - concurrent_queue_page_allocator& pa = base; - page* new_page = pa.allocate_page(); - new_page->next = NULL; - new_page->mask = src_page->mask; - for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index ) - if( new_page->mask & uintptr_t(1)< -class micro_queue_pop_finalizer: no_copy { - typedef concurrent_queue_rep_base::page page; - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - concurrent_queue_page_allocator& allocator; -public: - micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base_v3& b, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p), allocator(b) - {} - ~micro_queue_pop_finalizer() ; -}; - -template -micro_queue_pop_finalizer::~micro_queue_pop_finalizer() { - page* p = my_page; - if( is_valid_page(p) ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !is_valid_page(q) ) { - my_queue.tail_page = NULL; - } - } - my_queue.head_counter = my_ticket; - if( is_valid_page(p) ) { - allocator.deallocate_page( p ); - } -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif // warning 4146 is back - -template class concurrent_queue_iterator_rep ; -template class concurrent_queue_iterator_base_v3; - -//! representation of concurrent_queue_base -/** - * the class inherits from concurrent_queue_rep_base and defines an array of micro_queue's - */ -template -struct concurrent_queue_rep : public concurrent_queue_rep_base { - micro_queue array[n_queue]; - - //! Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } -}; - -//! base class of concurrent_queue -/** - * The class implements the interface defined by concurrent_queue_page_allocator - * and has a pointer to an instance of concurrent_queue_rep. - */ -template -class concurrent_queue_base_v3: public concurrent_queue_page_allocator { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend struct concurrent_queue_rep; - friend class micro_queue; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; - -protected: - typedef typename concurrent_queue_rep::page page; - -private: - typedef typename micro_queue::padded_page padded_page; - - /* override */ virtual page *allocate_page() { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - return reinterpret_cast(allocate_block ( n )); - } - - /* override */ virtual void deallocate_page( concurrent_queue_rep_base::page *p ) { - concurrent_queue_rep& r = *my_rep; - size_t n = sizeof(padded_page) + (r.items_per_page-1)*sizeof(T); - deallocate_block( reinterpret_cast(p), n ); - } - - //! custom allocator - virtual void *allocate_block( size_t n ) = 0; - - //! 
custom de-allocator - virtual void deallocate_block( void *p, size_t n ) = 0; - -protected: - concurrent_queue_base_v3(); - - /* override */ virtual ~concurrent_queue_base_v3() { -#if __TBB_USE_ASSERT - size_t nq = my_rep->n_queue; - for( size_t i=0; iarray[i].tail_page==NULL, "pages were not freed properly" ); -#endif /* __TBB_USE_ASSERT */ - cache_aligned_allocator >().deallocate(my_rep,1); - } - - //! Enqueue item at tail of queue - void internal_push( const void* src ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter++; - r.choose(k).push( src, k, *this ); - } - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool internal_try_pop( void* dst ) ; - - //! Get size of queue; result may be invalid if queue is modified concurrently - size_t internal_size() const ; - - //! check if the queue is empty; thread safe - bool internal_empty() const ; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void internal_finish_clear() ; - - //! Obsolete - void internal_throw_exception() const { - throw_exception( eid_bad_alloc ); - } - - //! copy internal representation - void assign( const concurrent_queue_base_v3& src ) ; -}; - -template -concurrent_queue_base_v3::concurrent_queue_base_v3() { - const size_t item_size = sizeof(T); - my_rep = cache_aligned_allocator >().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - my_rep->item_size = item_size; - my_rep->items_per_page = item_size<=8 ? 32 : - item_size<=16 ? 16 : - item_size<=32 ? 8 : - item_size<=64 ? 4 : - item_size<=128 ? 2 : - 1; -} - -template -bool concurrent_queue_base_v3::internal_try_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; - do { - k = r.head_counter; - for(;;) { - if( r.tail_counter<=k ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. Attempt to get that item. - ticket tk=k; -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (push) - #pragma warning (disable: 4267) -#endif - k = r.head_counter.compare_and_swap( tk+1, tk ); -#if defined(_MSC_VER) && defined(_Wp64) - #pragma warning (pop) -#endif - if( k==tk ) - break; - // Another thread snatched the item, retry. - } - } while( !r.choose( k ).pop( dst, k, *this ) ); - return true; -} - -template -size_t concurrent_queue_base_v3::internal_size() const { - concurrent_queue_rep& r = *my_rep; - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - ticket hc = r.head_counter; - size_t nie = r.n_invalid_entries; - ticket tc = r.tail_counter; - __TBB_ASSERT( hc!=tc || !nie, NULL ); - ptrdiff_t sz = tc-hc-nie; - return sz<0 ? 0 : size_t(sz); -} - -template -bool concurrent_queue_base_v3::internal_empty() const { - concurrent_queue_rep& r = *my_rep; - ticket tc = r.tail_counter; - ticket hc = r.head_counter; - // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. 
- return tc==r.tail_counter && tc==hc+r.n_invalid_entries ; -} - -template -void concurrent_queue_base_v3::internal_finish_clear() { - concurrent_queue_rep& r = *my_rep; - size_t nq = r.n_queue; - for( size_t i=0; i -void concurrent_queue_base_v3::assign( const concurrent_queue_base_v3& src ) { - concurrent_queue_rep& r = *my_rep; - r.items_per_page = src.my_rep->items_per_page; - - // copy concurrent_queue_rep. - r.head_counter = src.my_rep->head_counter; - r.tail_counter = src.my_rep->tail_counter; - r.n_invalid_entries = src.my_rep->n_invalid_entries; - - // copy micro_queues - for( size_t i = 0; iarray[i], *this); - - __TBB_ASSERT( r.head_counter==src.my_rep->head_counter && r.tail_counter==src.my_rep->tail_counter, - "the source concurrent queue should not be concurrently modified." ); -} - -template class concurrent_queue_iterator; - -template -class concurrent_queue_iterator_rep: no_assign { - typedef typename micro_queue::padded_page padded_page; -public: - ticket head_counter; - const concurrent_queue_base_v3& my_queue; - typename concurrent_queue_base_v3::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base_v3& queue ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue) - { - for( size_t k=0; k::n_queue; ++k ) - array[k] = queue.my_rep->array[k].head_page; - } - - //! Set item to point to kth element. Return true if at end of queue or item is marked valid; false otherwise. - bool get_item( T*& item, size_t k ) ; -}; - -template -bool concurrent_queue_iterator_rep::get_item( T*& item, size_t k ) { - if( k==my_queue.my_rep->tail_counter ) { - item = NULL; - return true; - } else { - typename concurrent_queue_base_v3::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = k/concurrent_queue_rep::n_queue & (my_queue.my_rep->items_per_page-1); - item = µ_queue::get_ref(*p,i); - return (p->mask & uintptr_t(1)< -class concurrent_queue_iterator_base_v3 : no_assign { - //! Represents concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); -protected: - //! Pointer to current item - Value* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) { -#if __GNUC__==4&&__GNUC_MINOR__==3 - // to get around a possible gcc 4.3 bug - __TBB_release_consistency_helper(); -#endif - } - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) - : no_assign(), my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) ; - - //! Assignment - void assign( const concurrent_queue_iterator_base_v3& other ) ; - - //! Advance iterator one step towards tail of queue. - void advance() ; - - //! 
Destructor - ~concurrent_queue_iterator_base_v3() { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } -}; - -template -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep(queue); - size_t k = my_rep->head_counter; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -template -void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base_v3& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - cache_aligned_allocator >().deallocate(my_rep, 1); - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = cache_aligned_allocator >().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -template -void concurrent_queue_iterator_base_v3::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base_v3& queue = my_rep->my_queue; -#if TBB_USE_ASSERT - Value* tmp; - my_rep->get_item(tmp,k); - __TBB_ASSERT( my_item==tmp, NULL ); -#endif /* TBB_USE_ASSERT */ - size_t i = k/concurrent_queue_rep::n_queue & (queue.my_rep->items_per_page-1); - if( i==queue.my_rep->items_per_page-1 ) { - typename concurrent_queue_base_v3::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - // advance k - my_rep->head_counter = ++k; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -//! Similar to C++0x std::remove_cv -/** "tbb_" prefix added to avoid overload confusion with C++0x implementations. */ -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; -template struct tbb_remove_cv {typedef T type;}; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base_v3::type>, - public std::iterator { -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - template - friend class ::tbb::strict_ppl::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3::type>(queue) - { - } - -public: - concurrent_queue_iterator() {} - - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3::type>(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - this->assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(this->my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - this->advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal - -//! 
@endcond - -} // namespace strict_ppl - -//! @cond INTERNAL -namespace internal { - -class concurrent_queue_rep; -class concurrent_queue_iterator_rep; -class concurrent_queue_iterator_base_v3; -template class concurrent_queue_iterator; - -//! For internal use only. -/** Type-independent portion of concurrent_queue. - @ingroup containers */ -class concurrent_queue_base_v3: no_copy { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend class concurrent_queue_rep; - friend struct micro_queue; - friend class micro_queue_pop_finalizer; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base_v3; -protected: - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - //! Capacity of the queue - ptrdiff_t my_capacity; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; - -#if __TBB_GCC_3_3_PROTECTED_BROKEN -public: -#endif - template - struct padded_page: page { - //! Not defined anywhere - exists to quiet warnings. - padded_page(); - //! Not defined anywhere - exists to quiet warnings. - void operator=( const padded_page& ); - //! Must be last field. - T last; - }; - -private: - virtual void copy_item( page& dst, size_t index, const void* src ) = 0; - virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0; -protected: - __TBB_EXPORTED_METHOD concurrent_queue_base_v3( size_t item_size ); - virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base_v3(); - - //! Enqueue item at tail of queue - void __TBB_EXPORTED_METHOD internal_push( const void* src ); - - //! Dequeue item from head of queue - void __TBB_EXPORTED_METHOD internal_pop( void* dst ); - - //! Attempt to enqueue item onto queue. - bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src ); - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst ); - - //! Get size of queue - ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - - //! Check if the queue is emtpy - bool __TBB_EXPORTED_METHOD internal_empty() const; - - //! Set the queue capacity - void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size ); - - //! custom allocator - virtual page *allocate_page() = 0; - - //! custom de-allocator - virtual void deallocate_page( page *p ) = 0; - - //! free any remaining pages - /* note that the name may be misleading, but it remains so due to a historical accident. */ - void __TBB_EXPORTED_METHOD internal_finish_clear() ; - - //! throw an exception - void __TBB_EXPORTED_METHOD internal_throw_exception() const; - - //! copy internal representation - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_base_v3& src ) ; - -private: - virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) = 0; -}; - -//! Type-independent portion of concurrent_queue_iterator. -/** @ingroup containers */ -class concurrent_queue_iterator_base_v3 { - //! concurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - void initialize( const concurrent_queue_base_v3& queue, size_t offset_of_data ); -protected: - //! 
Pointer to current item - void* my_item; - - //! Default constructor - concurrent_queue_iterator_base_v3() : my_rep(NULL), my_item(NULL) {} - - //! Copy constructor - concurrent_queue_iterator_base_v3( const concurrent_queue_iterator_base_v3& i ) : my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Obsolete entry point for constructing iterator pointing to head of queue. - /** Does not work correctly for SSE types. */ - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue ); - - //! Construct iterator pointing to head of queue. - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base_v3( const concurrent_queue_base_v3& queue, size_t offset_of_data ); - - //! Assignment - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base_v3& i ); - - //! Advance iterator one step towards tail of queue. - void __TBB_EXPORTED_METHOD advance(); - - //! Destructor - __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base_v3(); -}; - -typedef concurrent_queue_iterator_base_v3 concurrent_queue_iterator_base; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base, - public std::iterator { - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class ::tbb::concurrent_bounded_queue; - - template - friend class ::tbb::deprecated::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base_v3& queue ) : - concurrent_queue_iterator_base_v3(queue,__TBB_offsetof(concurrent_queue_base_v3::padded_page,last)) - { - } - -public: - concurrent_queue_iterator() {} - - /** If Value==Container::value_type, then this routine is the copy constructor. - If Value==const Container::value_type, then this routine is a conversion constructor. */ - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base_v3(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal; - -//! @endcond - -} // namespace tbb - -#endif /* __TBB_concurrent_queue_internal_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_unordered_internal.h b/deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_unordered_internal.h deleted file mode 100644 index 1a9a9a5c37..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/_concurrent_unordered_internal.h +++ /dev/null @@ -1,1408 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
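[Editor's note: the queue iterators removed above are surfaced through the containers' debugging interface. A minimal usage sketch, assuming the public tbb/concurrent_queue.h header of this TBB release exposes unsafe_begin()/unsafe_end(); these iterators are not thread safe and are meant for debugging only.]

    #include <iostream>
    #include "tbb/concurrent_queue.h"

    int main() {
        tbb::concurrent_queue<int> q;
        for (int i = 0; i < 4; ++i)
            q.push(i);

        // Forward iteration over the queue contents; no other thread may
        // be touching q while these iterators are in use.
        typedef tbb::concurrent_queue<int>::iterator iter;
        for (iter it = q.unsafe_begin(); it != q.unsafe_end(); ++it)
            std::cout << *it << '\n';
        return 0;
    }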
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. */ - -#ifndef __TBB_concurrent_unordered_internal_H -#define __TBB_concurrent_unordered_internal_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include -#include // For tbb_hasher -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_exception.h" -#include "tbb_allocator.h" - -namespace tbb { -namespace interface5 { -//! 
@cond INTERNAL -namespace internal { - -template -class split_ordered_list; -template -class concurrent_unordered_base; - -// Forward list iterators (without skipping dummy elements) -template -class flist_iterator : public std::iterator -{ - template - friend class split_ordered_list; - template - friend class concurrent_unordered_base; - template - friend class flist_iterator; - - typedef typename Solist::nodeptr_t nodeptr_t; -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - flist_iterator() : my_node_ptr(0) {} - flist_iterator( const flist_iterator &other ) - : my_node_ptr(other.my_node_ptr) {} - - reference operator*() const { return my_node_ptr->my_element; } - pointer operator->() const { return &**this; } - - flist_iterator& operator++() { - my_node_ptr = my_node_ptr->my_next; - return *this; - } - - flist_iterator operator++(int) { - flist_iterator tmp = *this; - ++*this; - return tmp; - } - -protected: - flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {} - nodeptr_t get_node_ptr() const { return my_node_ptr; } - - nodeptr_t my_node_ptr; - - template - friend bool operator==( const flist_iterator &i, const flist_iterator &j ); - template - friend bool operator!=( const flist_iterator& i, const flist_iterator& j ); -}; - -template -bool operator==( const flist_iterator &i, const flist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr; -} -template -bool operator!=( const flist_iterator& i, const flist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr; -} - -// Split-order list iterators, needed to skip dummy elements -template -class solist_iterator : public flist_iterator -{ - typedef flist_iterator base_type; - typedef typename Solist::nodeptr_t nodeptr_t; - using base_type::get_node_ptr; - template - friend class split_ordered_list; - template - friend class solist_iterator; - template - friend bool operator==( const solist_iterator &i, const solist_iterator &j ); - template - friend bool operator!=( const solist_iterator& i, const solist_iterator& j ); - - const Solist *my_list_ptr; - solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {} - -public: - typedef typename Solist::value_type value_type; - typedef typename Solist::difference_type difference_type; - typedef typename Solist::pointer pointer; - typedef typename Solist::reference reference; - - solist_iterator() {} - solist_iterator(const solist_iterator &other ) - : base_type(other), my_list_ptr(other.my_list_ptr) {} - - reference operator*() const { - return this->base_type::operator*(); - } - - pointer operator->() const { - return (&**this); - } - - solist_iterator& operator++() { - do ++(*(base_type *)this); - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (*this); - } - - solist_iterator operator++(int) { - solist_iterator tmp = *this; - do ++*this; - while (get_node_ptr() != NULL && get_node_ptr()->is_dummy()); - - return (tmp); - } -}; - -template -bool operator==( const solist_iterator &i, const solist_iterator &j ) { - return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr; -} -template -bool operator!=( const solist_iterator& i, const solist_iterator& j ) { - return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr; -} - -// Forward type and class definitions -typedef size_t sokey_t; - -// Forward list in which elements are sorted in a 
split-order -template -class split_ordered_list -{ -public: - typedef split_ordered_list self_type; - typedef typename Allocator::template rebind::other allocator_type; - struct node; - typedef node *nodeptr_t; - - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::value_type value_type; - - typedef solist_iterator const_iterator; - typedef solist_iterator iterator; - typedef flist_iterator raw_const_iterator; - typedef flist_iterator raw_iterator; - - // Node that holds the element in a split-ordered list - struct node : tbb::internal::no_assign - { - // Initialize the node with the given order key - void init(sokey_t order_key) { - my_order_key = order_key; - my_next = NULL; - } - - // Return the order key (needed for hashing) - sokey_t get_order_key() const { // TODO: remove - return my_order_key; - } - - // Inserts the new element in the list in an atomic fashion - nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node) - { - // Try to change the next pointer on the current element to a new element, only if it still points to the cached next - nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node); - - if (exchange_node == current_node) // TODO: why this branch? - { - // Operation succeeded, return the new node - return new_node; - } - else - { - // Operation failed, return the "interfering" node - return exchange_node; - } - } - - // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets - // in the hash table to quickly index into the right subsection of the split-ordered list. - bool is_dummy() const { - return (my_order_key & 0x1) == 0; - } - - - nodeptr_t my_next; // Next element in the list - value_type my_element; // Element storage - sokey_t my_order_key; // Order key for this element - }; - - // Allocate a new node with the given order key and value - nodeptr_t create_node(sokey_t order_key, const T &value) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(value); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } - - // Allocate a new node with the given order key; used to allocate dummy nodes - nodeptr_t create_node(sokey_t order_key) { - nodeptr_t pnode = my_node_allocator.allocate(1); - - __TBB_TRY { - new(static_cast(&pnode->my_element)) T(); - pnode->init(order_key); - } __TBB_CATCH(...) { - my_node_allocator.deallocate(pnode, 1); - __TBB_RETHROW(); - } - - return (pnode); - } - - split_ordered_list(allocator_type a = allocator_type()) - : my_node_allocator(a), my_element_count(0) - { - // Immediately allocate a dummy node with order key of 0. This node - // will always be the head of the list. 
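[Editor's note: atomic_set_next() above is the only step of an insertion that must be atomic. It swings prev->my_next from the pointer observed during the search to the new node, and signals interference by returning whatever pointer it actually found. A simplified, self-contained illustration of the same idea using C++11 std::atomic instead of the internal __TBB_CompareAndSwapW primitive; this is not TBB code.]

    #include <atomic>

    struct node {
        std::atomic<node*> next;
        int value;
    };

    // Link new_node after prev only if prev->next still equals the pointer
    // seen while searching (expected_next).  On success the new node is
    // returned; on failure the interfering node is returned and the caller
    // restarts its scan from there.
    node* atomic_set_next(node* prev, node* new_node, node* expected_next) {
        new_node->next.store(expected_next, std::memory_order_relaxed);
        node* observed = expected_next;
        if (prev->next.compare_exchange_strong(observed, new_node))
            return new_node;   // CAS succeeded: new_node is now linked in
        return observed;       // CAS failed: another thread got there first
    }

    int main() {
        node head; head.next.store(nullptr); head.value = 0;
        node a;    a.next.store(nullptr);    a.value = 1;
        node* r = atomic_set_next(&head, &a, nullptr);  // succeeds: next was null
        return (r == &a && head.next.load() == &a) ? 0 : 1;
    }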
- my_head = create_node(0); - } - - ~split_ordered_list() - { - // Clear the list - clear(); - - // Remove the head element which is not cleared by clear() - nodeptr_t pnode = my_head; - my_head = NULL; - - __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node"); - - destroy_node(pnode); - } - - // Common forward list functions - - allocator_type get_allocator() const { - return (my_node_allocator); - } - - void clear() { - nodeptr_t pnext; - nodeptr_t pnode = my_head; - - __TBB_ASSERT(my_head != NULL, "Invalid head list node"); - pnext = pnode->my_next; - pnode->my_next = NULL; - pnode = pnext; - - while (pnode != NULL) - { - pnext = pnode->my_next; - destroy_node(pnode); - pnode = pnext; - } - - my_element_count = 0; - } - - // Returns a first non-dummy element in the SOL - iterator begin() { - return first_real_iterator(raw_begin()); - } - - // Returns a first non-dummy element in the SOL - const_iterator begin() const { - return first_real_iterator(raw_begin()); - } - - iterator end() { - return (iterator(0, this)); - } - - const_iterator end() const { - return (const_iterator(0, this)); - } - - const_iterator cbegin() const { - return (((const self_type *)this)->begin()); - } - - const_iterator cend() const { - return (((const self_type *)this)->end()); - } - - // Checks if the number of elements (non-dummy) is 0 - bool empty() const { - return (my_element_count == 0); - } - - // Returns the number of non-dummy elements in the list - size_type size() const { - return my_element_count; - } - - // Returns the maximum size of the list, determined by the allocator - size_type max_size() const { - return my_node_allocator.max_size(); - } - - // Swaps 'this' list with the passed in one - void swap(self_type& other) - { - if (this == &other) - { - // Nothing to do - return; - } - - std::swap(my_element_count, other.my_element_count); - std::swap(my_head, other.my_head); - } - - // Split-order list functions - - // Returns a first element in the SOL, which is always a dummy - raw_iterator raw_begin() { - return raw_iterator(my_head); - } - - // Returns a first element in the SOL, which is always a dummy - raw_const_iterator raw_begin() const { - return raw_const_iterator(my_head); - } - - raw_iterator raw_end() { - return raw_iterator(0); - } - - raw_const_iterator raw_end() const { - return raw_const_iterator(0); - } - - static sokey_t get_order_key(const raw_const_iterator& it) { - return it.get_node_ptr()->get_order_key(); - } - - static sokey_t get_safe_order_key(const raw_const_iterator& it) { - if( !it.get_node_ptr() ) return sokey_t(~0U); - return it.get_node_ptr()->get_order_key(); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. - iterator get_iterator(raw_iterator it) { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of the internal iterator. Public iterator must not - // be a dummy private iterator. 
- const_iterator get_iterator(raw_const_iterator it) const { - __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)"); - return const_iterator(it.get_node_ptr(), this); - } - - // Returns a non-const version of the raw_iterator - raw_iterator get_iterator(raw_const_iterator it) { - return raw_iterator(it.get_node_ptr()); - } - - // Returns a non-const version of the iterator - static iterator get_iterator(const_iterator it) { - return iterator(it.my_node_ptr, it.my_list_ptr); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - iterator first_real_iterator(raw_iterator it) - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return iterator(it.get_node_ptr(), this); - } - - // Returns a public iterator version of a first non-dummy internal iterator at or after - // the passed in internal iterator. - const_iterator first_real_iterator(raw_const_iterator it) const - { - // Skip all dummy, internal only iterators - while (it != raw_end() && it.get_node_ptr()->is_dummy()) - ++it; - - return const_iterator(it.get_node_ptr(), this); - } - - // Erase an element using the allocator - void destroy_node(nodeptr_t pnode) { - my_node_allocator.destroy(pnode); - my_node_allocator.deallocate(pnode, 1); - } - - // Try to insert a new element in the list. If insert fails, return the node that - // was inserted instead. - nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) { - new_node->my_next = current_node; - return previous->atomic_set_next(new_node, current_node); - } - - // Insert a new element between passed in iterators - std::pair try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count) - { - nodeptr_t pnode = create_node(order_key, value); - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr()); - - if (inserted_node == pnode) - { - // If the insert succeeded, check that the order is correct and increment the element count - check_range(); - *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1)); - return std::pair(iterator(pnode, this), true); - } - else - { - // If the insert failed (element already there), then delete the new one - destroy_node(pnode); - return std::pair(end(), false); - } - } - - // Insert a new dummy element, starting search at a parent dummy element - raw_iterator insert_dummy(raw_iterator it, sokey_t order_key) - { - raw_iterator last = raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - ++where; - - // Create a dummy element up front, even though it may be discarded (due to concurrent insertion) - nodeptr_t dummy_node = create_node(order_key); - - for (;;) - { - __TBB_ASSERT(it != last, "Invalid head list node"); - - // If the head iterator is at the end of the list, or past the point where this dummy - // node needs to be inserted, then try to insert it. 
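[Editor's note: the order-key helpers (split_order_key_regular, split_order_key_dummy, __TBB_ReverseBits) are not visible in this hunk. The sketch below assumes the usual split-ordered-list convention that matches is_dummy() above: the hash or bucket value is bit-reversed, dummy keys keep the lowest bit cleared, and real elements set it, so a bucket's dummy node always sorts immediately before that bucket's elements.]

    #include <cstdio>

    typedef unsigned long sokey_t;

    // Portable bit reversal (the library uses the __TBB_ReverseBits primitive).
    sokey_t reverse_bits(sokey_t x) {
        sokey_t r = 0;
        for (unsigned i = 0; i < sizeof(sokey_t) * 8; ++i) {
            r = (r << 1) | (x & 1);
            x >>= 1;
        }
        return r;
    }

    // Assumed convention: dummy keys have the lowest bit cleared (so
    // is_dummy() reports true for them), regular keys have it set.
    sokey_t split_order_key_dummy(sokey_t bucket) { return reverse_bits(bucket) & ~sokey_t(1); }
    sokey_t split_order_key_regular(sokey_t hash) { return reverse_bits(hash) | sokey_t(1); }

    int main() {
        // An element whose hash value is 5 lands in bucket 5 and gets an
        // order key strictly greater than that bucket's dummy key, so
        // insert_dummy()/internal_insert() can place it by an ordered scan
        // starting at the dummy node.
        sokey_t dummy = split_order_key_dummy(5);
        sokey_t elem  = split_order_key_regular(5);
        std::printf("dummy(5)=%lx element(5)=%lx greater=%d\n",
                    dummy, elem, elem > dummy);
        return 0;
    }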
- if (where == last || get_order_key(where) > order_key) - { - __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list"); - - // Try to insert it in the right place - nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr()); - - if (inserted_node == dummy_node) - { - // Insertion succeeded, check the list for order violations - check_range(); - return raw_iterator(dummy_node); - } - else - { - // Insertion failed: either dummy node was inserted by another thread, or - // a real element was inserted at exactly the same place as dummy node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). - where = it; - ++where; - continue; - } - } - else if (get_order_key(where) == order_key) - { - // Another dummy node with the same value found, discard the new one. - destroy_node(dummy_node); - return where; - } - - // Move the iterator forward - it = where; - ++where; - } - - } - - // This erase function can handle both real and dummy nodes - void erase_node(raw_iterator previous, raw_const_iterator& where) - { - nodeptr_t pnode = (where++).get_node_ptr(); - nodeptr_t prevnode = previous.get_node_ptr(); - __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators"); - prevnode->my_next = pnode->my_next; - - destroy_node(pnode); - } - - // Erase the element (previous node needs to be passed because this is a forward only list) - iterator erase_node(raw_iterator previous, const_iterator where) - { - raw_const_iterator it = where; - erase_node(previous, it); - my_element_count--; - - return get_iterator(first_real_iterator(it)); - } - - // Move all elements from the passed in split-ordered list to this one - void move_all(self_type& source) - { - raw_const_iterator first = source.raw_begin(); - raw_const_iterator last = source.raw_end(); - - if (first == last) - return; - - nodeptr_t previous_node = my_head; - raw_const_iterator begin_iterator = first++; - - // Move all elements one by one, including dummy ones - for (raw_const_iterator it = first; it != last;) - { - nodeptr_t pnode = it.get_node_ptr(); - - nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element); - previous_node = try_insert(previous_node, dummy_node, NULL); - __TBB_ASSERT(previous_node != NULL, "Insertion must succeed"); - raw_const_iterator where = it++; - source.erase_node(get_iterator(begin_iterator), where); - } - check_range(); - } - - -private: - - // Check the list for order violations - void check_range() - { -#if TBB_USE_ASSERT - for (raw_iterator it = raw_begin(); it != raw_end(); ++it) - { - raw_iterator next_iterator = it; - ++next_iterator; - - __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! 
List order inconsistency !!!"); - } -#endif - } - - typename allocator_type::template rebind::other my_node_allocator; // allocator object for nodes - size_type my_element_count; // Total item count, not counting dummy nodes - nodeptr_t my_head; // pointer to head node -}; - -// Template class for hash compare -template -class hash_compare -{ -public: - hash_compare() {} - - hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {} - - hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {} - - size_t operator()(const Key& key) const { - return ((size_t)my_hash_object(key)); - } - - bool operator()(const Key& key1, const Key& key2) const { - return (!my_key_compare_object(key1, key2)); - } - - Hasher my_hash_object; // The hash object - Key_equality my_key_compare_object; // The equality comparator object -}; - -#if _MSC_VER -#pragma warning(push) -#pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping) -#endif - -template -class concurrent_unordered_base : public Traits -{ -protected: - // Type definitions - typedef concurrent_unordered_base self_type; - typedef typename Traits::value_type value_type; - typedef typename Traits::key_type key_type; - typedef typename Traits::hash_compare hash_compare; - typedef typename Traits::value_compare value_compare; - typedef typename Traits::allocator_type allocator_type; - typedef typename allocator_type::pointer pointer; - typedef typename allocator_type::const_pointer const_pointer; - typedef typename allocator_type::reference reference; - typedef typename allocator_type::const_reference const_reference; - typedef typename allocator_type::size_type size_type; - typedef typename allocator_type::difference_type difference_type; - typedef split_ordered_list solist_t; - typedef typename solist_t::nodeptr_t nodeptr_t; - // Iterators that walk the entire split-order list, including dummy nodes - typedef typename solist_t::raw_iterator raw_iterator; - typedef typename solist_t::raw_const_iterator raw_const_iterator; - typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets - typedef typename solist_t::const_iterator const_iterator; - typedef iterator local_iterator; - typedef const_iterator const_local_iterator; - using Traits::my_hash_compare; - using Traits::get_key; - using Traits::allow_multimapping; - -private: - typedef std::pair pairii_t; - typedef std::pair paircc_t; - - static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit - static const size_type initial_bucket_number = 8; // Initial number of buckets - static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket - -protected: - // Constructors/Destructors - concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number, - const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type()) - : Traits(hc), my_solist(a), - my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load) - { - if( n_of_buckets == 0) ++n_of_buckets; - my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2 - internal_init(); - } - - concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a) - : Traits(right.my_hash_compare), my_solist(a), my_allocator(a) - { - internal_copy(right); - } - - concurrent_unordered_base(const concurrent_unordered_base& right) - : 
Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()) - { - internal_init(); - internal_copy(right); - } - - concurrent_unordered_base& operator=(const concurrent_unordered_base& right) { - if (this != &right) - internal_copy(right); - return (*this); - } - - ~concurrent_unordered_base() { - // Delete all node segments - internal_clear(); - } - -public: - allocator_type get_allocator() const { - return my_solist.get_allocator(); - } - - // Size and capacity function - bool empty() const { - return my_solist.empty(); - } - - size_type size() const { - return my_solist.size(); - } - - size_type max_size() const { - return my_solist.max_size(); - } - - // Iterators - iterator begin() { - return my_solist.begin(); - } - - const_iterator begin() const { - return my_solist.begin(); - } - - iterator end() { - return my_solist.end(); - } - - const_iterator end() const { - return my_solist.end(); - } - - const_iterator cbegin() const { - return my_solist.cbegin(); - } - - const_iterator cend() const { - return my_solist.cend(); - } - - // Parallel traversal support - class const_range_type : tbb::internal::no_assign { - const concurrent_unordered_base &my_table; - raw_const_iterator my_begin_node; - raw_const_iterator my_end_node; - mutable raw_const_iterator my_midpoint_node; - public: - //! Type for size of a range - typedef typename concurrent_unordered_base::size_type size_type; - typedef typename concurrent_unordered_base::value_type value_type; - typedef typename concurrent_unordered_base::reference reference; - typedef typename concurrent_unordered_base::difference_type difference_type; - typedef typename concurrent_unordered_base::const_iterator iterator; - - //! True if range is empty. - bool empty() const {return my_begin_node == my_end_node;} - - //! True if range can be partitioned into two subranges. - bool is_divisible() const { - return my_midpoint_node != my_end_node; - } - //! Split range. - const_range_type( const_range_type &r, split ) : - my_table(r.my_table), my_end_node(r.my_end_node) - { - r.my_end_node = my_begin_node = r.my_midpoint_node; - __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); - __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); - set_midpoint(); - r.set_midpoint(); - } - //! Init range with container and grainsize specified - const_range_type( const concurrent_unordered_base &a_table ) : - my_table(a_table), my_begin_node(a_table.my_solist.begin()), - my_end_node(a_table.my_solist.end()) - { - set_midpoint(); - } - iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); } - iterator end() const { return my_table.my_solist.get_iterator(my_end_node); } - //! The grain size for this range. - size_type grainsize() const { return 1; } - - //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node. 
- void set_midpoint() const { - if( my_begin_node == my_end_node ) // not divisible - my_midpoint_node = my_end_node; - else { - sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node); - sokey_t end_key = solist_t::get_safe_order_key(my_end_node); - size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets; - while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket); - my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket )); - if( my_midpoint_node == my_begin_node ) - my_midpoint_node = my_end_node; -#if TBB_USE_ASSERT - else { - sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node); - __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" ); - __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" ); - } -#endif // TBB_USE_ASSERT - } - } - }; - - class range_type : public const_range_type { - public: - typedef typename concurrent_unordered_base::iterator iterator; - //! Split range. - range_type( range_type &r, split ) : const_range_type( r, split() ) {} - //! Init range with container and grainsize specified - range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {} - - iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); } - iterator end() const { return solist_t::get_iterator( const_range_type::end() ); } - }; - - range_type range() { - return range_type( *this ); - } - - const_range_type range() const { - return const_range_type( *this ); - } - - // Modifiers - std::pair insert(const value_type& value) { - return internal_insert(value); - } - - iterator insert(const_iterator, const value_type& value) { - // Ignore hint - return insert(value).first; - } - - template - void insert(Iterator first, Iterator last) { - for (Iterator it = first; it != last; ++it) - insert(*it); - } - - iterator unsafe_erase(const_iterator where) { - return internal_erase(where); - } - - iterator unsafe_erase(const_iterator first, const_iterator last) { - while (first != last) - unsafe_erase(first++); - return my_solist.get_iterator(first); - } - - size_type unsafe_erase(const key_type& key) { - pairii_t where = equal_range(key); - size_type item_count = internal_distance(where.first, where.second); - unsafe_erase(where.first, where.second); - return item_count; - } - - void swap(concurrent_unordered_base& right) { - if (this != &right) { - std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here - my_solist.swap(right.my_solist); - internal_swap_buckets(right); - std::swap(my_number_of_buckets, right.my_number_of_buckets); - std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size); - } - } - - // Observers - void clear() { - // Clear list - my_solist.clear(); - - // Clear buckets - internal_clear(); - } - - // Lookup - iterator find(const key_type& key) { - return internal_find(key); - } - - const_iterator find(const key_type& key) const { - return const_cast(this)->internal_find(key); - } - - size_type count(const key_type& key) const { - if(allow_multimapping) { - paircc_t answer = equal_range(key); - size_type item_count = internal_distance(answer.first, answer.second); - return item_count; - } else { - return const_cast(this)->internal_find(key) == end()?0:1; - } - } - - std::pair equal_range(const key_type& key) { - return internal_equal_range(key); - } - - std::pair equal_range(const key_type& key) const { - return 
const_cast(this)->internal_equal_range(key); - } - - // Bucket interface - for debugging - size_type unsafe_bucket_count() const { - return my_number_of_buckets; - } - - size_type unsafe_max_bucket_count() const { - return segment_size(pointers_per_table-1); - } - - size_type unsafe_bucket_size(size_type bucket) { - size_type item_count = 0; - if (is_initialized(bucket)) { - raw_iterator it = get_bucket(bucket); - ++it; - for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it) - ++item_count; - } - return item_count; - } - - size_type unsafe_bucket(const key_type& key) const { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - return bucket; - } - - // If the bucket is initialized, return a first non-dummy element in it - local_iterator unsafe_begin(size_type bucket) { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // If the bucket is initialized, return a first non-dummy element in it - const_local_iterator unsafe_begin(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - local_iterator unsafe_end(size_type bucket) - { - if (!is_initialized(bucket)) - return end(); - - raw_iterator it = get_bucket(bucket); - - // Find the end of the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - // @REVIEW: Takes O(n) - // Returns the iterator after the last non-dummy element in the bucket - const_local_iterator unsafe_end(size_type bucket) const - { - if (!is_initialized(bucket)) - return end(); - - raw_const_iterator it = get_bucket(bucket); - - // Find the end of the bucket, denoted by the dummy element - do ++it; - while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy()); - - // Return the first real element past the end of the bucket - return my_solist.first_real_iterator(it); - } - - const_local_iterator unsafe_cbegin(size_type bucket) const { - return ((const self_type *) this)->begin(); - } - - const_local_iterator unsafe_cend(size_type bucket) const { - return ((const self_type *) this)->end(); - } - - // Hash policy - float load_factor() const { - return (float) size() / (float) unsafe_bucket_count(); - } - - float max_load_factor() const { - return my_maximum_bucket_size; - } - - void max_load_factor(float newmax) { - if (newmax != newmax || newmax < 0) - tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor); - my_maximum_bucket_size = newmax; - } - - // This function is a noop, because the underlying split-ordered list - // is already sorted, so an increase in the bucket number will be - // reflected next time this bucket is touched. 
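[Editor's note: the const_range_type/range_type classes above model TBB's splittable Range concept, so the container can be handed directly to the parallel algorithms through the range() accessors. A usage sketch, assuming tbb::concurrent_unordered_map is the public container built on concurrent_unordered_base in this release.]

    #include <utility>
    #include "tbb/concurrent_unordered_map.h"
    #include "tbb/parallel_for.h"
    #include "tbb/atomic.h"

    typedef tbb::concurrent_unordered_map<int, int> Map;

    // Body for parallel_for: visits one subrange produced by splitting the
    // container's range at bucket boundaries (see set_midpoint above).
    struct CountEntries {
        tbb::atomic<size_t>* count;
        void operator()(const Map::const_range_type& r) const {
            size_t n = 0;
            for (Map::const_range_type::iterator it = r.begin(); it != r.end(); ++it)
                ++n;
            count->fetch_and_add(n);
        }
    };

    int main() {
        Map m;
        for (int i = 0; i < 1000; ++i)
            m.insert(std::make_pair(i, i * i));

        tbb::atomic<size_t> total;
        total = 0;
        CountEntries body;
        body.count = &total;
        tbb::parallel_for(m.range(), body);
        return total == m.size() ? 0 : 1;
    }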
- void rehash(size_type buckets) { - size_type current_buckets = my_number_of_buckets; - if (current_buckets >= buckets) - return; - my_number_of_buckets = 1<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2 - } - -private: - - // Initialize the hash and keep the first bucket open - void internal_init() { - // Allocate an array of segment pointers - memset(my_buckets, 0, pointers_per_table * sizeof(void *)); - - // Insert the first element in the split-ordered list - raw_iterator dummy_node = my_solist.raw_begin(); - set_bucket(0, dummy_node); - } - - void internal_clear() { - for (size_type index = 0; index < pointers_per_table; ++index) { - if (my_buckets[index] != NULL) { - size_type sz = segment_size(index); - for (size_type index2 = 0; index2 < sz; ++index2) - my_allocator.destroy(&my_buckets[index][index2]); - my_allocator.deallocate(my_buckets[index], sz); - my_buckets[index] = 0; - } - } - } - - void internal_copy(const self_type& right) { - clear(); - - my_maximum_bucket_size = right.my_maximum_bucket_size; - my_number_of_buckets = right.my_number_of_buckets; - - __TBB_TRY { - insert(right.begin(), right.end()); - my_hash_compare = right.my_hash_compare; - } __TBB_CATCH(...) { - my_solist.clear(); - __TBB_RETHROW(); - } - } - - void internal_swap_buckets(concurrent_unordered_base& right) - { - // Swap all node segments - for (size_type index = 0; index < pointers_per_table; ++index) - { - raw_iterator * iterator_pointer = my_buckets[index]; - my_buckets[index] = right.my_buckets[index]; - right.my_buckets[index] = iterator_pointer; - } - } - - // Hash APIs - size_type internal_distance(const_iterator first, const_iterator last) const - { - size_type num = 0; - - for (const_iterator it = first; it != last; ++it) - ++num; - - return num; - } - - // Insert an element in the hash given its value - std::pair internal_insert(const value_type& value) - { - sokey_t order_key = (sokey_t) my_hash_compare(get_key(value)); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - size_type new_count; - order_key = split_order_key_regular(order_key); - raw_iterator it = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = it; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) - { - if (where == last || solist_t::get_order_key(where) > order_key) - { - // Try to insert it in the right place - std::pair result = my_solist.try_insert(it, where, value, order_key, &new_count); - - if (result.second) - { - // Insertion succeeded, adjust the table size, if needed - adjust_table_size(new_count, my_number_of_buckets); - return result; - } - else - { - // Insertion failed: either the same node was inserted by another thread, or - // another element was inserted at exactly the same place as this node. - // Proceed with the search from the previous location where order key was - // known to be larger (note: this is legal only because there is no safe - // concurrent erase operation supported). 
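[Editor's note: a short sketch of the hash-policy and bucket-inspection interface shown above. The unsafe_ members are not thread safe and are intended for debugging while the container is quiescent; tbb::concurrent_unordered_map is again assumed to be the public wrapper.]

    #include <cstdio>
    #include <utility>
    #include "tbb/concurrent_unordered_map.h"

    int main() {
        tbb::concurrent_unordered_map<int, int> m;
        for (int i = 0; i < 1000; ++i)
            m.insert(std::make_pair(i, i));

        std::printf("buckets=%lu load_factor=%.2f (max %.2f)\n",
                    (unsigned long)m.unsafe_bucket_count(),
                    m.load_factor(), m.max_load_factor());

        // rehash() only raises the bucket count (rounded up to a power of
        // two); elements never move, and the dummy nodes that delimit the
        // extra buckets are created lazily when those buckets are first used.
        m.rehash(4 * m.unsafe_bucket_count());
        std::printf("buckets after rehash=%lu\n",
                    (unsigned long)m.unsafe_bucket_count());
        return 0;
    }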
- where = it; - ++where; - continue; - } - } - else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0) - { - // Element already in the list, return it - return std::pair(my_solist.get_iterator(where), false); - } - - // Move the iterator forward - it = where; - ++where; - } - } - - // Find the element in the split-ordered list - iterator internal_find(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator last = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != last; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // If the order key is smaller than the current order key, the element - // is not in the hash. - return end(); - } - else if (solist_t::get_order_key(it) == order_key) - { - // The fact that order keys match does not mean that the element is found. - // Key function comparison has to be performed to check whether this is the - // right element. If not, keep searching while order key is the same. - if (!my_hash_compare(get_key(*it), key)) - return my_solist.get_iterator(it); - } - } - - return end(); - } - - // Erase an element from the list. This is not a concurrency safe function. - iterator internal_erase(const_iterator it) - { - key_type key = get_key(*it); - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - - raw_iterator previous = get_bucket(bucket); - raw_iterator last = my_solist.raw_end(); - raw_iterator where = previous; - - __TBB_ASSERT(where != last, "Invalid head node"); - - // First node is a dummy node - ++where; - - for (;;) { - if (where == last) - return end(); - else if (my_solist.get_iterator(where) == it) - return my_solist.erase_node(previous, it); - - // Move the iterator forward - previous = where; - ++where; - } - } - - // Return the [begin, end) pair of iterators with the same key values. - // This operation makes sense only if mapping is many-to-one. - pairii_t internal_equal_range(const key_type& key) - { - sokey_t order_key = (sokey_t) my_hash_compare(key); - size_type bucket = order_key % my_number_of_buckets; - - // If bucket is empty, initialize it first - if (!is_initialized(bucket)) - init_bucket(bucket); - - order_key = split_order_key_regular(order_key); - raw_iterator end_it = my_solist.raw_end(); - - for (raw_iterator it = get_bucket(bucket); it != end_it; ++it) - { - if (solist_t::get_order_key(it) > order_key) - { - // There is no element with the given key - return pairii_t(end(), end()); - } - else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key)) - { - iterator first = my_solist.get_iterator(it); - iterator last = first; - do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) ); - return pairii_t(first, last); - } - } - - return pairii_t(end(), end()); - } - - // Bucket APIs - void init_bucket(size_type bucket) - { - // Bucket 0 has no parent. Initialize it and return. 
- if (bucket == 0) { - internal_init(); - return; - } - - size_type parent_bucket = get_parent(bucket); - - // All parent_bucket buckets have to be initialized before this bucket is - if (!is_initialized(parent_bucket)) - init_bucket(parent_bucket); - - raw_iterator parent = get_bucket(parent_bucket); - - // Create a dummy first node in this bucket - raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket)); - set_bucket(bucket, dummy_node); - } - - void adjust_table_size(size_type total_elements, size_type current_size) - { - // Grow the table by a factor of 2 if possible and needed - if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size ) - { - // Double the size of the hash only if size has not changed inbetween loads - __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, uintptr_t(2u*current_size), uintptr_t(current_size) ); - //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8 - //due to overzealous compiler warnings in /Wp64 mode - } - } - - size_type get_parent(size_type bucket) const - { - // Unsets bucket's most significant turned-on bit - size_type msb = __TBB_Log2((uintptr_t)bucket); - return bucket & ~(size_type(1) << msb); - } - - - // Dynamic sized array (segments) - //! @return segment index of given index in the array - static size_type segment_index_of( size_type index ) { - return size_type( __TBB_Log2( uintptr_t(index|1) ) ); - } - - //! @return the first array index of given segment - static size_type segment_base( size_type k ) { - return (size_type(1)< my_number_of_buckets; // Current table size - solist_t my_solist; // List where all the elements are kept - typename allocator_type::template rebind::other my_allocator; // Allocator object for segments - float my_maximum_bucket_size; // Maximum size of the bucket - atomic my_buckets[pointers_per_table]; // The segment table -}; -#if _MSC_VER -#pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it -#endif - -//! Hash multiplier -static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL; -} // namespace internal -//! @endcond -//! Hasher functions -template -inline size_t tbb_hasher( const T& t ) { - return static_cast( t ) * internal::hash_multiplier; -} -template -inline size_t tbb_hasher( P* ptr ) { - size_t const h = reinterpret_cast( ptr ); - return (h >> 3) ^ h; -} -template -inline size_t tbb_hasher( const std::basic_string& s ) { - size_t h = 0; - for( const E* c = s.c_str(); *c; ++c ) - h = static_cast(*c) ^ (h * internal::hash_multiplier); - return h; -} -template -inline size_t tbb_hasher( const std::pair& p ) { - return tbb_hasher(p.first) ^ tbb_hasher(p.second); -} -} // namespace interface5 -using interface5::tbb_hasher; -} // namespace tbb -#endif// __TBB_concurrent_unordered_internal_H diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/_tbb_windef.h b/deal.II/bundled/tbb30_104oss/include/tbb/_tbb_windef.h deleted file mode 100644 index 7ca10696bf..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/_tbb_windef.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
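[Editor's note: get_parent() and segment_index_of() above rest on two small bit tricks: a bucket's parent is obtained by clearing its most significant set bit, and the segment holding a bucket index is the floor of log2 of that index (index 0 counted with segment 0). A standalone sketch of the same arithmetic, with the log2 written out portably instead of using __TBB_Log2.]

    #include <cstdio>

    typedef unsigned long size_type;

    // Portable integer floor(log2) (the library uses the __TBB_Log2 primitive).
    size_type log2_floor(size_type x) {
        size_type r = 0;
        while (x >>= 1) ++r;
        return r;
    }

    // Parent of a bucket: clear the most significant set bit.  init_bucket()
    // recurses up this chain until it hits an already-initialized ancestor.
    size_type get_parent(size_type bucket) {
        return bucket & ~(size_type(1) << log2_floor(bucket));
    }

    // Segment of the bucket-pointer table that holds a given bucket index.
    size_type segment_index_of(size_type index) {
        return log2_floor(index | 1);
    }

    int main() {
        // Example parent chains: 13 -> 5 -> 1 -> 0 and 12 -> 4 -> 0.
        std::printf("parent(13)=%lu parent(5)=%lu parent(12)=%lu\n",
                    get_parent(13), get_parent(5), get_parent(12));
        std::printf("segment(0)=%lu segment(1)=%lu segment(2)=%lu segment(7)=%lu segment(8)=%lu\n",
                    segment_index_of(0), segment_index_of(1), segment_index_of(2),
                    segment_index_of(7), segment_index_of(8));
        return 0;
    }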
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_windef_H -#error Do not #include this file directly. Use "#include tbb/tbb_stddef.h" instead. -#endif /* __TBB_tbb_windef_H */ - -// Check that the target Windows version has all API calls requried for TBB. -// Do not increase the version in condition beyond 0x0500 without prior discussion! -#if defined(_WIN32_WINNT) && _WIN32_WINNT<0x0400 -#error TBB is unable to run on old Windows versions; _WIN32_WINNT must be 0x0400 or greater. -#endif - -#if !defined(_MT) -#error TBB requires linkage with multithreaded C/C++ runtime library. \ - Choose multithreaded DLL runtime in project settings, or use /MD[d] compiler switch. -#endif - -// Workaround for the problem with MVSC headers failing to define namespace std -namespace std { - using ::size_t; using ::ptrdiff_t; -} - -#define __TBB_STRING_AUX(x) #x -#define __TBB_STRING(x) __TBB_STRING_AUX(x) - -// Default setting of TBB_USE_DEBUG -#ifdef TBB_USE_DEBUG -# if TBB_USE_DEBUG -# if !defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MDd if compiling with TBB_USE_DEBUG!=0") -# endif -# else -# if defined(_DEBUG) -# pragma message(__FILE__ "(" __TBB_STRING(__LINE__) ") : Warning: Recommend using /MD if compiling with TBB_USE_DEBUG==0") -# endif -# endif -#else -# ifdef _DEBUG -# define TBB_USE_DEBUG 1 -# endif -#endif - -#if __TBB_BUILD && !defined(__TBB_NO_IMPLICIT_LINKAGE) -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#endif - -#if _MSC_VER - #if !__TBB_NO_IMPLICIT_LINKAGE - #ifdef _DEBUG - #pragma comment(lib, "tbb_debug.lib") - #else - #pragma comment(lib, "tbb.lib") - #endif - #endif -#endif diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/aligned_space.h b/deal.II/bundled/tbb30_104oss/include/tbb/aligned_space.h deleted file mode 100644 index 0d76b3ff37..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/aligned_space.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
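[Editor's note: the __TBB_STRING/__TBB_STRING_AUX pair above is the standard two-level stringize idiom. Routing the argument through a helper macro forces __LINE__ (or any other macro) to expand before it is turned into a string literal, which the #pragma message diagnostics rely on. A tiny standalone illustration.]

    #include <cstdio>

    #define STRING_AUX(x) #x
    #define STRING(x) STRING_AUX(x)

    int main() {
        // Stringizing directly suppresses macro expansion of the argument,
        // so the helper-macro indirection is required to get the line number.
        std::puts("direct:   " STRING_AUX(__LINE__));  // prints "__LINE__"
        std::puts("indirect: " STRING(__LINE__));      // prints this line's number
        return 0;
    }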
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_aligned_space_H -#define __TBB_aligned_space_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" - -namespace tbb { - -//! Block of space aligned sufficiently to construct an array T with N elements. -/** The elements are not constructed or destroyed by this class. - @ingroup memory_allocation */ -template -class aligned_space { -private: - typedef __TBB_TypeWithAlignmentAtLeastAsStrict(T) element_type; - element_type array[(sizeof(T)*N+sizeof(element_type)-1)/sizeof(element_type)]; -public: - //! Pointer to beginning of array - T* begin() {return internal::punned_cast(this);} - - //! Pointer to one past last element in array. - T* end() {return begin()+N;} -}; - -} // namespace tbb - -#endif /* __TBB_aligned_space_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/atomic.h b/deal.II/bundled/tbb30_104oss/include/tbb/atomic.h deleted file mode 100644 index 52591ff448..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/atomic.h +++ /dev/null @@ -1,363 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_atomic_H -#define __TBB_atomic_H - -#include -#include "tbb_stddef.h" - -#if _MSC_VER -#define __TBB_LONG_LONG __int64 -#else -#define __TBB_LONG_LONG long long -#endif /* _MSC_VER */ - -#include "tbb_machine.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4244 4267) -#endif - -namespace tbb { - -//! 
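[Editor's note: aligned_space reserves raw, suitably aligned storage without constructing anything, which is how TBB defers construction of objects embedded in larger structures. A minimal usage sketch; the Widget type is a made-up placeholder. Construct with placement new, destroy explicitly, never delete.]

    #include <new>
    #include "tbb/aligned_space.h"

    struct Widget {
        int x;
        explicit Widget(int v) : x(v) {}
    };

    int main() {
        tbb::aligned_space<Widget, 1> storage;          // aligned, uninitialized bytes
        Widget* w = new (storage.begin()) Widget(42);   // construct in place
        int result = w->x;                              // use the object
        w->~Widget();                                   // destroy explicitly
        return result == 42 ? 0 : 1;
    }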
Specifies memory fencing. -enum memory_semantics { - //! For internal use only. - __TBB_full_fence, - //! Acquire fence - acquire, - //! Release fence - release -}; - -//! @cond INTERNAL -namespace internal { - -#if __GNUC__ || __SUNPRO_CC -#define __TBB_DECL_ATOMIC_FIELD(t,f,a) t f __attribute__ ((aligned(a))); -#elif defined(__INTEL_COMPILER)||_MSC_VER >= 1300 -#define __TBB_DECL_ATOMIC_FIELD(t,f,a) __declspec(align(a)) t f; -#else -#error Do not know syntax for forcing alignment. -#endif /* __GNUC__ */ - -template -struct atomic_rep; // Primary template declared, but never defined. - -template<> -struct atomic_rep<1> { // Specialization - typedef int8_t word; - int8_t value; -}; -template<> -struct atomic_rep<2> { // Specialization - typedef int16_t word; - __TBB_DECL_ATOMIC_FIELD(int16_t,value,2) -}; -template<> -struct atomic_rep<4> { // Specialization -#if _MSC_VER && __TBB_WORDSIZE==4 - // Work-around that avoids spurious /Wp64 warnings - typedef intptr_t word; -#else - typedef int32_t word; -#endif - __TBB_DECL_ATOMIC_FIELD(int32_t,value,4) -}; -template<> -struct atomic_rep<8> { // Specialization - typedef int64_t word; - __TBB_DECL_ATOMIC_FIELD(int64_t,value,8) -}; - -template -struct atomic_traits; // Primary template declared, but not defined. - -#define __TBB_DECL_FENCED_ATOMIC_PRIMITIVES(S,M) \ - template<> struct atomic_traits { \ - typedef atomic_rep::word word; \ - inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\ - return __TBB_CompareAndSwap##S##M(location,new_value,comparand); \ - } \ - inline static word fetch_and_add( volatile void* location, word addend ) { \ - return __TBB_FetchAndAdd##S##M(location,addend); \ - } \ - inline static word fetch_and_store( volatile void* location, word value ) {\ - return __TBB_FetchAndStore##S##M(location,value); \ - } \ - }; - -#define __TBB_DECL_ATOMIC_PRIMITIVES(S) \ - template \ - struct atomic_traits { \ - typedef atomic_rep::word word; \ - inline static word compare_and_swap( volatile void* location, word new_value, word comparand ) {\ - return __TBB_CompareAndSwap##S(location,new_value,comparand); \ - } \ - inline static word fetch_and_add( volatile void* location, word addend ) { \ - return __TBB_FetchAndAdd##S(location,addend); \ - } \ - inline static word fetch_and_store( volatile void* location, word value ) {\ - return __TBB_FetchAndStore##S(location,value); \ - } \ - }; - -#if __TBB_DECL_FENCED_ATOMICS -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,__TBB_full_fence) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,acquire) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(1,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(2,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(4,release) -__TBB_DECL_FENCED_ATOMIC_PRIMITIVES(8,release) -#else -__TBB_DECL_ATOMIC_PRIMITIVES(1) -__TBB_DECL_ATOMIC_PRIMITIVES(2) -__TBB_DECL_ATOMIC_PRIMITIVES(4) -__TBB_DECL_ATOMIC_PRIMITIVES(8) -#endif - -//! Additive inverse of 1 for type T. -/** Various compilers issue various warnings if -1 is used with various integer types. - The baroque expression below avoids all the warnings (we hope). */ -#define __TBB_MINUS_ONE(T) (T(T(0)-T(1))) - -//! Base class that provides basic functionality for atomic without fetch_and_add. 
-/** Works for any type T that has the same size as an integral type, has a trivial constructor/destructor, - and can be copied/compared by memcpy/memcmp. */ -template -struct atomic_impl { -protected: - atomic_rep rep; -private: - //! Union type used to convert type T to underlying integral type. - union converter { - T value; - typename atomic_rep::word bits; - }; -public: - typedef T value_type; - - template - value_type fetch_and_store( value_type value ) { - converter u, w; - u.value = value; - w.bits = internal::atomic_traits::fetch_and_store(&rep.value,u.bits); - return w.value; - } - - value_type fetch_and_store( value_type value ) { - return fetch_and_store<__TBB_full_fence>(value); - } - - template - value_type compare_and_swap( value_type value, value_type comparand ) { - converter u, v, w; - u.value = value; - v.value = comparand; - w.bits = internal::atomic_traits::compare_and_swap(&rep.value,u.bits,v.bits); - return w.value; - } - - value_type compare_and_swap( value_type value, value_type comparand ) { - return compare_and_swap<__TBB_full_fence>(value,comparand); - } - - operator value_type() const volatile { // volatile qualifier here for backwards compatibility - converter w; - w.bits = __TBB_load_with_acquire( rep.value ); - return w.value; - } - -protected: - value_type store_with_release( value_type rhs ) { - converter u; - u.value = rhs; - __TBB_store_with_release(rep.value,u.bits); - return rhs; - } -}; - -//! Base class that provides basic functionality for atomic with fetch_and_add. -/** I is the underlying type. - D is the difference type. - StepType should be char if I is an integral type, and T if I is a T*. */ -template -struct atomic_impl_with_arithmetic: atomic_impl { -public: - typedef I value_type; - - template - value_type fetch_and_add( D addend ) { - return value_type(internal::atomic_traits::fetch_and_add( &this->rep.value, addend*sizeof(StepType) )); - } - - value_type fetch_and_add( D addend ) { - return fetch_and_add<__TBB_full_fence>(addend); - } - - template - value_type fetch_and_increment() { - return fetch_and_add(1); - } - - value_type fetch_and_increment() { - return fetch_and_add(1); - } - - template - value_type fetch_and_decrement() { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } - - value_type fetch_and_decrement() { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } - -public: - value_type operator+=( D addend ) { - return fetch_and_add(addend)+addend; - } - - value_type operator-=( D addend ) { - // Additive inverse of addend computed using binary minus, - // instead of unary minus, for sake of avoiding compiler warnings. - return operator+=(D(0)-addend); - } - - value_type operator++() { - return fetch_and_add(1)+1; - } - - value_type operator--() { - return fetch_and_add(__TBB_MINUS_ONE(D))-1; - } - - value_type operator++(int) { - return fetch_and_add(1); - } - - value_type operator--(int) { - return fetch_and_add(__TBB_MINUS_ONE(D)); - } -}; - -} /* Internal */ -//! @endcond - -//! Primary template for atomic. -/** See the Reference for details. 
- @ingroup synchronization */ -template -struct atomic: internal::atomic_impl { - T operator=( T rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) {this->store_with_release(rhs); return *this;} -}; - -#define __TBB_DECL_ATOMIC(T) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - T operator=( T rhs ) {return store_with_release(rhs);} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; - -__TBB_DECL_ATOMIC(__TBB_LONG_LONG) -__TBB_DECL_ATOMIC(unsigned __TBB_LONG_LONG) -__TBB_DECL_ATOMIC(long) -__TBB_DECL_ATOMIC(unsigned long) - -#if defined(_MSC_VER) && __TBB_WORDSIZE==4 -/* Special version of __TBB_DECL_ATOMIC that avoids gratuitous warnings from cl /Wp64 option. - It is identical to __TBB_DECL_ATOMIC(unsigned) except that it replaces operator=(T) - with an operator=(U) that explicitly converts the U to a T. Types T and U should be - type synonyms on the platform. Type U should be the wider variant of T from the - perspective of /Wp64. */ -#define __TBB_DECL_ATOMIC_ALT(T,U) \ - template<> struct atomic: internal::atomic_impl_with_arithmetic { \ - T operator=( U rhs ) {return store_with_release(T(rhs));} \ - atomic& operator=( const atomic& rhs ) {store_with_release(rhs); return *this;} \ - }; -__TBB_DECL_ATOMIC_ALT(unsigned,size_t) -__TBB_DECL_ATOMIC_ALT(int,ptrdiff_t) -#else -__TBB_DECL_ATOMIC(unsigned) -__TBB_DECL_ATOMIC(int) -#endif /* defined(_MSC_VER) && __TBB_WORDSIZE==4 */ - -__TBB_DECL_ATOMIC(unsigned short) -__TBB_DECL_ATOMIC(short) -__TBB_DECL_ATOMIC(char) -__TBB_DECL_ATOMIC(signed char) -__TBB_DECL_ATOMIC(unsigned char) - -#if !defined(_MSC_VER)||defined(_NATIVE_WCHAR_T_DEFINED) -__TBB_DECL_ATOMIC(wchar_t) -#endif /* _MSC_VER||!defined(_NATIVE_WCHAR_T_DEFINED) */ - -//! Specialization for atomic with arithmetic and operator->. -template struct atomic: internal::atomic_impl_with_arithmetic { - T* operator=( T* rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) { - this->store_with_release(rhs); return *this; - } - T* operator->() const { - return (*this); - } -}; - -//! Specialization for atomic, for sake of not allowing arithmetic or operator->. -template<> struct atomic: internal::atomic_impl { - void* operator=( void* rhs ) { - // "this" required here in strict ISO C++ because store_with_release is a dependent name - return this->store_with_release(rhs); - } - atomic& operator=( const atomic& rhs ) { - this->store_with_release(rhs); return *this; - } -}; - -} // namespace tbb - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warnings 4244, 4267 are back - -#endif /* __TBB_atomic_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range.h b/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range.h deleted file mode 100644 index 52c12cc1a0..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range.h +++ /dev/null @@ -1,129 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
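The fenced primitives above surface through tbb::atomic<T>, which is intended as a drop-in replacement for a plain shared integer. A minimal usage sketch; the counter and the function name are illustrative only and not part of the header:

#include "tbb/atomic.h"

tbb::atomic<int> counter;                          // zero-initialized at namespace scope

void atomic_example() {
    counter = 0;                                   // operator= stores with release semantics
    counter.fetch_and_add(5);                      // full fence by default
    counter.fetch_and_add<tbb::release>(1);        // explicitly weaker ordering
    int old = counter.compare_and_swap(10, 6);     // write 10 only if the current value is 6
    ++counter;                                     // equivalent to fetch_and_add(1)+1
    int snapshot = counter;                        // conversion loads with acquire semantics
    (void)old; (void)snapshot;
}

Operations default to a full fence; the template forms select acquire or release ordering where the surrounding algorithm allows it.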
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_blocked_range_H -#define __TBB_blocked_range_H - -#include "tbb_stddef.h" - -namespace tbb { - -/** \page range_req Requirements on range concept - Class \c R implementing the concept of range must define: - - \code R::R( const R& ); \endcode Copy constructor - - \code R::~R(); \endcode Destructor - - \code bool R::is_divisible() const; \endcode True if range can be partitioned into two subranges - - \code bool R::empty() const; \endcode True if range is empty - - \code R::R( R& r, split ); \endcode Split range \c r into two subranges. -**/ - -//! A range over which to iterate. -/** @ingroup algorithms */ -template -class blocked_range { -public: - //! Type of a value - /** Called a const_iterator for sake of algorithms that need to treat a blocked_range - as an STL container. */ - typedef Value const_iterator; - - //! Type for size of a range - typedef std::size_t size_type; - - //! Construct range with default-constructed values for begin and end. - /** Requires that Value have a default constructor. */ - blocked_range() : my_end(), my_begin() {} - - //! Construct range over half-open interval [begin,end), with the given grainsize. - blocked_range( Value begin_, Value end_, size_type grainsize_=1 ) : - my_end(end_), my_begin(begin_), my_grainsize(grainsize_) - { - __TBB_ASSERT( my_grainsize>0, "grainsize must be positive" ); - } - - //! Beginning of range. - const_iterator begin() const {return my_begin;} - - //! One past last value in range. - const_iterator end() const {return my_end;} - - //! Size of the range - /** Unspecified if end() - friend class blocked_range2d; - - template - friend class blocked_range3d; -}; - -} // namespace tbb - -#endif /* __TBB_blocked_range_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range2d.h b/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range2d.h deleted file mode 100644 index d541f42437..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range2d.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
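blocked_range is the basic splittable Range consumed by tbb::parallel_for and the other loop templates. A brief sketch of typical use, assuming only this header plus parallel_for.h; ScaleBody and scale_all are illustrative names:

#include <cstddef>
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"

// parallel_for recursively splits the half-open range [0,n) and invokes the
// body on each resulting subrange.
struct ScaleBody {
    float* a;
    void operator()( const tbb::blocked_range<std::size_t>& r ) const {
        for( std::size_t i = r.begin(); i != r.end(); ++i )
            a[i] *= 2.0f;
    }
};

void scale_all( float* a, std::size_t n ) {
    ScaleBody body = { a };
    tbb::parallel_for( tbb::blocked_range<std::size_t>(0, n, 1024), body );
}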
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_blocked_range2d_H -#define __TBB_blocked_range2d_H - -#include "tbb_stddef.h" -#include "blocked_range.h" - -namespace tbb { - -//! A 2-dimensional range that models the Range concept. -/** @ingroup algorithms */ -template -class blocked_range2d { -public: - //! Type for size of an iteation range - typedef blocked_range row_range_type; - typedef blocked_range col_range_type; - -private: - row_range_type my_rows; - col_range_type my_cols; - -public: - - blocked_range2d( RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, - ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : - my_rows(row_begin,row_end,row_grainsize), - my_cols(col_begin,col_end,col_grainsize) - { - } - - blocked_range2d( RowValue row_begin, RowValue row_end, - ColValue col_begin, ColValue col_end ) : - my_rows(row_begin,row_end), - my_cols(col_begin,col_end) - { - } - - //! True if range is empty - bool empty() const { - // Yes, it is a logical OR here, not AND. - return my_rows.empty() || my_cols.empty(); - } - - //! True if range is divisible into two pieces. - bool is_divisible() const { - return my_rows.is_divisible() || my_cols.is_divisible(); - } - - blocked_range2d( blocked_range2d& r, split ) : - my_rows(r.my_rows), - my_cols(r.my_cols) - { - if( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { - my_cols.my_begin = col_range_type::do_split(r.my_cols); - } else { - my_rows.my_begin = row_range_type::do_split(r.my_rows); - } - } - - //! The rows of the iteration space - const row_range_type& rows() const {return my_rows;} - - //! The columns of the iteration space - const col_range_type& cols() const {return my_cols;} -}; - -} // namespace tbb - -#endif /* __TBB_blocked_range2d_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range3d.h b/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range3d.h deleted file mode 100644 index b0bfbe0e1c..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/blocked_range3d.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
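The splitting constructor of blocked_range2d bisects whichever dimension is larger relative to its own grainsize, so subranges keep a reasonable aspect ratio. A sketch of iterating a row-major matrix stored in a flat array; ZeroBlock and zero_matrix are illustrative names:

#include <cstddef>
#include "tbb/blocked_range2d.h"
#include "tbb/parallel_for.h"

struct ZeroBlock {
    float* m;
    std::size_t width;                    // number of columns in the flat array
    void operator()( const tbb::blocked_range2d<std::size_t,std::size_t>& r ) const {
        for( std::size_t i = r.rows().begin(); i != r.rows().end(); ++i )
            for( std::size_t j = r.cols().begin(); j != r.cols().end(); ++j )
                m[i*width + j] = 0.0f;
    }
};

void zero_matrix( float* m, std::size_t rows, std::size_t cols ) {
    ZeroBlock body = { m, cols };
    tbb::parallel_for(
        tbb::blocked_range2d<std::size_t,std::size_t>(0, rows, 16, 0, cols, 16),
        body );
}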
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_blocked_range3d_H -#define __TBB_blocked_range3d_H - -#include "tbb_stddef.h" -#include "blocked_range.h" - -namespace tbb { - -//! A 3-dimensional range that models the Range concept. -/** @ingroup algorithms */ -template -class blocked_range3d { -public: - //! Type for size of an iteation range - typedef blocked_range page_range_type; - typedef blocked_range row_range_type; - typedef blocked_range col_range_type; - -private: - page_range_type my_pages; - row_range_type my_rows; - col_range_type my_cols; - -public: - - blocked_range3d( PageValue page_begin, PageValue page_end, - RowValue row_begin, RowValue row_end, - ColValue col_begin, ColValue col_end ) : - my_pages(page_begin,page_end), - my_rows(row_begin,row_end), - my_cols(col_begin,col_end) - { - } - - blocked_range3d( PageValue page_begin, PageValue page_end, typename page_range_type::size_type page_grainsize, - RowValue row_begin, RowValue row_end, typename row_range_type::size_type row_grainsize, - ColValue col_begin, ColValue col_end, typename col_range_type::size_type col_grainsize ) : - my_pages(page_begin,page_end,page_grainsize), - my_rows(row_begin,row_end,row_grainsize), - my_cols(col_begin,col_end,col_grainsize) - { - } - - //! True if range is empty - bool empty() const { - // Yes, it is a logical OR here, not AND. - return my_pages.empty() || my_rows.empty() || my_cols.empty(); - } - - //! True if range is divisible into two pieces. - bool is_divisible() const { - return my_pages.is_divisible() || my_rows.is_divisible() || my_cols.is_divisible(); - } - - blocked_range3d( blocked_range3d& r, split ) : - my_pages(r.my_pages), - my_rows(r.my_rows), - my_cols(r.my_cols) - { - if( my_pages.size()*double(my_rows.grainsize()) < my_rows.size()*double(my_pages.grainsize()) ) { - if ( my_rows.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_rows.grainsize()) ) { - my_cols.my_begin = col_range_type::do_split(r.my_cols); - } else { - my_rows.my_begin = row_range_type::do_split(r.my_rows); - } - } else { - if ( my_pages.size()*double(my_cols.grainsize()) < my_cols.size()*double(my_pages.grainsize()) ) { - my_cols.my_begin = col_range_type::do_split(r.my_cols); - } else { - my_pages.my_begin = page_range_type::do_split(r.my_pages); - } - } - } - - //! The pages of the iteration space - const page_range_type& pages() const {return my_pages;} - - //! The rows of the iteration space - const row_range_type& rows() const {return my_rows;} - - //! 
The columns of the iteration space - const col_range_type& cols() const {return my_cols;} - -}; - -} // namespace tbb - -#endif /* __TBB_blocked_range3d_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/cache_aligned_allocator.h b/deal.II/bundled/tbb30_104oss/include/tbb/cache_aligned_allocator.h deleted file mode 100644 index 5889682d62..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/cache_aligned_allocator.h +++ /dev/null @@ -1,133 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_cache_aligned_allocator_H -#define __TBB_cache_aligned_allocator_H - -#include -#include "tbb_stddef.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - //! Cache/sector line size. - /** @ingroup memory_allocation */ - size_t __TBB_EXPORTED_FUNC NFS_GetLineSize(); - - //! Allocate memory on cache/sector line boundary. - /** @ingroup memory_allocation */ - void* __TBB_EXPORTED_FUNC NFS_Allocate( size_t n_element, size_t element_size, void* hint ); - - //! Free memory allocated by NFS_Allocate. - /** Freeing a NULL pointer is allowed, but has no effect. - @ingroup memory_allocation */ - void __TBB_EXPORTED_FUNC NFS_Free( void* ); -} -//! @endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. 
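blocked_range3d extends the same idea to a pages x rows x cols iteration space; its splitting constructor compares size-to-grainsize ratios pairwise and bisects the relatively largest dimension. A construction-only sketch (the extents and grainsizes are illustrative):

#include "tbb/blocked_range3d.h"

typedef tbb::blocked_range3d<int,int,int> range3d_t;

range3d_t make_range() {
    return range3d_t( 0, 64, 8,        // pages  [0,64),   grainsize 8
                      0, 256, 16,      // rows   [0,256),  grainsize 16
                      0, 1024, 64 );   // cols   [0,1024), grainsize 64
}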
- @ingroup memory_allocation */ -template -class cache_aligned_allocator { -public: - typedef typename internal::allocator_type::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template struct rebind { - typedef cache_aligned_allocator other; - }; - - cache_aligned_allocator() throw() {} - cache_aligned_allocator( const cache_aligned_allocator& ) throw() {} - template cache_aligned_allocator(const cache_aligned_allocator&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects, starting on a cache/sector line. - pointer allocate( size_type n, const void* hint=0 ) { - // The "hint" argument is always ignored in NFS_Allocate thus const_cast shouldn't hurt - return pointer(internal::NFS_Allocate( n, sizeof(value_type), const_cast(hint) )); - } - - //! Free block of memory that starts on a cache line - void deallocate( pointer p, size_type ) { - internal::NFS_Free(p); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - return (~size_t(0)-internal::NFS_MaxLineSize)/sizeof(value_type); - } - - //! Copy-construct value at location pointed to by p. - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) {p->~value_type();} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class cache_aligned_allocator { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template struct rebind { - typedef cache_aligned_allocator other; - }; -}; - -template -inline bool operator==( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return true;} - -template -inline bool operator!=( const cache_aligned_allocator&, const cache_aligned_allocator& ) {return false;} - -} // namespace tbb - -#endif /* __TBB_cache_aligned_allocator_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/combinable.h b/deal.II/bundled/tbb30_104oss/include/tbb/combinable.h deleted file mode 100644 index 5510595851..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/combinable.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
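Because cache_aligned_allocator meets the standard allocator requirements, it can be plugged into any standard container so that the container's storage starts on a cache/sector line and does not share that line with unrelated allocations. A sketch; Slot and make_slots are illustrative:

#include <cstddef>
#include <vector>
#include "tbb/cache_aligned_allocator.h"

struct Slot { long hits; };

typedef std::vector< Slot, tbb::cache_aligned_allocator<Slot> > slot_vector;

slot_vector make_slots( std::size_t n_threads ) {
    Slot zero = { 0 };
    return slot_vector( n_threads, zero );   // backing buffer is cache-line aligned
}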
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_combinable_H -#define __TBB_combinable_H - -#include "enumerable_thread_specific.h" -#include "cache_aligned_allocator.h" - -namespace tbb { -/** \name combinable - **/ -//@{ -//! Thread-local storage with optional reduction -/** @ingroup containers */ - template - class combinable { - private: - typedef typename tbb::cache_aligned_allocator my_alloc; - - typedef typename tbb::enumerable_thread_specific my_ets_type; - my_ets_type my_ets; - - public: - - combinable() { } - - template - combinable( finit _finit) : my_ets(_finit) { } - - //! destructor - ~combinable() { - } - - combinable(const combinable& other) : my_ets(other.my_ets) { } - - combinable & operator=( const combinable & other) { my_ets = other.my_ets; return *this; } - - void clear() { my_ets.clear(); } - - T& local() { return my_ets.local(); } - - T& local(bool & exists) { return my_ets.local(exists); } - - // combine_func_t has signature T(T,T) or T(const T&, const T&) - template - T combine(combine_func_t f_combine) { return my_ets.combine(f_combine); } - - // combine_func_t has signature void(T) or void(const T&) - template - void combine_each(combine_func_t f_combine) { my_ets.combine_each(f_combine); } - - }; -} // namespace tbb -#endif /* __TBB_combinable_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/compat/condition_variable b/deal.II/bundled/tbb30_104oss/include/tbb/compat/condition_variable deleted file mode 100644 index 2a2f600729..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/compat/condition_variable +++ /dev/null @@ -1,459 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
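combinable<T> is a thin wrapper around enumerable_thread_specific<T>: each thread accumulates into its own slot obtained from local(), and the partial results are folded afterwards with combine(). A hedged sketch of that pattern, using the finit constructor shown above to start every slot at zero; all identifiers are illustrative:

#include <cstddef>
#include "tbb/combinable.h"
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"

long zero()                { return 0; }
long add( long a, long b ) { return a + b; }

struct PartialSum {
    tbb::combinable<long>* sums;
    const int* data;
    void operator()( const tbb::blocked_range<std::size_t>& r ) const {
        long& local = sums->local();          // this thread's private element
        for( std::size_t i = r.begin(); i != r.end(); ++i )
            local += data[i];
    }
};

long parallel_sum( const int* data, std::size_t n ) {
    tbb::combinable<long> sums( zero );
    PartialSum body = { &sums, data };
    tbb::parallel_for( tbb::blocked_range<std::size_t>(0, n, 4096), body );
    return sums.combine( add );               // fold all thread-local values
}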
-*/ - -#ifndef __TBB_condition_variable_H -#define __TBB_condition_variable_H - -#if _WIN32||_WIN64 -#include "../machine/windows_api.h" - -namespace tbb { -namespace interface5 { -namespace internal { -struct condition_variable_using_event -{ - //! Event for blocking waiting threads. - HANDLE event; - //! Protects invariants involving n_waiters, release_count, and epoch. - CRITICAL_SECTION mutex; - //! Number of threads waiting on this condition variable - int n_waiters; - //! Number of threads remaining that should no longer wait on this condition variable. - int release_count; - //! To keep threads from waking up prematurely with earlier signals. - unsigned epoch; -}; -}}} // namespace tbb::interface5::internal - -#ifndef CONDITION_VARIABLE_INIT -typedef void* CONDITION_VARIABLE; -typedef CONDITION_VARIABLE* PCONDITION_VARIABLE; -#endif - -#else /* if not _WIN32||_WIN64 */ -#include // some systems need it for ETIMEDOUT -#include -#endif /* _WIN32||_WIN64 */ - -#include "../tbb_stddef.h" -#include "../mutex.h" -#include "../tbb_thread.h" -#include "../tbb_exception.h" -#include "../tbb_profiling.h" - -namespace tbb { - -namespace interface5 { - -// C++0x standard working draft 30.4.3 -// Lock tag types -struct defer_lock_t { }; //! do not acquire ownership of the mutex -struct try_to_lock_t { }; //! try to acquire ownership of the mutex without blocking -struct adopt_lock_t { }; //! assume the calling thread has already -const defer_lock_t defer_lock = {}; -const try_to_lock_t try_to_lock = {}; -const adopt_lock_t adopt_lock = {}; - -// C++0x standard working draft 30.4.3.1 -//! lock_guard -template -class lock_guard : tbb::internal::no_copy { -public: - //! mutex type - typedef M mutex_type; - - //! Constructor - /** precondition: If mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. */ - explicit lock_guard(mutex_type& m) : pm(m) {m.lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex m. */ - lock_guard(mutex_type& m, adopt_lock_t) : pm(m) {} - - //! Destructor - ~lock_guard() { pm.unlock(); } -private: - mutex_type& pm; -}; - -// C++0x standard working draft 30.4.3.2 -//! unique_lock -template -class unique_lock : tbb::internal::no_copy { - friend class condition_variable; -public: - typedef M mutex_type; - - // 30.4.3.2.1 construct/copy/destroy - // NB: Without constructors that take an r-value reference to a unique_lock, the following constructor is of little use. - //! Constructor - /** postcondition: pm==0 && owns==false */ - unique_lock() : pm(NULL), owns(false) {} - - //! Constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==true */ - explicit unique_lock(mutex_type& m) : pm(&m) {m.lock(); owns=true;} - - //! Defer_lock constructor - /** postcondition: pm==&m and owns==false */ - unique_lock(mutex_type& m, defer_lock_t) : pm(&m), owns(false) {} - - //! Try_to_lock constructor - /** precondition: if mutex_type is not a recursive mutex, the calling thread - does not own the mutex m. If the precondition is not met, a deadlock occurs. - postcondition: pm==&m and owns==res where res is the value returned by - the call to m.try_lock(). */ - unique_lock(mutex_type& m, try_to_lock_t) : pm(&m) {owns = m.try_lock();} - - //! Adopt_lock constructor - /** precondition: the calling thread owns the mutex. If it does not, mutex->unlock() would fail. 
- postcondition: pm==&m and owns==true */ - unique_lock(mutex_type& m, adopt_lock_t) : pm(&m), owns(true) {} - - //! Timed unique_lock acquisition. - /** To avoid requiring support for namespace chrono, this method deviates from the working draft in that - it uses tbb::tick_count::interval_t to specify the time duration. */ - unique_lock(mutex_type& m, const tick_count::interval_t &i) : pm(&m) {owns = try_lock_for( i );} - - //! Destructor - ~unique_lock() { if( owns ) pm->unlock(); } - - // 30.4.3.2.2 locking - //! Lock the mutex and own it. - void lock() { - if( pm ) { - if( !owns ) { - pm->lock(); - owns = true; - } else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( owns, NULL ); - } - - //! Try to lock the mutex. - /** If successful, note that this lock owns it. Otherwise, set it false. */ - bool try_lock() { - if( pm ) { - if( !owns ) - owns = pm->try_lock(); - else - throw_exception_v4( tbb::internal::eid_possible_deadlock ); - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - return owns; - } - - //! Try to lock the mutex. - bool try_lock_for( const tick_count::interval_t &i ); - - //! Unlock the mutex - /** And note that this lock no longer owns it. */ - void unlock() { - if( owns ) { - pm->unlock(); - owns = false; - } else - throw_exception_v4( tbb::internal::eid_operation_not_permitted ); - __TBB_ASSERT( !owns, NULL ); - } - - // 30.4.3.2.3 modifiers - //! Swap the two unique locks - void swap(unique_lock& u) { - mutex_type* t_pm = u.pm; u.pm = pm; pm = t_pm; - bool t_owns = u.owns; u.owns = owns; owns = t_owns; - } - - //! Release control over the mutex. - mutex_type* release() { - mutex_type* o_pm = pm; - pm = NULL; - owns = false; - return o_pm; - } - - // 30.4.3.2.4 observers - //! Does this lock own the mutex? - bool owns_lock() const { return owns; } - - //! Does this lock own the mutex? - /*explicit*/ operator bool() const { return owns; } - - //! Return the mutex that this lock currently has. - mutex_type* mutex() const { return pm; } - -private: - mutex_type* pm; - bool owns; -}; - -template -bool unique_lock::try_lock_for( const tick_count::interval_t &i) -{ - const int unique_lock_tick = 100; /* microseconds; 0.1 milliseconds */ - // the smallest wait-time is 0.1 milliseconds. - bool res = pm->try_lock(); - int duration_in_micro; - if( !res && (duration_in_micro=int(i.seconds()*1e6))>unique_lock_tick ) { - tick_count::interval_t i_100( double(unique_lock_tick)/1e6 /* seconds */); // 100 microseconds = 0.1*10E-3 - do { - this_tbb_thread::sleep(i_100); // sleep for 100 micro seconds - duration_in_micro -= unique_lock_tick; - res = pm->try_lock(); - } while( !res && duration_in_micro>unique_lock_tick ); - } - return (owns=res); -} - -//! 
Swap the two unique locks that have the mutexes of same type -template -void swap(unique_lock& x, unique_lock& y) { x.swap( y ); } - -namespace internal { - -#if _WIN32||_WIN64 -union condvar_impl_t { - condition_variable_using_event cv_event; - CONDITION_VARIABLE cv_native; -}; - -void __TBB_EXPORTED_FUNC internal_initialize_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_destroy_condition_variable( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_one( condvar_impl_t& cv ); -void __TBB_EXPORTED_FUNC internal_condition_variable_notify_all( condvar_impl_t& cv ); -bool __TBB_EXPORTED_FUNC internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i = NULL ); -#else /* if !(_WIN32||_WIN64), i.e., POSIX threads */ -typedef pthread_cond_t condvar_impl_t; -#endif - -} // namespace internal - -//! cv_status -/** C++0x standard working draft 30.5 */ -enum cv_status { no_timeout, timeout }; - -//! condition variable -/** C++0x standard working draft 30.5.1 - @ingroup synchronization */ -class condition_variable : tbb::internal::no_copy { -public: - //! Constructor - condition_variable() { -#if _WIN32||_WIN64 - internal_initialize_condition_variable( my_cv ); -#else - pthread_cond_init( &my_cv, NULL ); -#endif - } - - //! Destructor - ~condition_variable() { - //precondition: There shall be no thread blocked on *this. -#if _WIN32||_WIN64 - internal_destroy_condition_variable( my_cv ); -#else - pthread_cond_destroy( &my_cv ); -#endif - } - - //! Notify one thread and wake it up - void notify_one() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_one( my_cv ); -#else - pthread_cond_signal( &my_cv ); -#endif - } - - //! Notify all threads - void notify_all() { -#if _WIN32||_WIN64 - internal_condition_variable_notify_all( my_cv ); -#else - pthread_cond_broadcast( &my_cv ); -#endif - } - - //! Release the mutex associated with the lock and wait on this condition variable - void wait(unique_lock& lock); - - //! Wait on this condition variable while pred is false - template - void wait(unique_lock& lock, Predicate pred) { - while( !pred() ) - wait( lock ); - } - - //! Timed version of wait() - cv_status wait_for(unique_lock& lock, const tick_count::interval_t &i ); - - //! Timed version of the predicated wait - /** The loop terminates when pred() returns true or when the time duration specified by rel_time (i) has elapsed. */ - template - bool wait_for(unique_lock& lock, const tick_count::interval_t &i, Predicate pred) - { - while( !pred() ) { - cv_status st = wait_for( lock, i ); - if( st==timeout ) - return pred(); - } - return true; - } - - // C++0x standard working draft. 
30.2.3 - typedef internal::condvar_impl_t* native_handle_type; - - native_handle_type native_handle() { return (native_handle_type) &my_cv; } - -private: - internal::condvar_impl_t my_cv; -}; - - -#if _WIN32||_WIN64 -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( !internal_condition_variable_wait( my_cv, lock.mutex() ) ) { - int ec = GetLastError(); - // on Windows 7, SleepConditionVariableCS() may return ERROR_TIMEOUT while the doc says it returns WAIT_TIMEOUT - __TBB_ASSERT_EX( ec!=WAIT_TIMEOUT&&ec!=ERROR_TIMEOUT, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - // condvar_wait could be SleepConditionVariableCS (or SleepConditionVariableSRW) or our own pre-vista cond_var_wait() - if( !internal_condition_variable_wait( my_cv, lock.mutex(), &i ) ) { - int ec = GetLastError(); - if( ec==WAIT_TIMEOUT || ec==ERROR_TIMEOUT ) - rc = timeout; - else { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} -#else -#if __linux__ -#include -#else /* generic Unix */ -#include -#endif - -inline void condition_variable::wait( unique_lock& lock ) -{ - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( pthread_cond_wait( &my_cv, lock.mutex()->native_handle() ) ) { - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - // upon successful return, the mutex has been locked and is owned by the calling thread. - lock.owns = true; -} - -inline cv_status condition_variable::wait_for( unique_lock& lock, const tick_count::interval_t& i ) -{ -#if __linux__ - struct timespec req; - double sec = i.seconds(); - clock_gettime( CLOCK_REALTIME, &req ); - req.tv_sec += static_cast(sec); - req.tv_nsec += static_cast( (sec - static_cast(sec))*1e9 ); -#else /* generic Unix */ - struct timeval tv; - struct timespec req; - double sec = i.seconds(); - int status = gettimeofday(&tv, NULL); - __TBB_ASSERT_EX( status==0, "gettimeofday failed" ); - req.tv_sec = tv.tv_sec + static_cast(sec); - req.tv_nsec = tv.tv_usec*1000 + static_cast( (sec - static_cast(sec))*1e9 ); -#endif /*(choice of OS) */ - - int ec; - cv_status rc = no_timeout; - __TBB_ASSERT( lock.owns, NULL ); - lock.owns = false; - if( ( ec=pthread_cond_timedwait( &my_cv, lock.mutex()->native_handle(), &req ) ) ) { - if( ec==ETIMEDOUT ) - rc = timeout; - else { - __TBB_ASSERT( lock.try_lock()==false, NULL ); - lock.owns = true; - throw_exception_v4( tbb::internal::eid_condvar_wait_failed ); - } - } - lock.owns = true; - return rc; -} -#endif /* !(_WIN32||_WIN64) */ - -} // namespace interface5 - -__TBB_DEFINE_PROFILING_SET_NAME(interface5::condition_variable) - -} // namespace tbb - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -using tbb::interface5::defer_lock_t; -using tbb::interface5::try_to_lock_t; -using tbb::interface5::adopt_lock_t; -using tbb::interface5::defer_lock; -using tbb::interface5::try_to_lock; -using tbb::interface5::adopt_lock; -using tbb::interface5::lock_guard; -using tbb::interface5::unique_lock; -using tbb::interface5::swap; /* this is for void std::swap(unique_lock&,unique_lock&) */ -using tbb::interface5::condition_variable; -using tbb::interface5::cv_status; -using tbb::interface5::timeout; 
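A brief sketch of the compat layer above, written against the fully qualified interface5 names; with TBB_IMPLEMENT_CPP0X defined these same names are also injected into namespace std, as the using-declarations below show. The flag/mutex pair is illustrative only, and the condition variable must be used with tbb::mutex:

#include "tbb/mutex.h"
#include "tbb/compat/condition_variable"

tbb::mutex                           mtx;
tbb::interface5::condition_variable  ready_cv;
bool                                 ready = false;

void signal_ready() {
    {
        tbb::interface5::lock_guard<tbb::mutex> lock( mtx );
        ready = true;
    }                              // release the mutex before notifying
    ready_cv.notify_one();
}

void wait_until_ready() {
    tbb::interface5::unique_lock<tbb::mutex> lock( mtx );
    while( !ready )                // re-check: guards against spurious wakeups
        ready_cv.wait( lock );     // atomically unlocks, waits, then relocks
}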
-using tbb::interface5::no_timeout; - -} // namespace std - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_condition_variable_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/compat/ppl.h b/deal.II/bundled/tbb30_104oss/include/tbb/compat/ppl.h deleted file mode 100644 index f524abfed0..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/compat/ppl.h +++ /dev/null @@ -1,68 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_compat_ppl_H -#define __TBB_compat_ppl_H - -#include "../task_group.h" -#include "../parallel_invoke.h" -#include "../parallel_for_each.h" -#include "../parallel_for.h" -#include "../tbb_exception.h" -#include "../critical_section.h" -#include "../reader_writer_lock.h" -#include "../combinable.h" - -namespace Concurrency { - - using tbb::task_handle; - using tbb::task_group_status; - using tbb::task_group; - using tbb::structured_task_group; - using tbb::invalid_multiple_scheduling; - using tbb::missing_wait; - using tbb::make_task; - - using tbb::not_complete; - using tbb::complete; - using tbb::canceled; - - using tbb::is_current_task_group_canceling; - - using tbb::parallel_invoke; - using tbb::strict_ppl::parallel_for; - using tbb::parallel_for_each; - using tbb::critical_section; - using tbb::reader_writer_lock; - using tbb::combinable; - - using tbb::improper_lock; - -} // namespace Concurrency - -#endif /* __TBB_compat_ppl_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/compat/thread b/deal.II/bundled/tbb30_104oss/include/tbb/compat/thread deleted file mode 100644 index e4d3b329d7..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/compat/thread +++ /dev/null @@ -1,54 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
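Code written against the Microsoft PPL names builds on top of TBB through the using-declarations in ppl.h above. A small sketch; the two task functions are placeholders:

#include "tbb/compat/ppl.h"

void load_mesh()   { /* ... */ }
void load_fields() { /* ... */ }

void load_everything() {
    Concurrency::task_group g;                 // maps to tbb::task_group
    g.run( &load_mesh );
    g.run( &load_fields );
    g.wait();

    Concurrency::parallel_invoke( &load_mesh, &load_fields );   // tbb::parallel_invoke
}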
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_thread_H -#define __TBB_thread_H - -#include "../tbb_thread.h" - -#if TBB_IMPLEMENT_CPP0X - -namespace std { - -typedef tbb::tbb_thread thread; - -namespace this_thread { - using tbb::this_tbb_thread::get_id; - using tbb::this_tbb_thread::yield; - - inline void sleep_for(const tbb::tick_count::interval_t& rel_time) { - tbb::internal::thread_sleep_v3( rel_time ); - } - -} - -} - -#endif /* TBB_IMPLEMENT_CPP0X */ - -#endif /* __TBB_thread_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_hash_map.h b/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_hash_map.h deleted file mode 100644 index 75b0f855ef..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_hash_map.h +++ /dev/null @@ -1,1406 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
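A sketch of the thread compatibility header above, assuming TBB_IMPLEMENT_CPP0X is defined so that tbb_thread is exposed under its C++0x name std::thread; the worker function is illustrative:

#include "tbb/compat/thread"
#include "tbb/tick_count.h"

void background_work() {
    // sleep_for takes a tick_count::interval_t rather than a chrono duration
    std::this_thread::sleep_for( tbb::tick_count::interval_t( 0.01 ) );  // about 10 ms
}

void run_in_background() {
    std::thread t( background_work );
    t.join();
}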
-*/ - -#ifndef __TBB_concurrent_hash_map_H -#define __TBB_concurrent_hash_map_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // Need std::pair -#include // Need std::memset - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "cache_aligned_allocator.h" -#include "tbb_allocator.h" -#include "spin_rw_mutex.h" -#include "atomic.h" -#include "aligned_space.h" -#include "tbb_exception.h" -#include "_concurrent_unordered_internal.h" // Need tbb_hasher -#if TBB_USE_PERFORMANCE_WARNINGS -#include -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - //! ITT instrumented routine that loads pointer from location pointed to by src. - void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src ); - //! ITT instrumented routine that stores src into location pointed to by dst. - void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src ); - //! Routine that loads pointer from location pointed to by src without causing ITT to report a race. - void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src ); -} -//! @endcond - -//! hash_compare that is default argument for concurrent_hash_map -template -struct tbb_hash_compare { - static size_t hash( const Key& a ) { return tbb_hasher(a); } - static bool equal( const Key& a, const Key& b ) { return a == b; } -}; - -namespace interface4 { - - template, typename A = tbb_allocator > > - class concurrent_hash_map; - - //! @cond INTERNAL - namespace internal { - - - //! Type of a hash code. - typedef size_t hashcode_t; - //! Node base type - struct hash_map_node_base : tbb::internal::no_copy { - //! Mutex type - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - //! Next node in chain - hash_map_node_base *next; - mutex_t mutex; - }; - //! Incompleteness flag value - static hash_map_node_base *const rehash_req = reinterpret_cast(size_t(3)); - //! Rehashed empty bucket flag - static hash_map_node_base *const empty_rehashed = reinterpret_cast(size_t(0)); - //! base class of concurrent_hash_map - class hash_map_base { - public: - //! Size type - typedef size_t size_type; - //! Type of a hash code. - typedef size_t hashcode_t; - //! Segment index type - typedef size_t segment_index_t; - //! Node base type - typedef hash_map_node_base node_base; - //! Bucket type - struct bucket : tbb::internal::no_copy { - //! Mutex type for buckets - typedef spin_rw_mutex mutex_t; - //! Scoped lock type for mutex - typedef mutex_t::scoped_lock scoped_t; - mutex_t mutex; - node_base *node_list; - }; - //! Count of segments in the first block - static size_type const embedded_block = 1; - //! Count of segments in the first block - static size_type const embedded_buckets = 1< my_mask; - //! Segment pointers table. Also prevents false sharing between my_mask and my_size - segments_table_t my_table; - //! Size of container in stored items - atomic my_size; // It must be in separate cache line from my_mask due to performance effects - //! 
Zero segment - bucket my_embedded_segment[embedded_buckets]; -#if __TBB_STATISTICS - atomic my_info_resizes; // concurrent ones - mutable atomic my_info_restarts; // race collisions - atomic my_info_rehashes; // invocations of rehash_bucket - #if !TBB_USE_PERFORMANCE_WARNINGS - #error Please enable TBB_USE_PERFORMANCE_WARNINGS as well - #endif -#endif - //! Constructor - hash_map_base() { - std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512 - + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8 - + embedded_buckets*sizeof(bucket) ); // n*8 or n*16 - for( size_type i = 0; i < embedded_block; i++ ) // fill the table - my_table[i] = my_embedded_segment + segment_base(i); - my_mask = embedded_buckets - 1; - __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks"); -#if __TBB_STATISTICS - my_info_resizes = 0; // concurrent ones - my_info_restarts = 0; // race collisions - my_info_rehashes = 0; // invocations of rehash_bucket -#endif - } - - //! @return segment index of given index in the array - static segment_index_t segment_index_of( size_type index ) { - return segment_index_t( __TBB_Log2( index|1 ) ); - } - - //! @return the first array index of given segment - static segment_index_t segment_base( segment_index_t k ) { - return (segment_index_t(1)<(ptr) > size_t(63); - } - - //! Initialize buckets - static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) { - if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) ); - else for(size_type i = 0; i < sz; i++, ptr++) { - *reinterpret_cast(&ptr->mutex) = 0; - ptr->node_list = rehash_req; - } - } - - //! Add node @arg n to bucket @arg b - static void add_to_bucket( bucket *b, node_base *n ) { - __TBB_ASSERT(b->node_list != rehash_req, NULL); - n->next = b->node_list; - b->node_list = n; // its under lock and flag is set - } - - //! Exception safety helper - struct enable_segment_failsafe { - segment_ptr_t *my_segment_ptr; - enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {} - ~enable_segment_failsafe() { - if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress - } - }; - - //! 
Enable segment - void enable_segment( segment_index_t k, bool is_initial = false ) { - __TBB_ASSERT( k, "Zero segment must be embedded" ); - enable_segment_failsafe watchdog( my_table, k ); - cache_aligned_allocator alloc; - size_type sz; - __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment"); - if( k >= first_block ) { - sz = segment_size( k ); - segment_ptr_t ptr = alloc.allocate( sz ); - init_buckets( ptr, sz, is_initial ); -#if TBB_USE_THREADING_TOOLS - // TODO: actually, fence and notification are unnecessary here and below - itt_store_pointer_with_release_v3( my_table + k, ptr ); -#else - my_table[k] = ptr;// my_mask has release fence -#endif - sz <<= 1;// double it to get entire capacity of the container - } else { // the first block - __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); - sz = segment_size( first_block ); - segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets ); - init_buckets( ptr, sz - embedded_buckets, is_initial ); - ptr -= segment_base(embedded_block); - for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets -#if TBB_USE_THREADING_TOOLS - itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) ); -#else - my_table[i] = ptr + segment_base(i); -#endif - } -#if TBB_USE_THREADING_TOOLS - itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) ); -#else - my_mask = sz - 1; -#endif - watchdog.my_segment_ptr = 0; - } - - //! Get bucket by (masked) hashcode - bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere? - segment_index_t s = segment_index_of( h ); - h -= segment_base(s); - segment_ptr_t seg = my_table[s]; - __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" ); - return &seg[h]; - } - - // internal serial rehashing helper - void mark_rehashed_levels( hashcode_t h ) throw () { - segment_index_t s = segment_index_of( h ); - while( segment_ptr_t seg = my_table[++s] ) - if( seg[h].node_list == rehash_req ) { - seg[h].node_list = empty_rehashed; - mark_rehashed_levels( h + segment_base(s) ); - } - } - - //! Check for mask race - // Splitting into two functions should help inlining - inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const { - hashcode_t m_now, m_old = m; -#if TBB_USE_THREADING_TOOLS - m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - m_now = my_mask; -#endif - if( m_old != m_now ) - return check_rehashing_collision( h, m_old, m = m_now ); - return false; - } - - //! 
Process mask race, check for rehashing collision - bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const { - __TBB_ASSERT(m_old != m, NULL); // TODO?: m arg could be optimized out by passing h = h&m - if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event - // condition above proves that 'h' has some other bits set beside 'm_old' - // find next applicable mask after m_old //TODO: look at bsl instruction - for( ++m_old; !(h & m_old); m_old <<= 1 ) // at maximum few rounds depending on the first block size - ; - m_old = (m_old<<1) - 1; // get full mask from a bit - __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL); - // check whether it is rehashing/ed -#if TBB_USE_THREADING_TOOLS - if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req ) -#else - if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req ) -#endif - { -#if __TBB_STATISTICS - my_info_restarts++; // race collisions -#endif - return true; - } - } - return false; - } - - //! Insert a node and check for load factor. @return segment index to enable. - segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) { - size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted - add_to_bucket( b, n ); - // check load factor - if( sz >= mask ) { // TODO: add custom load_factor - segment_index_t new_seg = segment_index_of( mask+1 ); - __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated"); -#if TBB_USE_THREADING_TOOLS - if( !itt_load_pointer_v3(my_table+new_seg) -#else - if( !my_table[new_seg] -#endif - && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 ) - return new_seg; // The value must be processed - } - return 0; - } - - //! Prepare enough segments for number of buckets - void reserve(size_type buckets) { - if( !buckets-- ) return; - bool is_initial = !my_size; - for( size_type m = my_mask; buckets > m; m = my_mask ) - enable_segment( segment_index_of( m+1 ), is_initial ); - } - //! Swap hash_map_bases - void internal_swap(hash_map_base &table) { - std::swap(this->my_mask, table.my_mask); - std::swap(this->my_size, table.my_size); - for(size_type i = 0; i < embedded_buckets; i++) - std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list); - for(size_type i = embedded_block; i < pointers_per_table; i++) - std::swap(this->my_table[i], table.my_table[i]); - } - }; - - template - class hash_map_range; - - //! Meets requirements of a forward iterator for STL */ - /** Value is either the T or const T type of the container. 
- @ingroup containers */ - template - class hash_map_iterator - : public std::iterator - { - typedef Container map_type; - typedef typename Container::node node; - typedef hash_map_base::node_base node_base; - typedef hash_map_base::bucket bucket; - - template - friend bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend ptrdiff_t operator-( const hash_map_iterator& i, const hash_map_iterator& j ); - - template - friend class hash_map_iterator; - - template - friend class hash_map_range; - - void advance_to_next_bucket() { // TODO?: refactor to iterator_base class - size_t k = my_index+1; - while( my_bucket && k <= my_map->my_mask ) { - // Following test uses 2's-complement wizardry - if( k& (k-2) ) // not the beginning of a segment - ++my_bucket; - else my_bucket = my_map->get_bucket( k ); - my_node = static_cast( my_bucket->node_list ); - if( hash_map_base::is_valid(my_node) ) { - my_index = k; return; - } - ++k; - } - my_bucket = 0; my_node = 0; my_index = k; // the end - } -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class interface4::concurrent_hash_map; -#else - public: // workaround -#endif - //! concurrent_hash_map over which we are iterating. - const Container *my_map; - - //! Index in hash table for current item - size_t my_index; - - //! Pointer to bucket - const bucket *my_bucket; - - //! Pointer to node that has current item - node *my_node; - - hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ); - - public: - //! Construct undefined iterator - hash_map_iterator() {} - hash_map_iterator( const hash_map_iterator &other ) : - my_map(other.my_map), - my_index(other.my_index), - my_bucket(other.my_bucket), - my_node(other.my_node) - {} - Value& operator*() const { - __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" ); - return my_node->item; - } - Value* operator->() const {return &operator*();} - hash_map_iterator& operator++(); - - //! Post increment - hash_map_iterator operator++(int) { - hash_map_iterator old(*this); - operator++(); - return old; - } - }; - - template - hash_map_iterator::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) : - my_map(&map), - my_index(index), - my_bucket(b), - my_node( static_cast(n) ) - { - if( b && !hash_map_base::is_valid(n) ) - advance_to_next_bucket(); - } - - template - hash_map_iterator& hash_map_iterator::operator++() { - my_node = static_cast( my_node->next ); - if( !my_node ) advance_to_next_bucket(); - return *this; - } - - template - bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ) { - return i.my_node == j.my_node && i.my_map == j.my_map; - } - - template - bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ) { - return i.my_node != j.my_node || i.my_map != j.my_map; - } - - //! Range class used with concurrent_hash_map - /** @ingroup containers */ - template - class hash_map_range { - typedef typename Iterator::map_type map_type; - Iterator my_begin; - Iterator my_end; - mutable Iterator my_midpoint; - size_t my_grainsize; - //! Set my_midpoint to point approximately half way between my_begin and my_end. - void set_midpoint() const; - template friend class hash_map_range; - public: - //! 
Type for size of a range - typedef std::size_t size_type; - typedef typename Iterator::value_type value_type; - typedef typename Iterator::reference reference; - typedef typename Iterator::difference_type difference_type; - typedef Iterator iterator; - - //! True if range is empty. - bool empty() const {return my_begin==my_end;} - - //! True if range can be partitioned into two subranges. - bool is_divisible() const { - return my_midpoint!=my_end; - } - //! Split range. - hash_map_range( hash_map_range& r, split ) : - my_end(r.my_end), - my_grainsize(r.my_grainsize) - { - r.my_end = my_begin = r.my_midpoint; - __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); - __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); - set_midpoint(); - r.set_midpoint(); - } - //! type conversion - template - hash_map_range( hash_map_range& r) : - my_begin(r.my_begin), - my_end(r.my_end), - my_midpoint(r.my_midpoint), - my_grainsize(r.my_grainsize) - {} -#if TBB_DEPRECATED - //! Init range with iterators and grainsize specified - hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) : - my_begin(begin_), - my_end(end_), - my_grainsize(grainsize_) - { - if(!my_end.my_index && !my_end.my_bucket) // end - my_end.my_index = my_end.my_map->my_mask + 1; - set_midpoint(); - __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); - } -#endif - //! Init range with container and grainsize specified - hash_map_range( const map_type &map, size_type grainsize_ = 1 ) : - my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ), - my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ), - my_grainsize( grainsize_ ) - { - __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); - set_midpoint(); - } - const Iterator& begin() const {return my_begin;} - const Iterator& end() const {return my_end;} - //! The grain size for this range. - size_type grainsize() const {return my_grainsize;} - }; - - template - void hash_map_range::set_midpoint() const { - // Split by groups of nodes - size_t m = my_end.my_index-my_begin.my_index; - if( m > my_grainsize ) { - m = my_begin.my_index + m/2u; - hash_map_base::bucket *b = my_begin.my_map->get_bucket(m); - my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list); - } else { - my_midpoint = my_end; - } - __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index, - "my_begin is after my_midpoint" ); - __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index, - "my_midpoint is after my_end" ); - __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end, - "[my_begin, my_midpoint) range should not be empty" ); - } - - } // internal -//! @endcond - -//! Unordered map from Key to T. -/** concurrent_hash_map is associative container with concurrent access. - -@par Compatibility - The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). - -@par Exception Safety - - Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors. - - If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment). - - If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results. - -@par Changes since TBB 2.1 - - Replaced internal algorithm and data structure. Patent is pending. 
- - Added buckets number argument for constructor - -@par Changes since TBB 2.0 - - Fixed exception-safety - - Added template argument for allocator - - Added allocator argument in constructors - - Added constructor from a range of iterators - - Added several new overloaded insert() methods - - Added get_allocator() - - Added swap() - - Added count() - - Added overloaded erase(accessor &) and erase(const_accessor&) - - Added equal_range() [const] - - Added [const_]pointer, [const_]reference, and allocator_type types - - Added global functions: operator==(), operator!=(), and swap() - - @ingroup containers */ -template -class concurrent_hash_map : protected internal::hash_map_base { - template - friend class internal::hash_map_iterator; - - template - friend class internal::hash_map_range; - -public: - typedef Key key_type; - typedef T mapped_type; - typedef std::pair value_type; - typedef hash_map_base::size_type size_type; - typedef ptrdiff_t difference_type; - typedef value_type *pointer; - typedef const value_type *const_pointer; - typedef value_type &reference; - typedef const value_type &const_reference; - typedef internal::hash_map_iterator iterator; - typedef internal::hash_map_iterator const_iterator; - typedef internal::hash_map_range range_type; - typedef internal::hash_map_range const_range_type; - typedef Allocator allocator_type; - -protected: - friend class const_accessor; - struct node; - typedef typename Allocator::template rebind::other node_allocator_type; - node_allocator_type my_allocator; - HashCompare my_hash_compare; - - struct node : public node_base { - value_type item; - node( const Key &key ) : item(key, T()) {} - node( const Key &key, const T &t ) : item(key, t) {} - // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17 - void *operator new( size_t /*size*/, node_allocator_type &a ) { - void *ptr = a.allocate(1); - if(!ptr) - tbb::internal::throw_exception(tbb::internal::eid_bad_alloc); - return ptr; - } - // match placement-new form above to be called if exception thrown in constructor - void operator delete( void *ptr, node_allocator_type &a ) {return a.deallocate(static_cast(ptr),1); } - }; - - void delete_node( node_base *n ) { - my_allocator.destroy( static_cast(n) ); - my_allocator.deallocate( static_cast(n), 1); - } - - node *search_bucket( const key_type &key, bucket *b ) const { - node *n = static_cast( b->node_list ); - while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) ) - n = static_cast( n->next ); - __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket"); - return n; - } - - //! bucket accessor is to find, rehash, acquire a lock, and access a bucket - class bucket_accessor : public bucket::scoped_t { - bool my_is_writer; // TODO: use it from base type - bucket *my_b; - public: - bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); } - //! 
find a bucket by masked hashcode, optionally rehash, and acquire the lock - inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { - my_b = base->get_bucket( h ); -#if TBB_USE_THREADING_TOOLS - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req -#else - if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req -#endif - && try_acquire( my_b->mutex, /*write=*/true ) ) - { - if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing - my_is_writer = true; - } - else bucket::scoped_t::acquire( my_b->mutex, /*write=*/my_is_writer = writer ); - __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL); - } - //! check whether bucket is locked for write - bool is_writer() { return my_is_writer; } - //! get bucket pointer - bucket *operator() () { return my_b; } - // TODO: optimize out - bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); } - }; - - // TODO refactor to hash_base - void rehash_bucket( bucket *b_new, const hashcode_t h ) { - __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)"); - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed - hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit -#if __TBB_STATISTICS - my_info_rehashes++; // invocations of rehash_bucket -#endif - - bucket_accessor b_old( this, h & mask ); - - mask = (mask<<1) | 1; // get full mask for new bucket - __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL ); - restart: - for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(n)->item.first ); -#if TBB_USE_ASSERT - hashcode_t bmask = h & (mask>>1); - bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket - __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" ); -#endif - if( (c & mask) == h ) { - if( !b_old.is_writer() ) - if( !b_old.upgrade_to_writer() ) { - goto restart; // node ptr can be invalid due to concurrent erase - } - *p = n->next; // exclude from b_old - add_to_bucket( b_new, n ); - } else p = &n->next; // iterate to next item - } - } - -public: - - class accessor; - //! Combines data access, locking, and garbage collection. - class const_accessor { - friend class concurrent_hash_map; - friend class accessor; - void operator=( const accessor & ) const; // Deny access - const_accessor( const accessor & ); // Deny access - public: - //! Type of value - typedef const typename concurrent_hash_map::value_type value_type; - - //! True if result is empty. - bool empty() const {return !my_node;} - - //! Set to null - void release() { - if( my_node ) { - my_lock.release(); - my_node = 0; - } - } - - //! Return reference to associated value in hash table. - const_reference operator*() const { - __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); - return my_node->item; - } - - //! Return pointer to associated value in hash table. - const_pointer operator->() const { - return &operator*(); - } - - //! Create empty result - const_accessor() : my_node(NULL) {} - - //! Destroy result after releasing the underlying reference. 
- ~const_accessor() { - my_node = NULL; // my_lock.release() is called in scoped_lock destructor - } - private: - node *my_node; - typename node::scoped_t my_lock; - hashcode_t my_hash; - }; - - //! Allows write access to elements and combines data access, locking, and garbage collection. - class accessor: public const_accessor { - public: - //! Type of value - typedef typename concurrent_hash_map::value_type value_type; - - //! Return reference to associated value in hash table. - reference operator*() const { - __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); - return this->my_node->item; - } - - //! Return pointer to associated value in hash table. - pointer operator->() const { - return &operator*(); - } - }; - - //! Construct empty table. - concurrent_hash_map(const allocator_type &a = allocator_type()) - : internal::hash_map_base(), my_allocator(a) - {} - - //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. - concurrent_hash_map(size_type n, const allocator_type &a = allocator_type()) - : my_allocator(a) - { - reserve( n ); - } - - //! Copy constructor - concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type()) - : internal::hash_map_base(), my_allocator(a) - { - internal_copy(table); - } - - //! Construction with copying iteration range and given allocator instance - template - concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type()) - : my_allocator(a) - { - reserve( std::distance(first, last) ); // TODO: load_factor? - internal_copy(first, last); - } - - //! Assignment - concurrent_hash_map& operator=( const concurrent_hash_map& table ) { - if( this!=&table ) { - clear(); - internal_copy(table); - } - return *this; - } - - - //! Rehashes and optionally resizes the whole table. - /** Useful to optimize performance before or after concurrent operations. - Also enables using of find() and count() concurrent methods in serial context. */ - void rehash(size_type n = 0); - - //! Clear table - void clear(); - - //! Clear table and destroy it. - ~concurrent_hash_map() { clear(); } - - //------------------------------------------------------------------------ - // Parallel algorithm support - //------------------------------------------------------------------------ - range_type range( size_type grainsize=1 ) { - return range_type( *this, grainsize ); - } - const_range_type range( size_type grainsize=1 ) const { - return const_range_type( *this, grainsize ); - } - - //------------------------------------------------------------------------ - // STL support - not thread-safe methods - //------------------------------------------------------------------------ - iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);} - iterator end() {return iterator(*this,0,0,0);} - const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);} - const_iterator end() const {return const_iterator(*this,0,0,0);} - std::pair equal_range( const Key& key ) { return internal_equal_range(key, end()); } - std::pair equal_range( const Key& key ) const { return internal_equal_range(key, end()); } - - //! Number of items in table. - size_type size() const { return my_size; } - - //! True if size()==0. - bool empty() const { return my_size == 0; } - - //! Upper bound on size. - size_type max_size() const {return (~size_type(0))/sizeof(node);} - - //! 
Returns the current number of buckets - size_type bucket_count() const { return my_mask+1; } - - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! swap two instances. Iterators are invalidated - void swap(concurrent_hash_map &table); - - //------------------------------------------------------------------------ - // concurrent map operations - //------------------------------------------------------------------------ - - //! Return count of items (0 or 1) - size_type count( const Key &key ) const { - return const_cast(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false ); - } - - //! Find item and acquire a read lock on the item. - /** Return true if item is found, false otherwise. */ - bool find( const_accessor &result, const Key &key ) const { - result.release(); - return const_cast(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false ); - } - - //! Find item and acquire a write lock on the item. - /** Return true if item is found, false otherwise. */ - bool find( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/false, key, NULL, &result, /*write=*/true ); - } - - //! Insert item (if not already present) and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/false ); - } - - //! Insert item (if not already present) and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const Key &key ) { - result.release(); - return lookup(/*insert*/true, key, NULL, &result, /*write=*/true ); - } - - //! Insert item by copying if there is no such key present already and acquire a read lock on the item. - /** Returns true if item is new. */ - bool insert( const_accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false ); - } - - //! Insert item by copying if there is no such key present already and acquire a write lock on the item. - /** Returns true if item is new. */ - bool insert( accessor &result, const value_type &value ) { - result.release(); - return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true ); - } - - //! Insert item by copying if there is no such key present already - /** Returns true if item is inserted. */ - bool insert( const value_type &value ) { - return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false ); - } - - //! Insert range [first, last) - template - void insert(I first, I last) { - for(; first != last; ++first) - insert( *first ); - } - - //! Erase item. - /** Return true if item was erased by particularly this call. */ - bool erase( const Key& key ); - - //! Erase item by const_accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( const_accessor& item_accessor ) { - return exclude( item_accessor, /*readonly=*/ true ); - } - - //! Erase item by accessor. - /** Return true if item was erased by particularly this call. */ - bool erase( accessor& item_accessor ) { - return exclude( item_accessor, /*readonly=*/ false ); - } - -protected: - //! Insert or find item and optionally acquire a lock on the item. - bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ); - - //! 
delete item by accessor - bool exclude( const_accessor &item_accessor, bool readonly ); - - //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) - template - std::pair internal_equal_range( const Key& key, I end ) const; - - //! Copy "source" to *this, where *this must start out empty. - void internal_copy( const concurrent_hash_map& source ); - - template - void internal_copy(I first, I last); - - //! Fast find when no concurrent erasure is used. For internal use inside TBB only! - /** Return pointer to item with given key, or NULL if no such item exists. - Must not be called concurrently with erasure operations. */ - const_pointer internal_fast_find( const Key& key ) const { - hashcode_t h = my_hash_compare.hash( key ); -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif - node *n; - restart: - __TBB_ASSERT((m&(m+1))==0, NULL); - bucket *b = get_bucket( h & m ); -#if TBB_USE_THREADING_TOOLS - // TODO: actually, notification is unnecessary here, just hiding double-check - if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req ) -#else - if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req ) -#endif - { - bucket::scoped_t lock; - if( lock.try_acquire( b->mutex, /*write=*/true ) ) { - if( b->node_list == internal::rehash_req) - const_cast(this)->rehash_bucket( b, h & m ); //recursive rehashing - } - else lock.acquire( b->mutex, /*write=*/false ); - __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL); - } - n = search_bucket( key, b ); - if( n ) - return &n->item; - else if( check_mask_race( h, m ) ) - goto restart; - return 0; - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress "conditional expression is constant" warning. - #pragma warning( push ) - #pragma warning( disable: 4127 ) -#endif - -template -bool concurrent_hash_map::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) { - __TBB_ASSERT( !result || !result->my_node, NULL ); - bool return_value; - hashcode_t const h = my_hash_compare.hash( key ); -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif - segment_index_t grow_segment = 0; - node *n, *tmp_n = 0; - restart: - {//lock scope - __TBB_ASSERT((m&(m+1))==0, NULL); - return_value = false; - // get bucket - bucket_accessor b( this, h & m ); - - // find a node - n = search_bucket( key, b() ); - if( op_insert ) { - // [opt] insert a key - if( !n ) { - if( !tmp_n ) { - if(t) tmp_n = new( my_allocator ) node(key, *t); - else tmp_n = new( my_allocator ) node(key); - } - if( !b.is_writer() && !b.upgrade_to_writer() ) { // TODO: improved insertion - // Rerun search_list, in case another thread inserted the item during the upgrade. - n = search_bucket( key, b() ); - if( is_valid(n) ) { // unfortunately, it did - b.downgrade_to_reader(); - goto exists; - } - } - if( check_mask_race(h, m) ) - goto restart; // b.release() is done in ~b(). - // insert and set flag to grow the container - grow_segment = insert_new_node( b(), n = tmp_n, m ); - tmp_n = 0; - return_value = true; - } - } else { // find or count - if( !n ) { - if( check_mask_race( h, m ) ) - goto restart; // b.release() is done in ~b(). 
TODO: replace by continue - return false; - } - return_value = true; - } - exists: - if( !result ) goto check_growth; - // TODO: the following seems as generic/regular operation - // acquire the item - if( !result->my_lock.try_acquire( n->mutex, write ) ) { - // we are unlucky, prepare for longer wait - tbb::internal::atomic_backoff trials; - do { - if( !trials.bounded_pause() ) { - // the wait takes really long, restart the operation - b.release(); - __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" ); - __TBB_Yield(); -#if TBB_USE_THREADING_TOOLS - m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - m = my_mask; -#endif - goto restart; - } - } while( !result->my_lock.try_acquire( n->mutex, write ) ); - } - }//lock scope - result->my_node = n; - result->my_hash = h; -check_growth: - // [opt] grow the container - if( grow_segment ) { -#if __TBB_STATISTICS - my_info_resizes++; // concurrent ones -#endif - enable_segment( grow_segment ); - } - if( tmp_n ) // if op_insert only - delete_node( tmp_n ); - return return_value; -} - -template -template -std::pair concurrent_hash_map::internal_equal_range( const Key& key, I end_ ) const { - hashcode_t h = my_hash_compare.hash( key ); - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, NULL); - h &= m; - bucket *b = get_bucket( h ); - while( b->node_list == internal::rehash_req ) { - m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b = get_bucket( h &= m ); - } - node *n = search_bucket( key, b ); - if( !n ) - return std::make_pair(end_, end_); - iterator lower(*this, h, b, n), upper(lower); - return std::make_pair(lower, ++upper); -} - -template -bool concurrent_hash_map::exclude( const_accessor &item_accessor, bool readonly ) { - __TBB_ASSERT( item_accessor.my_node, NULL ); - node_base *const n = item_accessor.my_node; - item_accessor.my_node = NULL; // we ought release accessor anyway - hashcode_t const h = item_accessor.my_hash; -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif - do { - // get bucket - bucket_accessor b( this, h & m, /*writer=*/true ); - node_base **p = &b()->node_list; - while( *p && *p != n ) - p = &(*p)->next; - if( !*p ) { // someone else was the first - if( check_mask_race( h, m ) ) - continue; - item_accessor.my_lock.release(); - return false; - } - __TBB_ASSERT( *p == n, NULL ); - *p = n->next; // remove from container - my_size--; - break; - } while(true); - if( readonly ) // need to get exclusive lock - item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here - item_accessor.my_lock.release(); - delete_node( n ); // Only one thread can delete it due to write lock on the chain_mutex - return true; -} - -template -bool concurrent_hash_map::erase( const Key &key ) { - node_base *n; - hashcode_t const h = my_hash_compare.hash( key ); -#if TBB_USE_THREADING_TOOLS - hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask ); -#else - hashcode_t m = my_mask; -#endif -restart: - {//lock scope - // get bucket - bucket_accessor b( this, h & m ); - search: - node_base **p = &b()->node_list; - n = *p; - while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->item.first ) ) { - p = &n->next; - n = *p; - } - if( !n ) { // not found, but mask could be changed - if( check_mask_race( h, m ) ) - goto restart; - return false; - } - else if( !b.is_writer() && !b.upgrade_to_writer() ) { - if( check_mask_race( h, m ) ) 
// contended upgrade, check mask - goto restart; - goto search; - } - *p = n->next; - my_size--; - } - { - typename node::scoped_t item_locker( n->mutex, /*write=*/true ); - } - // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor! - delete_node( n ); // Only one thread can delete it due to write lock on the bucket - return true; -} - -template -void concurrent_hash_map::swap(concurrent_hash_map &table) { - std::swap(this->my_allocator, table.my_allocator); - std::swap(this->my_hash_compare, table.my_hash_compare); - internal_swap(table); -} - -template -void concurrent_hash_map::rehash(size_type sz) { - reserve( sz ); // TODO: add reduction of number of buckets as well - hashcode_t mask = my_mask; - hashcode_t b = (mask+1)>>1; // size or first index of the last segment - __TBB_ASSERT((b&(b-1))==0, NULL); - bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing - for(; b <= mask; b++, bp++ ) { - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one - hashcode_t h = b; bucket *b_old = bp; - do { - __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); - hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit - b_old = get_bucket( h &= m ); - } while( b_old->node_list == internal::rehash_req ); - // now h - is index of the root rehashed bucket b_old - mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments - for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) { - hashcode_t c = my_hash_compare.hash( static_cast(q)->item.first ); - if( (c & mask) != h ) { // should be rehashed - *p = q->next; // exclude from b_old - bucket *b_new = get_bucket( c & mask ); - __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" ); - add_to_bucket( b_new, q ); - } else p = &q->next; // iterate to next item - } - } - } -#if TBB_USE_PERFORMANCE_WARNINGS - int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS - for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" ); -#if TBB_USE_PERFORMANCE_WARNINGS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n->next ) overpopulated_buckets++; -#endif -#if TBB_USE_ASSERT - for( ; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ) & mask; - __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" ); - } -#endif - } -#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS -#if TBB_USE_PERFORMANCE_WARNINGS - if( buckets > current_size) empty_buckets -= buckets - current_size; - 
else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? - if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -} - -template -void concurrent_hash_map::clear() { - hashcode_t m = my_mask; - __TBB_ASSERT((m&(m+1))==0, NULL); -#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS -#if TBB_USE_PERFORMANCE_WARNINGS - int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics - static bool reported = false; -#endif - bucket *bp = 0; - // check consistency - for( segment_index_t b = 0; b <= m; b++ ) { - if( b & (b-2) ) ++bp; // not the beginning of a segment - else bp = get_bucket( b ); - node_base *n = bp->node_list; - __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); - __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" ); -#if TBB_USE_PERFORMANCE_WARNINGS - if( n == internal::empty_rehashed ) empty_buckets++; - else if( n == internal::rehash_req ) buckets--; - else if( n->next ) overpopulated_buckets++; -#endif -#if __TBB_EXTRA_DEBUG - for(; is_valid(n); n = n->next ) { - hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ); - h &= m; - __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" ); - } -#endif - } -#if TBB_USE_PERFORMANCE_WARNINGS -#if __TBB_STATISTICS - printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d" - " concurrent: resizes=%u rehashes=%u restarts=%u\n", - current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets, - unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) ); - my_info_resizes = 0; // concurrent ones - my_info_restarts = 0; // race collisions - my_info_rehashes = 0; // invocations of rehash_bucket -#endif - if( buckets > current_size) empty_buckets -= buckets - current_size; - else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? - if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { - tbb::internal::runtime_warning( - "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", - typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); - reported = true; - } -#endif -#endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS - my_size = 0; - segment_index_t s = segment_index_of( m ); - __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" ); - cache_aligned_allocator alloc; - do { - __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" ); - segment_ptr_t buckets_ptr = my_table[s]; - size_type sz = segment_size( s ? 
s : 1 ); - for( segment_index_t i = 0; i < sz; i++ ) - for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) { - buckets_ptr[i].node_list = n->next; - delete_node( n ); - } - if( s >= first_block) // the first segment or the next - alloc.deallocate( buckets_ptr, sz ); - else if( s == embedded_block && embedded_block != first_block ) - alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets ); - if( s >= embedded_block ) my_table[s] = 0; - } while(s-- > 0); - my_mask = embedded_buckets - 1; -} - -template -void concurrent_hash_map::internal_copy( const concurrent_hash_map& source ) { - reserve( source.my_size ); // TODO: load_factor? - hashcode_t mask = source.my_mask; - if( my_mask == mask ) { // optimized version - bucket *dst = 0, *src = 0; - bool rehash_required = false; - for( hashcode_t k = 0; k <= mask; k++ ) { - if( k & (k-2) ) ++dst,src++; // not the beginning of a segment - else { dst = get_bucket( k ); src = source.get_bucket( k ); } - __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = static_cast( src->node_list ); - if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets - rehash_required = true; - dst->node_list = internal::rehash_req; - } else for(; n; n = static_cast( n->next ) ) { - add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) ); - ++my_size; // TODO: replace by non-atomic op - } - } - if( rehash_required ) rehash(); - } else internal_copy( source.begin(), source.end() ); -} - -template -template -void concurrent_hash_map::internal_copy(I first, I last) { - hashcode_t m = my_mask; - for(; first != last; ++first) { - hashcode_t h = my_hash_compare.hash( first->first ); - bucket *b = get_bucket( h & m ); - __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table"); - node *n = new( my_allocator ) node(first->first, first->second); - add_to_bucket( b, n ); - ++my_size; // TODO: replace by non-atomic op - } -} - -} // namespace interface4 - -using interface4::concurrent_hash_map; - - -template -inline bool operator==(const concurrent_hash_map &a, const concurrent_hash_map &b) { - if(a.size() != b.size()) return false; - typename concurrent_hash_map::const_iterator i(a.begin()), i_end(a.end()); - typename concurrent_hash_map::const_iterator j, j_end(b.end()); - for(; i != i_end; ++i) { - j = b.equal_range(i->first).first; - if( j == j_end || !(i->second == j->second) ) return false; - } - return true; -} - -template -inline bool operator!=(const concurrent_hash_map &a, const concurrent_hash_map &b) -{ return !(a == b); } - -template -inline void swap(concurrent_hash_map &a, concurrent_hash_map &b) -{ a.swap( b ); } - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4127 is back - -} // namespace tbb - -#endif /* __TBB_concurrent_hash_map_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_queue.h b/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_queue.h deleted file mode 100644 index 6f502178e4..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_queue.h +++ /dev/null @@ -1,413 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
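
The accessor-based interface of concurrent_hash_map deleted above is easier to follow next to a short usage sketch. The snippet below is illustrative only and assumes the bundled tbb30_104oss headers; word_count_table, add_word, lookup_word and sum_counts are made-up names, not part of TBB or of deal.II.

    #include <string>
    #include "tbb/concurrent_hash_map.h"

    typedef tbb::concurrent_hash_map<std::string,int> word_count_table;

    void add_word( word_count_table &table, const std::string &word ) {
        // accessor holds a write lock on the element until it is released
        // or goes out of scope
        word_count_table::accessor a;
        table.insert( a, word );      // creates the element with int() == 0 if absent
        a->second += 1;
    }

    int lookup_word( const word_count_table &table, const std::string &word ) {
        // const_accessor holds a read lock; concurrent readers may proceed
        word_count_table::const_accessor ca;
        return table.find( ca, word ) ? ca->second : 0;
    }

    // Serial STL-style traversal (not thread-safe); table.range() can instead
    // be handed to tbb::parallel_for to traverse the splittable range_type.
    int sum_counts( word_count_table &table ) {
        int sum = 0;
        for( word_count_table::iterator i = table.begin(); i != table.end(); ++i )
            sum += i->second;
        return sum;
    }
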
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_H -#define __TBB_concurrent_queue_H - -#include "_concurrent_queue_internal.h" - -namespace tbb { - -namespace strict_ppl { - -//! A high-performance thread-safe non-blocking concurrent queue. -/** Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_queue: public internal::concurrent_queue_base_v3 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - //! Allocates a block of size n (bytes) - /*overide*/ virtual void *allocate_block( size_t n ) { - void *b = reinterpret_cast(my_allocator.allocate( n )); - if( !b ) - internal::throw_exception(internal::eid_bad_alloc); - return b; - } - - //! Deallocates block created by allocate_block. - /*override*/ virtual void deallocate_block( void *b, size_t n ) { - my_allocator.deallocate( reinterpret_cast(b), n ); - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - typedef size_t size_type; - - //! Difference type for iterator - typedef ptrdiff_t difference_type; - - //! Allocator type - typedef A allocator_type; - - //! Construct empty queue - explicit concurrent_queue(const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - } - - //! [begin,end) constructor - template - concurrent_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - my_allocator( a ) - { - for( ; begin != end; ++begin ) - this->internal_push(&*begin); - } - - //! Copy constructor - concurrent_queue( const concurrent_queue& src, const allocator_type& a = allocator_type()) : - internal::concurrent_queue_base_v3(), my_allocator( a ) - { - this->assign( src ); - } - - //! Destroy queue - ~concurrent_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - this->internal_push( &source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& result ) { - return this->internal_try_pop( &result ); - } - - //! 
Return the number of items in the queue; thread unsafe - size_type unsafe_size() const {return this->internal_size();} - - //! Equivalent to size()==0. - bool empty() const {return this->internal_empty();} - - //! Clear the queue. not thread-safe. - void clear() ; - - //! Return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator unsafe_begin() {return iterator(*this);} - iterator unsafe_end() {return iterator();} - const_iterator unsafe_begin() const {return const_iterator(*this);} - const_iterator unsafe_end() const {return const_iterator();} -} ; - -template -concurrent_queue::~concurrent_queue() { - clear(); - this->internal_finish_clear(); -} - -template -void concurrent_queue::clear() { - while( !empty() ) { - T value; - this->internal_try_pop(&value); - } -} - -} // namespace strict_ppl - -//! A high-performance thread-safe blocking concurrent bounded queue. -/** This is the pre-PPL TBB concurrent queue which supports boundedness and blocking semantics. - Note that method names agree with the PPL-style concurrent queue. - Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_bounded_queue: public internal::concurrent_queue_base_v3 { - template friend class internal::concurrent_queue_iterator; - - //! Allocator type - typedef typename A::template rebind::other page_allocator_type; - page_allocator_type my_allocator; - - typedef typename concurrent_queue_base_v3::padded_page padded_page; - - //! Class used to ensure exception-safety of method "pop" - class destroyer: internal::no_copy { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - T& get_ref( page& p, size_t index ) { - __TBB_ASSERT( index(static_cast(&p))->last)[index]; - } - - /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - - /*override*/ virtual void copy_page_item( page& dst, size_t dindex, const page& src, size_t sindex ) { - new( &get_ref(dst,dindex) ) T( get_ref( const_cast(src), sindex ) ); - } - - /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = from; - } - - /*overide*/ virtual page *allocate_page() { - size_t n = sizeof(padded_page) + (items_per_page-1)*sizeof(T); - page *p = reinterpret_cast(my_allocator.allocate( n )); - if( !p ) - internal::throw_exception(internal::eid_bad_alloc); - return p; - } - - /*override*/ virtual void deallocate_page( page *p ) { - size_t n = sizeof(padded_page) + items_per_page*sizeof(T); - my_allocator.deallocate( reinterpret_cast(p), n ); - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Allocator type - typedef A allocator_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - /** Notice that the size_type is a signed integral type. 
- This is because the size can be negative if there are pending pops without corresponding pushes. */ - typedef std::ptrdiff_t size_type; - - //! Difference type for iterator - typedef std::ptrdiff_t difference_type; - - //! Construct empty queue - explicit concurrent_bounded_queue(const allocator_type& a = allocator_type()) : - concurrent_queue_base_v3( sizeof(T) ), my_allocator( a ) - { - } - - //! Copy constructor - concurrent_bounded_queue( const concurrent_bounded_queue& src, const allocator_type& a = allocator_type()) : - concurrent_queue_base_v3( sizeof(T) ), my_allocator( a ) - { - assign( src ); - } - - //! [begin,end) constructor - template - concurrent_bounded_queue( InputIterator begin, InputIterator end, const allocator_type& a = allocator_type()) : - concurrent_queue_base_v3( sizeof(T) ), my_allocator( a ) - { - for( ; begin != end; ++begin ) - internal_push_if_not_full(&*begin); - } - - //! Destroy queue - ~concurrent_bounded_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - internal_push( &source ); - } - - //! Dequeue item from head of queue. - /** Block until an item becomes available, and then dequeue it. */ - void pop( T& destination ) { - internal_pop( &destination ); - } - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool try_push( const T& source ) { - return internal_push_if_not_full( &source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool try_pop( T& destination ) { - return internal_pop_if_present( &destination ); - } - - //! Return number of pushes minus number of pops. - /** Note that the result can be negative if there are pops waiting for the - corresponding pushes. The result can also exceed capacity() if there - are push operations in flight. */ - size_type size() const {return internal_size();} - - //! Equivalent to size()<=0. - bool empty() const {return internal_empty();} - - //! Maximum number of allowed elements - size_type capacity() const { - return my_capacity; - } - - //! Set the capacity - /** Setting the capacity to 0 causes subsequent try_push operations to always fail, - and subsequent push operations to block forever. */ - void set_capacity( size_type new_capacity ) { - internal_set_capacity( new_capacity, sizeof(T) ); - } - - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! clear the queue. not thread-safe. - void clear() ; - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator unsafe_begin() {return iterator(*this);} - iterator unsafe_end() {return iterator();} - const_iterator unsafe_begin() const {return const_iterator(*this);} - const_iterator unsafe_end() const {return const_iterator();} - -}; - -template -concurrent_bounded_queue::~concurrent_bounded_queue() { - clear(); - internal_finish_clear(); -} - -template -void concurrent_bounded_queue::clear() { - while( !empty() ) { - T value; - internal_pop_if_present(&value); - } -} - -namespace deprecated { - -//! 
A high-performance thread-safe blocking concurrent bounded queue. -/** This is the pre-PPL TBB concurrent queue which support boundedness and blocking semantics. - Note that method names agree with the PPL-style concurrent queue. - Multiple threads may each push and pop concurrently. - Assignment construction is not allowed. - @ingroup containers */ -template > -class concurrent_queue: public concurrent_bounded_queue { -#if !__TBB_TEMPLATE_FRIENDS_BROKEN - template friend class internal::concurrent_queue_iterator; -#endif - -public: - //! Construct empty queue - explicit concurrent_queue(const A& a = A()) : - concurrent_bounded_queue( a ) - { - } - - //! Copy constructor - concurrent_queue( const concurrent_queue& src, const A& a = A()) : - concurrent_bounded_queue( src, a ) - { - } - - //! [begin,end) constructor - template - concurrent_queue( InputIterator b /*begin*/, InputIterator e /*end*/, const A& a = A()) : - concurrent_bounded_queue( b, e, a ) - { - } - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool push_if_not_full( const T& source ) { - return this->try_push( source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. - @deprecated Use try_pop() - */ - bool pop_if_present( T& destination ) { - return this->try_pop( destination ); - } - - typedef typename concurrent_bounded_queue::iterator iterator; - typedef typename concurrent_bounded_queue::const_iterator const_iterator; - // - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. - //------------------------------------------------------------------------ - iterator begin() {return this->unsafe_begin();} - iterator end() {return this->unsafe_end();} - const_iterator begin() const {return this->unsafe_begin();} - const_iterator end() const {return this->unsafe_end();} -}; - -} - - -#if TBB_DEPRECATED -using deprecated::concurrent_queue; -#else -using strict_ppl::concurrent_queue; -#endif - -} // namespace tbb - -#endif /* __TBB_concurrent_queue_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_unordered_map.h b/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_unordered_map.h deleted file mode 100644 index 2521961481..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_unordered_map.h +++ /dev/null @@ -1,241 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
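
The two queue templates deleted above differ mainly in boundedness and blocking behaviour. A usage sketch, illustrative only and assuming the bundled headers (the function names are made up):

    #include "tbb/concurrent_queue.h"

    // Unbounded, non-blocking queue; tbb::concurrent_queue resolves to
    // strict_ppl::concurrent_queue unless TBB_DEPRECATED is defined.
    void nonblocking_example() {
        tbb::concurrent_queue<int> q;
        q.push( 42 );
        int value;
        if( q.try_pop( value ) ) {
            // got an item without waiting; try_pop returns false on an empty queue
        }
    }

    // Bounded, blocking queue.
    void blocking_example() {
        tbb::concurrent_bounded_queue<int> q;
        q.set_capacity( 16 );     // try_push starts failing once 16 items are pending
        if( !q.try_push( 1 ) )
            q.push( 1 );          // push blocks while the queue is full
        int value;
        q.pop( value );           // pop blocks until an item is available
    }
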
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* Container implementations in this header are based on PPL implementations - provided by Microsoft. */ - -#ifndef __TBB_concurrent_unordered_map_H -#define __TBB_concurrent_unordered_map_H - -#include "_concurrent_unordered_internal.h" - -namespace tbb -{ - -// Template class for hash compare -template -class tbb_hash -{ -public: - tbb_hash() {} - - size_t operator()(const Key& key) const - { - return tbb_hasher(key); - } -}; - -namespace interface5 { - -// Template class for hash map traits -template -class concurrent_unordered_map_traits -{ -protected: - typedef std::pair value_type; - typedef Key key_type; - typedef Hash_compare hash_compare; - typedef typename Allocator::template rebind::other allocator_type; - enum { allow_multimapping = Allow_multimapping }; - - concurrent_unordered_map_traits() : my_hash_compare() {} - concurrent_unordered_map_traits(const hash_compare& hc) : my_hash_compare(hc) {} - - class value_compare : public std::binary_function - { - friend class concurrent_unordered_map_traits; - - public: - bool operator()(const value_type& left, const value_type& right) const - { - return (my_hash_compare(left.first, right.first)); - } - - value_compare(const hash_compare& comparator) : my_hash_compare(comparator) {} - - protected: - hash_compare my_hash_compare; // the comparator predicate for keys - }; - - template - static const Key& get_key(const std::pair& value) { - return (value.first); - } - - hash_compare my_hash_compare; // the comparator predicate for keys -}; - -template , typename Key_equality = std::equal_to, typename Allocator = tbb::tbb_allocator > > -class concurrent_unordered_map : public internal::concurrent_unordered_base< concurrent_unordered_map_traits, Allocator, false> > -{ - // Base type definitions - typedef internal::hash_compare hash_compare; - typedef internal::concurrent_unordered_base< concurrent_unordered_map_traits > base_type; - typedef concurrent_unordered_map_traits, Allocator, false> traits_type; - using traits_type::my_hash_compare; -#if __TBB_EXTRA_DEBUG -public: -#endif - using traits_type::allow_multimapping; -public: - using base_type::end; - using base_type::find; - using base_type::insert; - - // Type definitions - typedef Key key_type; - typedef typename base_type::value_type value_type; - typedef T mapped_type; - typedef Hasher hasher; - typedef Key_equality key_equal; - typedef hash_compare key_compare; - - typedef typename base_type::allocator_type allocator_type; - typedef typename base_type::pointer pointer; - typedef typename base_type::const_pointer const_pointer; - typedef typename base_type::reference reference; - typedef typename base_type::const_reference const_reference; - - typedef typename base_type::size_type size_type; - typedef typename base_type::difference_type difference_type; - - typedef typename base_type::iterator iterator; - typedef typename base_type::const_iterator const_iterator; - typedef typename base_type::iterator local_iterator; - typedef typename base_type::const_iterator const_local_iterator; - - // Construction/destruction/copying - explicit 
concurrent_unordered_map(size_type n_of_buckets = 8, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - } - - concurrent_unordered_map(const Allocator& a) : base_type(8, key_compare(), a) - { - } - - template - concurrent_unordered_map(Iterator first, Iterator last, size_type n_of_buckets = 8, const hasher& a_hasher = hasher(), - const key_equal& a_keyeq = key_equal(), const allocator_type& a = allocator_type()) - : base_type(n_of_buckets, key_compare(a_hasher, a_keyeq), a) - { - for (; first != last; ++first) - base_type::insert(*first); - } - - concurrent_unordered_map(const concurrent_unordered_map& table) : base_type(table) - { - } - - concurrent_unordered_map(const concurrent_unordered_map& table, const Allocator& a) - : base_type(table, a) - { - } - - concurrent_unordered_map& operator=(const concurrent_unordered_map& table) - { - base_type::operator=(table); - return (*this); - } - - iterator unsafe_erase(const_iterator where) - { - return base_type::unsafe_erase(where); - } - - size_type unsafe_erase(const key_type& key) - { - return base_type::unsafe_erase(key); - } - - iterator unsafe_erase(const_iterator first, const_iterator last) - { - return base_type::unsafe_erase(first, last); - } - - void swap(concurrent_unordered_map& table) - { - base_type::swap(table); - } - - // Observers - hasher hash_function() const - { - return my_hash_compare.my_hash_object; - } - - key_equal key_eq() const - { - return my_hash_compare.my_key_compare_object; - } - - mapped_type& operator[](const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - where = insert(std::pair(key, mapped_type())).first; - } - - return ((*where).second); - } - - mapped_type& at(const key_type& key) - { - iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } - - const mapped_type& at(const key_type& key) const - { - const_iterator where = find(key); - - if (where == end()) - { - tbb::internal::throw_exception(tbb::internal::eid_invalid_key); - } - - return ((*where).second); - } -}; - -} // namespace interface5 - -using interface5::concurrent_unordered_map; - -} // namespace tbb - -#endif// __TBB_concurrent_unordered_map_H diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_vector.h b/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_vector.h deleted file mode 100644 index 8106eb4941..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/concurrent_vector.h +++ /dev/null @@ -1,1060 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
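
Unlike concurrent_hash_map, the concurrent_unordered_map deleted above exposes an STL-like interface without accessors: insertion, lookup and traversal are safe to run concurrently, while erasure is not (hence unsafe_erase). A sketch, illustrative only and assuming the bundled headers:

    #include <string>
    #include <utility>
    #include "tbb/concurrent_unordered_map.h"

    void unordered_map_example() {
        typedef tbb::concurrent_unordered_map<std::string,int> map_t;
        map_t m;
        m["alpha"] = 1;      // operator[] inserts a default-constructed value if the key is absent
        m.insert( std::make_pair( std::string("beta"), 2 ) );
        map_t::iterator it = m.find( "alpha" );
        if( it != m.end() )
            ++it->second;    // element access is not locked; synchronizing
                             // writes to a value is the caller's job
    }
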
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_vector_H -#define __TBB_concurrent_vector_H - -#include "tbb_stddef.h" -#include "tbb_exception.h" -#include "atomic.h" -#include "cache_aligned_allocator.h" -#include "blocked_range.h" -#include "tbb_machine.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if _MSC_VER==1500 && !__INTEL_COMPILER - // VS2008/VC9 seems to have an issue; limits pull in math.h - #pragma warning( push ) - #pragma warning( disable: 4985 ) -#endif -#include /* std::numeric_limits */ -#if _MSC_VER==1500 && !__INTEL_COMPILER - #pragma warning( pop ) -#endif - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4267) -#endif - -namespace tbb { - -template > -class concurrent_vector; - -//! @cond INTERNAL -namespace internal { - - //! Bad allocation marker - static void *const vector_allocation_error_flag = reinterpret_cast(size_t(63)); - - //! Routine that loads pointer from location pointed to by src without any fence, without causing ITT to report a race. - void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src ); - - //! Base class of concurrent vector implementation. - /** @ingroup containers */ - class concurrent_vector_base_v3 { - protected: - - // Basic types declarations - typedef size_t segment_index_t; - typedef size_t size_type; - - // Using enumerations due to Mac linking problems of static const variables - enum { - // Size constants - default_initial_segments = 1, // 2 initial items - //! Number of slots for segment's pointers inside the class - pointers_per_short_table = 3, // to fit into 8 words of entire structure - pointers_per_long_table = sizeof(segment_index_t) * 8 // one segment per bit - }; - - // Segment pointer. Can be zero-initialized - struct segment_t { - void* array; -#if TBB_USE_ASSERT - ~segment_t() { - __TBB_ASSERT( array <= internal::vector_allocation_error_flag, "should have been freed by clear" ); - } -#endif /* TBB_USE_ASSERT */ - }; - - // Data fields - - //! allocator function pointer - void* (*vector_allocator_ptr)(concurrent_vector_base_v3 &, size_t); - - //! count of segments in the first block - atomic my_first_block; - - //! Requested size of vector - atomic my_early_size; - - //! Pointer to the segments table - atomic my_segment; - - //! 
embedded storage of segment pointers - segment_t my_storage[pointers_per_short_table]; - - // Methods - - concurrent_vector_base_v3() { - my_early_size = 0; - my_first_block = 0; // here is not default_initial_segments - for( segment_index_t i = 0; i < pointers_per_short_table; i++) - my_storage[i].array = NULL; - my_segment = my_storage; - } - __TBB_EXPORTED_METHOD ~concurrent_vector_base_v3(); - - static segment_index_t segment_index_of( size_type index ) { - return segment_index_t( __TBB_Log2( index|1 ) ); - } - - static segment_index_t segment_base( segment_index_t k ) { - return (segment_index_t(1)< - class vector_iterator - { - //! concurrent_vector over which we are iterating. - Container* my_vector; - - //! Index into the vector - size_t my_index; - - //! Caches my_vector->internal_subscript(my_index) - /** NULL if cached value is not available */ - mutable Value* my_item; - - template - friend vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ); - - template - friend bool operator==( const vector_iterator& i, const vector_iterator& j ); - - template - friend bool operator<( const vector_iterator& i, const vector_iterator& j ); - - template - friend ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ); - - template - friend class internal::vector_iterator; - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class tbb::concurrent_vector; -#else -public: // workaround for MSVC -#endif - - vector_iterator( const Container& vector, size_t index, void *ptr = 0 ) : - my_vector(const_cast(&vector)), - my_index(index), - my_item(static_cast(ptr)) - {} - - public: - //! Default constructor - vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {} - - vector_iterator( const vector_iterator& other ) : - my_vector(other.my_vector), - my_index(other.my_index), - my_item(other.my_item) - {} - - vector_iterator operator+( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index+offset ); - } - vector_iterator &operator+=( ptrdiff_t offset ) { - my_index+=offset; - my_item = NULL; - return *this; - } - vector_iterator operator-( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index-offset ); - } - vector_iterator &operator-=( ptrdiff_t offset ) { - my_index-=offset; - my_item = NULL; - return *this; - } - Value& operator*() const { - Value* item = my_item; - if( !item ) { - item = my_item = &my_vector->internal_subscript(my_index); - } - __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" ); - return *item; - } - Value& operator[]( ptrdiff_t k ) const { - return my_vector->internal_subscript(my_index+k); - } - Value* operator->() const {return &operator*();} - - //! Pre increment - vector_iterator& operator++() { - size_t k = ++my_index; - if( my_item ) { - // Following test uses 2's-complement wizardry - if( (k& (k-2))==0 ) { - // k is a power of two that is at least k-2 - my_item= NULL; - } else { - ++my_item; - } - } - return *this; - } - - //! Pre decrement - vector_iterator& operator--() { - __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); - size_t k = my_index--; - if( my_item ) { - // Following test uses 2's-complement wizardry - if( (k& (k-2))==0 ) { - // k is a power of two that is at least k-2 - my_item= NULL; - } else { - --my_item; - } - } - return *this; - } - - //! 
Post increment - vector_iterator operator++(int) { - vector_iterator result = *this; - operator++(); - return result; - } - - //! Post decrement - vector_iterator operator--(int) { - vector_iterator result = *this; - operator--(); - return result; - } - - // STL support - - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ) { - return vector_iterator( *v.my_vector, v.my_index+offset ); - } - - template - bool operator==( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index==j.my_index && i.my_vector == j.my_vector; - } - - template - bool operator!=( const vector_iterator& i, const vector_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index - bool operator>( const vector_iterator& i, const vector_iterator& j ) { - return j - bool operator>=( const vector_iterator& i, const vector_iterator& j ) { - return !(i - bool operator<=( const vector_iterator& i, const vector_iterator& j ) { - return !(j - ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ) { - return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index); - } - - template - class allocator_base { - public: - typedef typename A::template - rebind::other allocator_type; - allocator_type my_allocator; - - allocator_base(const allocator_type &a = allocator_type() ) : my_allocator(a) {} - }; - -} // namespace internal -//! @endcond - -//! Concurrent vector container -/** concurrent_vector is a container having the following main properties: - - It provides random indexed access to its elements. The index of the first element is 0. - - It ensures safe concurrent growing its size (different threads can safely append new elements). - - Adding new elements does not invalidate existing iterators and does not change indices of existing items. - -@par Compatibility - The class meets all Container Requirements and Reversible Container Requirements from - C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1). But it doesn't meet - Sequence Requirements due to absence of insert() and erase() methods. - -@par Exception Safety - Methods working with memory allocation and/or new elements construction can throw an - exception if allocator fails to allocate memory or element's default constructor throws one. - Concurrent vector's element of type T must conform to the following requirements: - - Throwing an exception is forbidden for destructor of T. - - Default constructor of T must not throw an exception OR its non-virtual destructor must safely work when its object memory is zero-initialized. - . - Otherwise, the program's behavior is undefined. -@par - If an exception happens inside growth or assignment operation, an instance of the vector becomes invalid unless it is stated otherwise in the method documentation. - Invalid state means: - - There are no guaranties that all items were initialized by a constructor. The rest of items is zero-filled, including item where exception happens. - - An invalid vector instance cannot be repaired; it is unable to grow anymore. - - Size and capacity reported by the vector are incorrect, and calculated as if the failed operation were successful. 
- - Attempt to access not allocated elements using operator[] or iterators results in access violation or segmentation fault exception, and in case of using at() method a C++ exception is thrown. - . - If a concurrent grow operation successfully completes, all the elements it has added to the vector will remain valid and accessible even if one of subsequent grow operations fails. - -@par Fragmentation - Unlike an STL vector, a concurrent_vector does not move existing elements if it needs - to allocate more memory. The container is divided into a series of contiguous arrays of - elements. The first reservation, growth, or assignment operation determines the size of - the first array. Using small number of elements as initial size incurs fragmentation that - may increase element access time. Internal layout can be optimized by method compact() that - merges several smaller arrays into one solid. - -@par Changes since TBB 2.1 - - Fixed guarantees of concurrent_vector::size() and grow_to_at_least() methods to assure elements are allocated. - - Methods end()/rbegin()/back() are partly thread-safe since they use size() to get the end of vector - - Added resize() methods (not thread-safe) - - Added cbegin/cend/crbegin/crend methods - - Changed return type of methods grow* and push_back to iterator - -@par Changes since TBB 2.0 - - Implemented exception-safety guaranties - - Added template argument for allocator - - Added allocator argument in constructors - - Faster index calculation - - First growth call specifies a number of segments to be merged in the first allocation. - - Fixed memory blow up for swarm of vector's instances of small size - - Added grow_by(size_type n, const_reference t) growth using copying constructor to init new items. - - Added STL-like constructors. - - Added operators ==, < and derivatives - - Added at() method, approved for using after an exception was thrown inside the vector - - Added get_allocator() method. - - Added assign() methods - - Added compact() method to defragment first segments - - Added swap() method - - range() defaults on grainsize = 1 supporting auto grainsize algorithms. 
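// A minimal usage sketch of the growth guarantees described above (illustrative only;
// the function name is hypothetical, a C++11 lambda is assumed for brevity, and the
// non-deprecated TBB interface where push_back() returns an iterator is used):
#include "tbb/concurrent_vector.h"
#include "tbb/parallel_for.h"
#include "tbb/blocked_range.h"

void example_concurrent_append() {
    tbb::concurrent_vector<int> v;
    tbb::parallel_for( tbb::blocked_range<int>(0, 1000),
        [&]( const tbb::blocked_range<int> &r ) {
            for( int i = r.begin(); i != r.end(); ++i )
                v.push_back(i);   // thread-safe: existing elements never move, indices stay valid
        } );
    v.shrink_to_fit();            // not thread-safe: call serially to merge small segments
}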
- - @ingroup containers */ -template -class concurrent_vector: protected internal::allocator_base, - private internal::concurrent_vector_base { -private: - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} - template - generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - template - friend class internal::vector_iterator; -public: - //------------------------------------------------------------------------ - // STL compatible types - //------------------------------------------------------------------------ - typedef internal::concurrent_vector_base_v3::size_type size_type; - typedef typename internal::allocator_base::allocator_type allocator_type; - - typedef T value_type; - typedef ptrdiff_t difference_type; - typedef T& reference; - typedef const T& const_reference; - typedef T *pointer; - typedef const T *const_pointer; - - typedef internal::vector_iterator iterator; - typedef internal::vector_iterator const_iterator; - -#if !defined(_MSC_VER) || _CPPLIB_VER>=300 - // Assume ISO standard definition of std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#else - // Use non-standard std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */ - - //------------------------------------------------------------------------ - // Parallel algorithm support - //------------------------------------------------------------------------ - typedef generic_range_type range_type; - typedef generic_range_type const_range_type; - - //------------------------------------------------------------------------ - // STL compatible constructors & destructors - //------------------------------------------------------------------------ - - //! Construct empty vector. - explicit concurrent_vector(const allocator_type &a = allocator_type()) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - } - - //! Copying constructor - concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_copy(vector, sizeof(T), ©_array); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Copying constructor for vector with different allocator type - template - concurrent_vector( const concurrent_vector& vector, const allocator_type& a = allocator_type() ) - : internal::allocator_base(a), internal::concurrent_vector_base() - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_copy(vector.internal_vector_base(), sizeof(T), ©_array); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! 
Construction with initial size specified by argument n - explicit concurrent_vector(size_type n) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Construction with initial size specified by argument n, initialization by copying of t, and given allocator instance - concurrent_vector(size_type n, const_reference t, const allocator_type& a = allocator_type()) - : internal::allocator_base(a) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Construction with copying iteration range and given allocator instance - template - concurrent_vector(I first, I last, const allocator_type &a = allocator_type()) - : internal::allocator_base(a) - { - vector_allocator_ptr = &internal_allocator; - __TBB_TRY { - internal_assign_range(first, last, static_cast::is_integer> *>(0) ); - } __TBB_CATCH(...) { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - __TBB_RETHROW(); - } - } - - //! Assignment - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( this != &vector ) - internal_assign(vector, sizeof(T), &destroy_array, &assign_array, ©_array); - return *this; - } - - //! Assignment for vector with different allocator type - template - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( static_cast( this ) != static_cast( &vector ) ) - internal_assign(vector.internal_vector_base(), - sizeof(T), &destroy_array, &assign_array, ©_array); - return *this; - } - - //------------------------------------------------------------------------ - // Concurrent operations - //------------------------------------------------------------------------ - //! Grow by "delta" elements. -#if TBB_DEPRECATED - /** Returns old size. */ - size_type grow_by( size_type delta ) { - return delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size; - } -#else - /** Returns iterator pointing to the first new element. */ - iterator grow_by( size_type delta ) { - return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array, NULL ) : my_early_size); - } -#endif - - //! Grow by "delta" elements using copying constuctor. -#if TBB_DEPRECATED - /** Returns old size. */ - size_type grow_by( size_type delta, const_reference t ) { - return delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast(&t) ) : my_early_size; - } -#else - /** Returns iterator pointing to the first new element. */ - iterator grow_by( size_type delta, const_reference t ) { - return iterator(*this, delta ? internal_grow_by( delta, sizeof(T), &initialize_array_by, static_cast(&t) ) : my_early_size); - } -#endif - - //! Append minimal sequence of elements such that size()>=n. -#if TBB_DEPRECATED - /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated. - May return while other elements are being constructed by other threads. 
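// Illustrative sketch (hypothetical helper, assumes "tbb/concurrent_vector.h" is included):
// with the non-deprecated interface, grow_by() returns an iterator to the first element it
// appended, so the caller can initialize the freshly added slots afterwards.
void example_grow_by( tbb::concurrent_vector<double> &v ) {
    tbb::concurrent_vector<double>::iterator it = v.grow_by(10);  // appends 10 default-constructed elements
    for( int i = 0; i < 10; ++i, ++it )
        *it = 0.5 * i;                                            // fill the appended range
}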
*/ - void grow_to_at_least( size_type n ) { - if( n ) internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL ); - }; -#else - /** The new elements are default constructed. Blocks until all elements in range [0..n) are allocated. - May return while other elements are being constructed by other threads. - Returns iterator that points to beginning of appended sequence. - If no elements were appended, returns iterator pointing to nth element. */ - iterator grow_to_at_least( size_type n ) { - size_type m=0; - if( n ) { - m = internal_grow_to_at_least_with_result( n, sizeof(T), &initialize_array, NULL ); - if( m>n ) m=n; - } - return iterator(*this, m); - }; -#endif - - //! Push item -#if TBB_DEPRECATED - size_type push_back( const_reference item ) -#else - /** Returns iterator pointing to the new element. */ - iterator push_back( const_reference item ) -#endif - { - size_type k; - void *ptr = internal_push_back(sizeof(T),k); - internal_loop_guide loop(1, ptr); - loop.init(&item); -#if TBB_DEPRECATED - return k; -#else - return iterator(*this, k, ptr); -#endif - } - - //! Get reference to element at given index. - /** This method is thread-safe for concurrent reads, and also while growing the vector, - as long as the calling thread has checked that index<size(). */ - reference operator[]( size_type index ) { - return internal_subscript(index); - } - - //! Get const reference to element at given index. - const_reference operator[]( size_type index ) const { - return internal_subscript(index); - } - - //! Get reference to element at given index. Throws exceptions on errors. - reference at( size_type index ) { - return internal_subscript_with_exceptions(index); - } - - //! Get const reference to element at given index. Throws exceptions on errors. - const_reference at( size_type index ) const { - return internal_subscript_with_exceptions(index); - } - - //! Get range for iterating with parallel algorithms - range_type range( size_t grainsize = 1) { - return range_type( begin(), end(), grainsize ); - } - - //! Get const range for iterating with parallel algorithms - const_range_type range( size_t grainsize = 1 ) const { - return const_range_type( begin(), end(), grainsize ); - } - //------------------------------------------------------------------------ - // Capacity - //------------------------------------------------------------------------ - //! Return size of vector. It may include elements under construction - size_type size() const { - size_type sz = my_early_size, cp = internal_capacity(); - return cp < sz ? cp : sz; - } - - //! Return true if vector is not empty or has elements under construction at least. - bool empty() const {return !my_early_size;} - - //! Maximum size to which array can grow without allocating more memory. Concurrent allocations are not included in the value. - size_type capacity() const {return internal_capacity();} - - //! Allocate enough space to grow to size n without having to allocate more memory later. - /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. - The capacity afterwards may be bigger than the requested reservation. */ - void reserve( size_type n ) { - if( n ) - internal_reserve(n, sizeof(T), max_size()); - } - - //! Resize the vector. Not thread-safe. - void resize( size_type n ) { - internal_resize( n, sizeof(T), max_size(), NULL, &destroy_array, &initialize_array ); - } - - //! Resize the vector, copy t for new elements. Not thread-safe. 
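// Illustrative sketch (hypothetical helper, assumes "tbb/concurrent_vector.h" and
// "tbb/parallel_for.h" are included, plus a C++11 lambda): at() is the checked,
// exception-throwing counterpart of operator[], and range() exposes the vector to
// TBB's parallel algorithms with a chosen grain size.
void example_parallel_touch( tbb::concurrent_vector<int> &v ) {
    if( !v.empty() )
        v.at(0) = 0;                       // throws std::out_of_range on a bad index
    tbb::parallel_for( v.range(64),        // grain size of 64 elements per chunk
        []( const tbb::concurrent_vector<int>::range_type &r ) {
            for( tbb::concurrent_vector<int>::iterator i = r.begin(); i != r.end(); ++i )
                ++*i;                      // safe: each chunk touches a disjoint index range
        } );
}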
- void resize( size_type n, const_reference t ) { - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } - -#if TBB_DEPRECATED - //! An alias for shrink_to_fit() - void compact() {shrink_to_fit();} -#endif /* TBB_DEPRECATED */ - - //! Optimize memory usage and fragmentation. - void shrink_to_fit(); - - //! Upper bound on argument to reserve. - size_type max_size() const {return (~size_type(0))/sizeof(T);} - - //------------------------------------------------------------------------ - // STL support - //------------------------------------------------------------------------ - - //! start iterator - iterator begin() {return iterator(*this,0);} - //! end iterator - iterator end() {return iterator(*this,size());} - //! start const iterator - const_iterator begin() const {return const_iterator(*this,0);} - //! end const iterator - const_iterator end() const {return const_iterator(*this,size());} - //! start const iterator - const_iterator cbegin() const {return const_iterator(*this,0);} - //! end const iterator - const_iterator cend() const {return const_iterator(*this,size());} - //! reverse start iterator - reverse_iterator rbegin() {return reverse_iterator(end());} - //! reverse end iterator - reverse_iterator rend() {return reverse_iterator(begin());} - //! reverse start const iterator - const_reverse_iterator rbegin() const {return const_reverse_iterator(end());} - //! reverse end const iterator - const_reverse_iterator rend() const {return const_reverse_iterator(begin());} - //! reverse start const iterator - const_reverse_iterator crbegin() const {return const_reverse_iterator(end());} - //! reverse end const iterator - const_reverse_iterator crend() const {return const_reverse_iterator(begin());} - //! the first item - reference front() { - __TBB_ASSERT( size()>0, NULL); - return static_cast(my_segment[0].array)[0]; - } - //! the first item const - const_reference front() const { - __TBB_ASSERT( size()>0, NULL); - return static_cast(my_segment[0].array)[0]; - } - //! the last item - reference back() { - __TBB_ASSERT( size()>0, NULL); - return internal_subscript( size()-1 ); - } - //! the last item const - const_reference back() const { - __TBB_ASSERT( size()>0, NULL); - return internal_subscript( size()-1 ); - } - //! return allocator object - allocator_type get_allocator() const { return this->my_allocator; } - - //! assign n items by copying t item - void assign(size_type n, const_reference t) { - clear(); - internal_resize( n, sizeof(T), max_size(), static_cast(&t), &destroy_array, &initialize_array_by ); - } - - //! assign range [first, last) - template - void assign(I first, I last) { - clear(); internal_assign_range( first, last, static_cast::is_integer> *>(0) ); - } - - //! swap two instances - void swap(concurrent_vector &vector) { - if( this != &vector ) { - concurrent_vector_base_v3::internal_swap(static_cast(vector)); - std::swap(this->my_allocator, vector.my_allocator); - } - } - - //! Clear container while keeping memory allocated. - /** To free up the memory, use in conjunction with method compact(). Not thread safe **/ - void clear() { - internal_clear(&destroy_array); - } - - //! Clear and destroy vector. - ~concurrent_vector() { - segment_t *table = my_segment; - internal_free_segments( reinterpret_cast(table), internal_clear(&destroy_array), my_first_block ); - // base class destructor call should be then - } - - const internal::concurrent_vector_base_v3 &internal_vector_base() const { return *this; } -private: - //! 
Allocate k items - static void *internal_allocator(internal::concurrent_vector_base_v3 &vb, size_t k) { - return static_cast&>(vb).my_allocator.allocate(k); - } - //! Free k segments from table - void internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block); - - //! Get reference to element at given index. - T& internal_subscript( size_type index ) const; - - //! Get reference to element at given index with errors checks - T& internal_subscript_with_exceptions( size_type index ) const; - - //! assign n items by copying t - void internal_assign_n(size_type n, const_pointer p) { - internal_resize( n, sizeof(T), max_size(), static_cast(p), &destroy_array, p? &initialize_array_by : &initialize_array ); - } - - //! helper class - template class is_integer_tag; - - //! assign integer items by copying when arguments are treated as iterators. See C++ Standard 2003 23.1.1p9 - template - void internal_assign_range(I first, I last, is_integer_tag *) { - internal_assign_n(static_cast(first), &static_cast(last)); - } - //! inline proxy assign by iterators - template - void internal_assign_range(I first, I last, is_integer_tag *) { - internal_assign_iterators(first, last); - } - //! assign by iterators - template - void internal_assign_iterators(I first, I last); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array( void* begin, const void*, size_type n ); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array_by( void* begin, const void* src, size_type n ); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n ); - - //! Assign n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n ); - - //! Destroy n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n ); - - //! Exception-aware helper class for filling a segment by exception-danger operators of user class - class internal_loop_guide : internal::no_copy { - public: - const pointer array; - const size_type n; - size_type i; - internal_loop_guide(size_type ntrials, void *ptr) - : array(static_cast(ptr)), n(ntrials), i(0) {} - void init() { for(; i < n; ++i) new( &array[i] ) T(); } - void init(const void *src) { for(; i < n; ++i) new( &array[i] ) T(*static_cast(src)); } - void copy(const void *src) { for(; i < n; ++i) new( &array[i] ) T(static_cast(src)[i]); } - void assign(const void *src) { for(; i < n; ++i) array[i] = static_cast(src)[i]; } - template void iterate(I &src) { for(; i < n; ++i, ++src) new( &array[i] ) T( *src ); } - ~internal_loop_guide() { - if(i < n) // if exception raised, do zerroing on the rest of items - std::memset(array+i, 0, (n-i)*sizeof(value_type)); - } - }; -}; - -template -void concurrent_vector::shrink_to_fit() { - internal_segments_table old; - __TBB_TRY { - if( internal_compact( sizeof(T), &old, &destroy_array, ©_array ) ) - internal_free_segments( old.table, pointers_per_long_table, old.first_block ); // free joined and unnecessary segments - } __TBB_CATCH(...) { - if( old.first_block ) // free segment allocated for compacting. 
Only for support of exceptions in ctor of user T[ype] - internal_free_segments( old.table, 1, old.first_block ); - __TBB_RETHROW(); - } -} - -template -void concurrent_vector::internal_free_segments(void *table[], segment_index_t k, segment_index_t first_block) { - // Free the arrays - while( k > first_block ) { - --k; - T* array = static_cast(table[k]); - table[k] = NULL; - if( array > internal::vector_allocation_error_flag ) // check for correct segment pointer - this->my_allocator.deallocate( array, segment_size(k) ); - } - T* array = static_cast(table[0]); - if( array > internal::vector_allocation_error_flag ) { - __TBB_ASSERT( first_block > 0, NULL ); - while(k > 0) table[--k] = NULL; - this->my_allocator.deallocate( array, segment_size(first_block) ); - } -} - -template -T& concurrent_vector::internal_subscript( size_type index ) const { - __TBB_ASSERT( index < my_early_size, "index out of bounds" ); - size_type j = index; - segment_index_t k = segment_base_index_of( j ); - __TBB_ASSERT( (segment_t*)my_segment != my_storage || k < pointers_per_short_table, "index is being allocated" ); - // no need in __TBB_load_with_acquire since thread works in own space or gets -#if TBB_USE_THREADING_TOOLS - T* array = static_cast( tbb::internal::itt_load_pointer_v3(&my_segment[k].array)); -#else - T* array = static_cast(my_segment[k].array); -#endif /* TBB_USE_THREADING_TOOLS */ - __TBB_ASSERT( array != internal::vector_allocation_error_flag, "the instance is broken by bad allocation. Use at() instead" ); - __TBB_ASSERT( array, "index is being allocated" ); - return array[j]; -} - -template -T& concurrent_vector::internal_subscript_with_exceptions( size_type index ) const { - if( index >= my_early_size ) - internal::throw_exception(internal::eid_out_of_range); // throw std::out_of_range - size_type j = index; - segment_index_t k = segment_base_index_of( j ); - if( (segment_t*)my_segment == my_storage && k >= pointers_per_short_table ) - internal::throw_exception(internal::eid_segment_range_error); // throw std::range_error - void *array = my_segment[k].array; // no need in __TBB_load_with_acquire - if( array <= internal::vector_allocation_error_flag ) // check for correct segment pointer - internal::throw_exception(internal::eid_index_range_error); // throw std::range_error - return static_cast(array)[j]; -} - -template template -void concurrent_vector::internal_assign_iterators(I first, I last) { - __TBB_ASSERT(my_early_size == 0, NULL); - size_type n = std::distance(first, last); - if( !n ) return; - internal_reserve(n, sizeof(T), max_size()); - my_early_size = n; - segment_index_t k = 0; - size_type sz = segment_size( my_first_block ); - while( sz < n ) { - internal_loop_guide loop(sz, my_segment[k].array); - loop.iterate(first); - n -= sz; - if( !k ) k = my_first_block; - else { ++k; sz <<= 1; } - } - internal_loop_guide loop(n, my_segment[k].array); - loop.iterate(first); -} - -template -void concurrent_vector::initialize_array( void* begin, const void *, size_type n ) { - internal_loop_guide loop(n, begin); loop.init(); -} - -template -void concurrent_vector::initialize_array_by( void* begin, const void *src, size_type n ) { - internal_loop_guide loop(n, begin); loop.init(src); -} - -template -void concurrent_vector::copy_array( void* dst, const void* src, size_type n ) { - internal_loop_guide loop(n, dst); loop.copy(src); -} - -template -void concurrent_vector::assign_array( void* dst, const void* src, size_type n ) { - internal_loop_guide loop(n, dst); loop.assign(src); -} - -#if 
defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warning - #pragma warning (push) - #pragma warning (disable: 4189) -#endif -template -void concurrent_vector::destroy_array( void* begin, size_type n ) { - T* array = static_cast(begin); - for( size_type j=n; j>0; --j ) - array[j-1].~T(); // destructors are supposed to not throw any exceptions -} -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4189 is back - -// concurrent_vector's template functions -template -inline bool operator==(const concurrent_vector &a, const concurrent_vector &b) { - // Simply: return a.size() == b.size() && std::equal(a.begin(), a.end(), b.begin()); - if(a.size() != b.size()) return false; - typename concurrent_vector::const_iterator i(a.begin()); - typename concurrent_vector::const_iterator j(b.begin()); - for(; i != a.end(); ++i, ++j) - if( !(*i == *j) ) return false; - return true; -} - -template -inline bool operator!=(const concurrent_vector &a, const concurrent_vector &b) -{ return !(a == b); } - -template -inline bool operator<(const concurrent_vector &a, const concurrent_vector &b) -{ return (std::lexicographical_compare(a.begin(), a.end(), b.begin(), b.end())); } - -template -inline bool operator>(const concurrent_vector &a, const concurrent_vector &b) -{ return b < a; } - -template -inline bool operator<=(const concurrent_vector &a, const concurrent_vector &b) -{ return !(b < a); } - -template -inline bool operator>=(const concurrent_vector &a, const concurrent_vector &b) -{ return !(a < b); } - -template -inline void swap(concurrent_vector &a, concurrent_vector &b) -{ a.swap( b ); } - -} // namespace tbb - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && defined(_Wp64) - #pragma warning (pop) -#endif // warning 4267 is back - -#endif /* __TBB_concurrent_vector_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/critical_section.h b/deal.II/bundled/tbb30_104oss/include/tbb/critical_section.h deleted file mode 100644 index a3e435a0bc..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/critical_section.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _TBB_CRITICAL_SECTION_H_ -#define _TBB_CRITICAL_SECTION_H_ - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#include -#endif // _WIN32||WIN64 - -#include "tbb_stddef.h" -#include "tbb_thread.h" -#include "tbb_exception.h" - -#include "tbb_profiling.h" - -namespace tbb { - - namespace internal { -class critical_section_v4 : internal::no_copy { -#if _WIN32||_WIN64 - CRITICAL_SECTION my_impl; -#else - pthread_mutex_t my_impl; -#endif - tbb_thread::id my_tid; -public: - - void __TBB_EXPORTED_METHOD internal_construct(); - - critical_section_v4() { -#if _WIN32||_WIN64 - InitializeCriticalSection(&my_impl); -#else - pthread_mutex_init(&my_impl, NULL); -#endif - internal_construct(); - } - - ~critical_section_v4() { - __TBB_ASSERT(my_tid == tbb_thread::id(), "Destroying a still-held critical section"); -#if _WIN32||_WIN64 - DeleteCriticalSection(&my_impl); -#else - pthread_mutex_destroy(&my_impl); -#endif - } - - class scoped_lock : internal::no_copy { - private: - critical_section_v4 &my_crit; - public: - scoped_lock( critical_section_v4& lock_me) :my_crit(lock_me) { - my_crit.lock(); - } - - ~scoped_lock() { - my_crit.unlock(); - } - }; - - void lock() { - tbb_thread::id local_tid = this_tbb_thread::get_id(); - if(local_tid == my_tid) throw_exception( eid_improper_lock ); -#if _WIN32||_WIN64 - EnterCriticalSection( &my_impl ); -#else - int rval = pthread_mutex_lock(&my_impl); - __TBB_ASSERT_EX(!rval, "critical_section::lock: pthread_mutex_lock failed"); -#endif - __TBB_ASSERT(my_tid == tbb_thread::id(), NULL); - my_tid = local_tid; - } - - bool try_lock() { - bool gotlock; - tbb_thread::id local_tid = this_tbb_thread::get_id(); - if(local_tid == my_tid) return false; -#if _WIN32||_WIN64 - gotlock = TryEnterCriticalSection( &my_impl ) != 0; -#else - int rval = pthread_mutex_trylock(&my_impl); - // valid returns are 0 (locked) and [EBUSY] - __TBB_ASSERT(rval == 0 || rval == EBUSY, "critical_section::trylock: pthread_mutex_trylock failed"); - gotlock = rval == 0; -#endif - if(gotlock) { - my_tid = local_tid; - } - return gotlock; - } - - void unlock() { - __TBB_ASSERT(this_tbb_thread::get_id() == my_tid, "thread unlocking critical_section is not thread that locked it"); - my_tid = tbb_thread::id(); -#if _WIN32||_WIN64 - LeaveCriticalSection( &my_impl ); -#else - int rval = pthread_mutex_unlock(&my_impl); - __TBB_ASSERT_EX(!rval, "critical_section::unlock: pthread_mutex_unlock failed"); -#endif - } - - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; -}; // critical_section_v4 -} // namespace internal -typedef internal::critical_section_v4 critical_section; - -__TBB_DEFINE_PROFILING_SET_NAME(critical_section) -} // namespace tbb -#endif // _TBB_CRITICAL_SECTION_H_ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/enumerable_thread_specific.h b/deal.II/bundled/tbb30_104oss/include/tbb/enumerable_thread_specific.h deleted file mode 100644 index c475e17595..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/enumerable_thread_specific.h +++ /dev/null @@ -1,999 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
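// Illustrative usage sketch for the critical_section shown above (hypothetical names):
// it is a fair, non-recursive mutex, so re-locking it from the thread that already
// holds it throws rather than deadlocking.
#include "tbb/critical_section.h"

tbb::critical_section cs;
long shared_counter = 0;

void example_increment() {
    tbb::critical_section::scoped_lock lock(cs);   // released automatically at end of scope
    ++shared_counter;
}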
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_enumerable_thread_specific_H -#define __TBB_enumerable_thread_specific_H - -#include "concurrent_vector.h" -#include "tbb_thread.h" -#include "cache_aligned_allocator.h" -#include "aligned_space.h" -#include // for memcpy - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif - -namespace tbb { - -//! enum for selecting between single key and key-per-instance versions -enum ets_key_usage_type { ets_key_per_instance, ets_no_key }; - -namespace interface6 { - - //! @cond - namespace internal { - - template - class ets_base: tbb::internal::no_copy { - protected: -#if _WIN32||_WIN64 - typedef DWORD key_type; -#else - typedef pthread_t key_type; -#endif -#if __TBB_GCC_3_3_PROTECTED_BROKEN - public: -#endif - struct slot; - - struct array { - array* next; - size_t lg_size; - slot& at( size_t k ) { - return ((slot*)(void*)(this+1))[k]; - } - size_t size() const {return (size_t)1<>(8*sizeof(size_t)-lg_size); - } - }; - struct slot { - key_type key; - void* ptr; - bool empty() const {return !key;} - bool match( key_type k ) const {return key==k;} - bool claim( key_type k ) { - __TBB_ASSERT(sizeof(tbb::atomic)==sizeof(key_type), NULL); - return tbb::internal::punned_cast*>(&key)->compare_and_swap(k,0)==0; - } - }; -#if __TBB_GCC_3_3_PROTECTED_BROKEN - protected: -#endif - - static key_type key_of_current_thread() { - tbb::tbb_thread::id id = tbb::this_tbb_thread::get_id(); - key_type k; - memcpy( &k, &id, sizeof(k) ); - return k; - } - - //! Root of linked list of arrays of decreasing size. - /** NULL if and only if my_count==0. - Each array in the list is half the size of its predecessor. */ - atomic my_root; - atomic my_count; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // _size in bytes - array* allocate( size_t lg_size ) { - size_t n = 1<(create_array( sizeof(array)+n*sizeof(slot) )); - a->lg_size = lg_size; - std::memset( a+1, 0, n*sizeof(slot) ); - return a; - } - void free(array* a) { - size_t n = 1<<(a->lg_size); - free_array( (void *)a, size_t(sizeof(array)+n*sizeof(slot)) ); - } - static size_t hash( key_type k ) { - // Multiplicative hashing. Client should use *upper* bits. 
- // casts required for Mac gcc4.* compiler -#if __TBB_WORDSIZE == 4 - return uintptr_t(k)*0x9E3779B9; -#else - return uintptr_t(k)*0x9E3779B97F4A7C15; -#endif - } - - ets_base() {my_root=NULL; my_count=0;} - virtual ~ets_base(); // g++ complains if this is not virtual... - void* table_lookup( bool& exists ); - void table_clear(); - slot& table_find( key_type k ) { - size_t h = hash(k); - array* r = my_root; - size_t mask = r->mask(); - for(size_t i = r->start(h);;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() || s.match(k) ) - return s; - } - } - void table_reserve_for_copy( const ets_base& other ) { - __TBB_ASSERT(!my_root,NULL); - __TBB_ASSERT(!my_count,NULL); - if( other.my_root ) { - array* a = allocate(other.my_root->lg_size); - a->next = NULL; - my_root = a; - my_count = other.my_count; - } - } - }; - - template - ets_base::~ets_base() { - __TBB_ASSERT(!my_root, NULL); - } - - template - void ets_base::table_clear() { - while( array* r = my_root ) { - my_root = r->next; - free(r); - } - my_count = 0; - } - - template - void* ets_base::table_lookup( bool& exists ) { - const key_type k = key_of_current_thread(); - - __TBB_ASSERT(k!=0,NULL); - void* found; - size_t h = hash(k); - for( array* r=my_root; r; r=r->next ) { - size_t mask=r->mask(); - for(size_t i = r->start(h); ;i=(i+1)&mask) { - slot& s = r->at(i); - if( s.empty() ) break; - if( s.match(k) ) { - if( r==my_root ) { - // Success at top level - exists = true; - return s.ptr; - } else { - // Success at some other level. Need to insert at top level. - exists = true; - found = s.ptr; - goto insert; - } - } - } - } - // Key does not yet exist - exists = false; - found = create_local(); - { - size_t c = ++my_count; - array* r = my_root; - if( !r || c>r->size()/2 ) { - size_t s = r ? r->lg_size : 2; - while( c>size_t(1)<<(s-1) ) ++s; - array* a = allocate(s); - for(;;) { - a->next = my_root; - array* new_r = my_root.compare_and_swap(a,r); - if( new_r==r ) break; - if( new_r->lg_size>=s ) { - // Another thread inserted an equal or bigger array, so our array is superfluous. - free(a); - break; - } - r = new_r; - } - } - } - insert: - // Guaranteed to be room for it, and it is not present, so search for empty slot and grab it. - array* ir = my_root; - size_t mask = ir->mask(); - for(size_t i = ir->start(h);;i=(i+1)&mask) { - slot& s = ir->at(i); - if( s.empty() ) { - if( s.claim(k) ) { - s.ptr = found; - return found; - } - } - } - } - - //! 
Specialization that exploits native TLS - template <> - class ets_base: protected ets_base { - typedef ets_base super; -#if _WIN32||_WIN64 - typedef DWORD tls_key_t; - void create_key() { my_key = TlsAlloc(); } - void destroy_key() { TlsFree(my_key); } - void set_tls(void * value) { TlsSetValue(my_key, (LPVOID)value); } - void* get_tls() { return (void *)TlsGetValue(my_key); } -#else - typedef pthread_key_t tls_key_t; - void create_key() { pthread_key_create(&my_key, NULL); } - void destroy_key() { pthread_key_delete(my_key); } - void set_tls( void * value ) const { pthread_setspecific(my_key, value); } - void* get_tls() const { return pthread_getspecific(my_key); } -#endif - tls_key_t my_key; - virtual void* create_local() = 0; - virtual void* create_array(size_t _size) = 0; // _size in bytes - virtual void free_array(void* ptr, size_t _size) = 0; // size in bytes - public: - ets_base() {create_key();} - ~ets_base() {destroy_key();} - void* table_lookup( bool& exists ) { - void* found = get_tls(); - if( found ) { - exists=true; - } else { - found = super::table_lookup(exists); - set_tls(found); - } - return found; - } - void table_clear() { - destroy_key(); - create_key(); - super::table_clear(); - } - }; - - //! Random access iterator for traversing the thread local copies. - template< typename Container, typename Value > - class enumerable_thread_specific_iterator -#if defined(_WIN64) && defined(_MSC_VER) - // Ensure that Microsoft's internal template function _Val_type works correctly. - : public std::iterator -#endif /* defined(_WIN64) && defined(_MSC_VER) */ - { - //! current position in the concurrent_vector - - Container *my_container; - typename Container::size_type my_index; - mutable Value *my_value; - - template - friend enumerable_thread_specific_iterator operator+( ptrdiff_t offset, - const enumerable_thread_specific_iterator& v ); - - template - friend bool operator==( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend bool operator<( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ); - - template - friend ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, const enumerable_thread_specific_iterator& j ); - - template - friend class enumerable_thread_specific_iterator; - - public: - - enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : - my_container(&const_cast(container)), my_index(index), my_value(NULL) {} - - //! 
Default constructor - enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {} - - template - enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator& other ) : - my_container( other.my_container ), my_index( other.my_index), my_value( const_cast(other.my_value) ) {} - - enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const { - return enumerable_thread_specific_iterator(*my_container, my_index + offset); - } - - enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) { - my_index += offset; - my_value = NULL; - return *this; - } - - enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const { - return enumerable_thread_specific_iterator( *my_container, my_index-offset ); - } - - enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) { - my_index -= offset; - my_value = NULL; - return *this; - } - - Value& operator*() const { - Value* value = my_value; - if( !value ) { - value = my_value = reinterpret_cast(&(*my_container)[my_index].value); - } - __TBB_ASSERT( value==reinterpret_cast(&(*my_container)[my_index].value), "corrupt cache" ); - return *value; - } - - Value& operator[]( ptrdiff_t k ) const { - return (*my_container)[my_index + k].value; - } - - Value* operator->() const {return &operator*();} - - enumerable_thread_specific_iterator& operator++() { - ++my_index; - my_value = NULL; - return *this; - } - - enumerable_thread_specific_iterator& operator--() { - --my_index; - my_value = NULL; - return *this; - } - - //! Post increment - enumerable_thread_specific_iterator operator++(int) { - enumerable_thread_specific_iterator result = *this; - ++my_index; - my_value = NULL; - return result; - } - - //! Post decrement - enumerable_thread_specific_iterator operator--(int) { - enumerable_thread_specific_iterator result = *this; - --my_index; - my_value = NULL; - return result; - } - - // STL support - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - enumerable_thread_specific_iterator operator+( ptrdiff_t offset, - const enumerable_thread_specific_iterator& v ) { - return enumerable_thread_specific_iterator( v.my_container, v.my_index + offset ); - } - - template - bool operator==( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index==j.my_index && i.my_container == j.my_container; - } - - template - bool operator!=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index - bool operator>( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return j - bool operator>=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(i - bool operator<=( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return !(j - ptrdiff_t operator-( const enumerable_thread_specific_iterator& i, - const enumerable_thread_specific_iterator& j ) { - return i.my_index-j.my_index; - } - - template - class segmented_iterator -#if defined(_WIN64) && defined(_MSC_VER) - : public std::iterator -#endif - { - template - friend bool operator==(const 
segmented_iterator& i, const segmented_iterator& j); - - template - friend bool operator!=(const segmented_iterator& i, const segmented_iterator& j); - - template - friend class segmented_iterator; - - public: - - segmented_iterator() {my_segcont = NULL;} - - segmented_iterator( const SegmentedContainer& _segmented_container ) : - my_segcont(const_cast(&_segmented_container)), - outer_iter(my_segcont->end()) { } - - ~segmented_iterator() {} - - typedef typename SegmentedContainer::iterator outer_iterator; - typedef typename SegmentedContainer::value_type InnerContainer; - typedef typename InnerContainer::iterator inner_iterator; - - // STL support - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef typename SegmentedContainer::size_type size_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::input_iterator_tag iterator_category; - - // Copy Constructor - template - segmented_iterator(const segmented_iterator& other) : - my_segcont(other.my_segcont), - outer_iter(other.outer_iter), - // can we assign a default-constructed iterator to inner if we're at the end? - inner_iter(other.inner_iter) - {} - - // assignment - template - segmented_iterator& operator=( const segmented_iterator& other) { - if(this != &other) { - my_segcont = other.my_segcont; - outer_iter = other.outer_iter; - if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter; - } - return *this; - } - - // allow assignment of outer iterator to segmented iterator. Once it is - // assigned, move forward until a non-empty inner container is found or - // the end of the outer container is reached. - segmented_iterator& operator=(const outer_iterator& new_outer_iter) { - __TBB_ASSERT(my_segcont != NULL, NULL); - // check that this iterator points to something inside the segmented container - for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) { - if( !outer_iter->empty() ) { - inner_iter = outer_iter->begin(); - break; - } - } - return *this; - } - - // pre-increment - segmented_iterator& operator++() { - advance_me(); - return *this; - } - - // post-increment - segmented_iterator operator++(int) { - segmented_iterator tmp = *this; - operator++(); - return tmp; - } - - bool operator==(const outer_iterator& other_outer) const { - __TBB_ASSERT(my_segcont != NULL, NULL); - return (outer_iter == other_outer && - (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin())); - } - - bool operator!=(const outer_iterator& other_outer) const { - return !operator==(other_outer); - - } - - // (i)* RHS - reference operator*() const { - __TBB_ASSERT(my_segcont != NULL, NULL); - __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container"); - __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen - return *inner_iter; - } - - // i-> - pointer operator->() const { return &operator*();} - - private: - SegmentedContainer* my_segcont; - outer_iterator outer_iter; - inner_iterator inner_iter; - - void advance_me() { - __TBB_ASSERT(my_segcont != NULL, NULL); - __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers - __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty. 
- ++inner_iter; - while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) { - inner_iter = outer_iter->begin(); - } - } - }; // segmented_iterator - - template - bool operator==( const segmented_iterator& i, - const segmented_iterator& j ) { - if(i.my_segcont != j.my_segcont) return false; - if(i.my_segcont == NULL) return true; - if(i.outer_iter != j.outer_iter) return false; - if(i.outer_iter == i.my_segcont->end()) return true; - return i.inner_iter == j.inner_iter; - } - - // != - template - bool operator!=( const segmented_iterator& i, - const segmented_iterator& j ) { - return !(i==j); - } - - template - struct destruct_only: tbb::internal::no_copy { - tbb::aligned_space value; - ~destruct_only() {value.begin()[0].~T();} - }; - - template - struct construct_by_default: tbb::internal::no_assign { - void construct(void*where) {new(where) T();} // C++ note: the () in T() ensure zero initialization. - construct_by_default( int ) {} - }; - - template - struct construct_by_exemplar: tbb::internal::no_assign { - const T exemplar; - void construct(void*where) {new(where) T(exemplar);} - construct_by_exemplar( const T& t ) : exemplar(t) {} - }; - - template - struct construct_by_finit: tbb::internal::no_assign { - Finit f; - void construct(void* where) {new(where) T(f());} - construct_by_finit( const Finit& f_ ) : f(f_) {} - }; - - // storage for initialization function pointer - template - class callback_base { - public: - // Clone *this - virtual callback_base* clone() = 0; - // Destruct and free *this - virtual void destroy() = 0; - // Need virtual destructor to satisfy GCC compiler warning - virtual ~callback_base() { } - // Construct T at where - virtual void construct(void* where) = 0; - }; - - template - class callback_leaf: public callback_base, Constructor { - template callback_leaf( const X& x ) : Constructor(x) {} - - typedef typename tbb::tbb_allocator my_allocator_type; - - /*override*/ callback_base* clone() { - void* where = my_allocator_type().allocate(1); - return new(where) callback_leaf(*this); - } - - /*override*/ void destroy() { - my_allocator_type().destroy(this); - my_allocator_type().deallocate(this,1); - } - - /*override*/ void construct(void* where) { - Constructor::construct(where); - } - public: - template - static callback_base* make( const X& x ) { - void* where = my_allocator_type().allocate(1); - return new(where) callback_leaf(x); - } - }; - - //! Template for adding padding in order to avoid false sharing - /** ModularSize should be sizeof(U) modulo the cache line size. - All maintenance of the space will be done explicitly on push_back, - and all thread local copies must be destroyed before the concurrent - vector is deleted. - */ - template - struct ets_element { - char value[ModularSize==0 ? sizeof(U) : sizeof(U)+(tbb::internal::NFS_MaxLineSize-ModularSize)]; - void unconstruct() { - tbb::internal::punned_cast(&value)->~U(); - } - }; - - } // namespace internal - //! @endcond - - //! The enumerable_thread_specific container - /** enumerable_thread_specific has the following properties: - - thread-local copies are lazily created, with default, exemplar or function initialization. - - thread-local copies do not move (during lifetime, and excepting clear()) so the address of a copy is invariant. - - the contained objects need not have operator=() defined if combine is not used. - - enumerable_thread_specific containers may be copy-constructed or assigned. 
- - thread-local copies can be managed by hash-table, or can be accessed via TLS storage for speed. - - outside of parallel contexts, the contents of all thread-local copies are accessible by iterator or using combine or combine_each methods - - @par Segmented iterator - When the thread-local objects are containers with input_iterators defined, a segmented iterator may - be used to iterate over all the elements of all thread-local copies. - - @par combine and combine_each - - Both methods are defined for enumerable_thread_specific. - - combine() requires the the type T have operator=() defined. - - neither method modifies the contents of the object (though there is no guarantee that the applied methods do not modify the object.) - - Both are evaluated in serial context (the methods are assumed to be non-benign.) - - @ingroup containers */ - template , - ets_key_usage_type ETS_key_type=ets_no_key > - class enumerable_thread_specific: internal::ets_base { - - template friend class enumerable_thread_specific; - - typedef internal::ets_element padded_element; - - //! A generic range, used to create range objects from the iterators - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range(begin_,end_,grainsize_) {} - template - generic_range_type( const generic_range_type& r) : blocked_range(r.begin(),r.end(),r.grainsize()) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type; - typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type; - - internal::callback_base *my_construct_callback; - - internal_collection_type my_locals; - - /*override*/ void* create_local() { -#if TBB_DEPRECATED - void* lref = &my_locals[my_locals.push_back(padded_element())]; -#else - void* lref = &*my_locals.push_back(padded_element()); -#endif - my_construct_callback->construct(lref); - return lref; - } - - void unconstruct_locals() { - for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) { - cvi->unconstruct(); - } - } - - typedef typename Allocator::template rebind< uintptr_t >::other array_allocator_type; - - // _size is in bytes - /*override*/ void* create_array(size_t _size) { - size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); - return array_allocator_type().allocate(nelements); - } - - /*override*/ void free_array( void* _ptr, size_t _size) { - size_t nelements = (_size + sizeof(uintptr_t) -1) / sizeof(uintptr_t); - array_allocator_type().deallocate( reinterpret_cast(_ptr),nelements); - } - - public: - - //! 
Basic types - typedef Allocator allocator_type; - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef T* pointer; - typedef const T* const_pointer; - typedef typename internal_collection_type::size_type size_type; - typedef typename internal_collection_type::difference_type difference_type; - - // Iterator types - typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator; - typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator; - - // Parallel range types - typedef generic_range_type< iterator > range_type; - typedef generic_range_type< const_iterator > const_range_type; - - //! Default constructor. Each local instance of T is default constructed. - enumerable_thread_specific() : - my_construct_callback( internal::callback_leaf >::make(/*dummy argument*/0) ) - {} - - //! Constructor with initializer functor. Each local instance of T is constructed by T(finit()). - template - enumerable_thread_specific( Finit finit ) : - my_construct_callback( internal::callback_leaf >::make( finit ) ) - {} - - //! Constuctor with exemplar. Each local instance of T is copied-constructed from the exemplar. - enumerable_thread_specific(const T& exemplar) : - my_construct_callback( internal::callback_leaf >::make( exemplar ) ) - {} - - //! Destructor - ~enumerable_thread_specific() { - my_construct_callback->destroy(); - this->clear(); // deallocation before the derived class is finished destructing - // So free(array *) is still accessible - } - - //! returns reference to local, discarding exists - reference local() { - bool exists; - return local(exists); - } - - //! Returns reference to calling thread's local copy, creating one if necessary - reference local(bool& exists) { - __TBB_ASSERT(ETS_key_type==ets_no_key,"ets_key_per_instance not yet implemented"); - void* ptr = this->table_lookup(exists); - return *(T*)ptr; - } - - //! Get the number of local copies - size_type size() const { return my_locals.size(); } - - //! true if there have been no local copies created - bool empty() const { return my_locals.empty(); } - - //! begin iterator - iterator begin() { return iterator( my_locals, 0 ); } - //! end iterator - iterator end() { return iterator(my_locals, my_locals.size() ); } - - //! begin const iterator - const_iterator begin() const { return const_iterator(my_locals, 0); } - - //! end const iterator - const_iterator end() const { return const_iterator(my_locals, my_locals.size()); } - - //! Get range for parallel algorithms - range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } - - //! Get const range for parallel algorithms - const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); } - - //! 
Destroys local copies - void clear() { - unconstruct_locals(); - my_locals.clear(); - this->table_clear(); - // callback is not destroyed - // exemplar is not destroyed - } - - private: - - template - void internal_copy( const enumerable_thread_specific& other); - - public: - - template - enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base () - { - internal_copy(other); - } - - enumerable_thread_specific( const enumerable_thread_specific& other ) : internal::ets_base () - { - internal_copy(other); - } - - private: - - template - enumerable_thread_specific & - internal_assign(const enumerable_thread_specific& other) { - if(static_cast( this ) != static_cast( &other )) { - this->clear(); - my_construct_callback->destroy(); - my_construct_callback = 0; - internal_copy( other ); - } - return *this; - } - - public: - - // assignment - enumerable_thread_specific& operator=(const enumerable_thread_specific& other) { - return internal_assign(other); - } - - template - enumerable_thread_specific& operator=(const enumerable_thread_specific& other) - { - return internal_assign(other); - } - - // combine_func_t has signature T(T,T) or T(const T&, const T&) - template - T combine(combine_func_t f_combine) { - if(begin() == end()) { - internal::destruct_only location; - my_construct_callback->construct(location.value.begin()); - return *location.value.begin(); - } - const_iterator ci = begin(); - T my_result = *ci; - while(++ci != end()) - my_result = f_combine( my_result, *ci ); - return my_result; - } - - // combine_func_t has signature void(T) or void(const T&) - template - void combine_each(combine_func_t f_combine) { - for(const_iterator ci = begin(); ci != end(); ++ci) { - f_combine( *ci ); - } - } - - }; // enumerable_thread_specific - - template - template - void enumerable_thread_specific::internal_copy( const enumerable_thread_specific& other) { - // Initialize my_construct_callback first, so that it is valid even if rest of this routine throws an exception. - my_construct_callback = other.my_construct_callback->clone(); - - typedef internal::ets_base base; - __TBB_ASSERT(my_locals.size()==0,NULL); - this->table_reserve_for_copy( other ); - for( base::array* r=other.my_root; r; r=r->next ) { - for( size_t i=0; isize(); ++i ) { - base::slot& s1 = r->at(i); - if( !s1.empty() ) { - base::slot& s2 = this->table_find(s1.key); - if( s2.empty() ) { -#if TBB_DEPRECATED - void* lref = &my_locals[my_locals.push_back(padded_element())]; -#else - void* lref = &*my_locals.push_back(padded_element()); -#endif - s2.ptr = new(lref) T(*(U*)s1.ptr); - s2.key = s1.key; - } else { - // Skip the duplicate - } - } - } - } - } - - template< typename Container > - class flattened2d { - - // This intermediate typedef is to address issues with VC7.1 compilers - typedef typename Container::value_type conval_type; - - public: - - //! 
Basic types - typedef typename conval_type::size_type size_type; - typedef typename conval_type::difference_type difference_type; - typedef typename conval_type::allocator_type allocator_type; - typedef typename conval_type::value_type value_type; - typedef typename conval_type::reference reference; - typedef typename conval_type::const_reference const_reference; - typedef typename conval_type::pointer pointer; - typedef typename conval_type::const_pointer const_pointer; - - typedef typename internal::segmented_iterator iterator; - typedef typename internal::segmented_iterator const_iterator; - - flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : - my_container(const_cast(&c)), my_begin(b), my_end(e) { } - - flattened2d( const Container &c ) : - my_container(const_cast(&c)), my_begin(c.begin()), my_end(c.end()) { } - - iterator begin() { return iterator(*my_container) = my_begin; } - iterator end() { return iterator(*my_container) = my_end; } - const_iterator begin() const { return const_iterator(*my_container) = my_begin; } - const_iterator end() const { return const_iterator(*my_container) = my_end; } - - size_type size() const { - size_type tot_size = 0; - for(typename Container::const_iterator i = my_begin; i != my_end; ++i) { - tot_size += i->size(); - } - return tot_size; - } - - private: - - Container *my_container; - typename Container::const_iterator my_begin; - typename Container::const_iterator my_end; - - }; - - template - flattened2d flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) { - return flattened2d(c, b, e); - } - - template - flattened2d flatten2d(const Container &c) { - return flattened2d(c); - } - -} // interface6 - -namespace internal { -using interface6::internal::segmented_iterator; -} - -using interface6::enumerable_thread_specific; -using interface6::flattened2d; -using interface6::flatten2d; - -} // namespace tbb - -#endif diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/index.html b/deal.II/bundled/tbb30_104oss/include/tbb/index.html deleted file mode 100644 index 7e4552e30c..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/index.html +++ /dev/null @@ -1,28 +0,0 @@ - - - -
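For reference, the enumerable_thread_specific interface removed above is easiest to see in a short usage sketch. This is a minimal example against the public TBB API declared in that header, not part of the patch; the counter variable and the loop bound are illustrative only, and the lambdas assume a C++11-capable compiler.

    #include <cstdio>
    #include <functional>
    #include "tbb/enumerable_thread_specific.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    int main() {
        // One int per thread, copy-constructed from the exemplar 0 on first use.
        tbb::enumerable_thread_specific<int> counters(0);

        // Each worker updates only its own thread-local copy, so no locking is needed.
        tbb::parallel_for(tbb::blocked_range<int>(0, 1000000),
            [&](const tbb::blocked_range<int>& r) {
                counters.local() += static_cast<int>(r.size());
            });

        // Outside the parallel region the copies can be reduced...
        int total = counters.combine(std::plus<int>());   // combine() needs a T(T,T)-style functor
        std::printf("total = %d\n", total);

        // ...or visited one by one.
        counters.combine_each([](int v) { std::printf("per-thread contribution = %d\n", v); });
        return 0;
    }

When T is itself a container, flatten2d(counters) additionally provides a single iterator range over the elements of all thread-local copies, as documented in the header above.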

-Overview
-
-Include files for Threading Building Blocks classes and functions.
-
-Click here to see all files in the directory.
-
-Directories
-
-machine
-    Include files for low-level architecture specific functionality.
-compat
-    Include files for source level compatibility with other frameworks.
-
-Up to parent directory
-
-Copyright © 2005-2010 Intel Corporation. All Rights Reserved.
-
-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
-registered trademarks or trademarks of Intel Corporation or its
-subsidiaries in the United States and other countries.
-
-* Other names and brands may be claimed as the property of others. - - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/ibm_aix51.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/ibm_aix51.h deleted file mode 100644 index 408d48dfd1..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/ibm_aix51.h +++ /dev/null @@ -1,60 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 1 - -#include -#include -#include - -extern "C" { - -int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand); -int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand); -void __TBB_machine_flush (); - -} - -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cas_32(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cas_64(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cas_64(P,V,C) -#define __TBB_Yield() sched_yield() - -#if __GNUC__ -#define __TBB_full_memory_fence() __asm__ __volatile__("sync": : :"memory") -#define __TBB_release_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") -#else -// IBM C++ Compiler does not support inline assembly -#define __TBB_full_memory_fence() __TBB_machine_flush () -#define __TBB_release_consistency_helper() __TBB_machine_flush () -#endif diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_common.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_common.h deleted file mode 100644 index 2306eb002f..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_common.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#define __TBB_Yield() sched_yield() - -/* Futex definitions */ -#include - -#if defined(SYS_futex) - -#define __TBB_USE_FUTEX 1 -#include -#include -// Unfortunately, some versions of Linux do not have a header that defines FUTEX_WAIT and FUTEX_WAKE. - -#ifdef FUTEX_WAIT -#define __TBB_FUTEX_WAIT FUTEX_WAIT -#else -#define __TBB_FUTEX_WAIT 0 -#endif - -#ifdef FUTEX_WAKE -#define __TBB_FUTEX_WAKE FUTEX_WAKE -#else -#define __TBB_FUTEX_WAKE 1 -#endif - -#ifndef __TBB_ASSERT -#error machine specific headers must be included after tbb_stddef.h -#endif - -namespace tbb { - -namespace internal { - -inline int futex_wait( void *futex, int comparand ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAIT,comparand,NULL,NULL,0 ); -#if TBB_USE_ASSERT - int e = errno; - __TBB_ASSERT( r==0||r==EWOULDBLOCK||(r==-1&&(e==EAGAIN||e==EINTR)), "futex_wait failed." ); -#endif /* TBB_USE_ASSERT */ - return r; -} - -inline int futex_wakeup_one( void *futex ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,1,NULL,NULL,0 ); - __TBB_ASSERT( r==0||r==1, "futex_wakeup_one: more than one thread woken up?" ); - return r; -} - -inline int futex_wakeup_all( void *futex ) { - int r = ::syscall( SYS_futex,futex,__TBB_FUTEX_WAKE,INT_MAX,NULL,NULL,0 ); - __TBB_ASSERT( r>=0, "futex_wakeup_all: error in waking up threads" ); - return r; -} - -} /* namespace internal */ - -} /* namespace tbb */ - -#endif /* SYS_futex */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia32.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia32.h deleted file mode 100644 index eb4028a43e..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia32.h +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
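The futex_wait/futex_wakeup_one wrappers removed above exist so that TBB can put a thread to sleep cheaply until another thread signals it. Below is a stand-alone sketch of that pattern, not TBB code: the type name is hypothetical, it is Linux-only, and error handling plus the atomic stores and memory fences that the machine layer normally supplies are omitted.

    #include <climits>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/futex.h>

    struct binary_event {                    // hypothetical helper, not a TBB type
        volatile int state;                  // 0 = not signaled, 1 = signaled

        void wait() {
            // FUTEX_WAIT sleeps only while the word still holds the expected value 0,
            // so a signal() that races ahead of us makes the call return immediately.
            while (state == 0)
                ::syscall(SYS_futex, &state, FUTEX_WAIT, 0, NULL, NULL, 0);
        }

        void signal() {
            state = 1;                       // real code would use an atomic store plus fence
            ::syscall(SYS_futex, &state, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);  // wake every waiter
        }
    };

    int main() {
        binary_event e;
        e.state = 0;
        e.signal();
        e.wait();                            // returns immediately: state is already 1
        return 0;
    }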
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#include - -#define __TBB_WORDSIZE 4 -#define __TBB_BIG_ENDIAN 0 - -#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory") -#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") - -#if __TBB_ICC_ASM_VOLATILE_BROKEN -#define __TBB_VOLATILE -#else -#define __TBB_VOLATILE volatile -#endif - -#define __MACHINE_DECL_ATOMICS(S,T,X) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : "=r"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(addend), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : "=r"(result), "=m"(*(__TBB_VOLATILE T*)ptr) \ - : "0"(value), "m"(*(__TBB_VOLATILE T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__MACHINE_DECL_ATOMICS(1,int8_t,"") -__MACHINE_DECL_ATOMICS(2,int16_t,"") -__MACHINE_DECL_ATOMICS(4,int32_t,"l") - -static inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t result; -#if __PIC__ - /* compiling position-independent code */ - // EBX register preserved for compliance with position-independent code rules on IA32 - __asm__ __volatile__ ( - "pushl %%ebx\n\t" - "movl (%%ecx),%%ebx\n\t" - "movl 4(%%ecx),%%ecx\n\t" - "lock\n\t cmpxchg8b %1\n\t" - "popl %%ebx" - : "=A"(result), "=m"(*(int64_t *)ptr) - : "m"(*(int64_t *)ptr) - , "0"(comparand) - , "c"(&value) - : "memory", "esp" -#if __INTEL_COMPILER - ,"ebx" -#endif - ); -#else /* !__PIC__ */ - union { - int64_t i64; - int32_t i32[2]; - }; - i64 = value; - __asm__ __volatile__ ( - "lock\n\t cmpxchg8b %1\n\t" - : "=A"(result), "=m"(*(__TBB_VOLATILE int64_t *)ptr) - : "m"(*(__TBB_VOLATILE int64_t *)ptr) - , "0"(comparand) - , "b"(i32[0]), "c"(i32[1]) - : "memory" - ); -#endif /* __PIC__ */ - return result; -} - -static inline int32_t __TBB_machine_lg( uint32_t x ) { - int32_t j; - __asm__ ("bsr %1,%0" : "=r"(j) : "r"(x)); - return j; -} - -static inline void __TBB_machine_or( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\norl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); 
-} - -static inline void __TBB_machine_and( volatile void *ptr, uint32_t addend ) { - __asm__ __volatile__("lock\nandl %1,%0" : "=m"(*(__TBB_VOLATILE uint32_t *)ptr) : "r"(addend), "m"(*(__TBB_VOLATILE uint32_t *)ptr) : "memory"); -} - -static inline void __TBB_machine_pause( int32_t delay ) { - for (int32_t i = 0; i < delay; i++) { - __asm__ __volatile__("pause;"); - } - return; -} - -static inline int64_t __TBB_machine_load8 (const volatile void *ptr) { - int64_t result; - if( ((uint32_t)ptr&7u)==0 ) { - // Aligned load - __asm__ __volatile__ ( "fildq %1\n\t" - "fistpq %0" : "=m"(result) : "m"(*(const __TBB_VOLATILE uint64_t*)ptr) : "memory" ); - } else { - // Unaligned load - result = __TBB_machine_cmpswp8(const_cast(ptr),0,0); - } - return result; -} - -//! Handles misaligned 8-byte store -/** Defined in tbb_misc.cpp */ -extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ); -extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ); - -static inline void __TBB_machine_store8(volatile void *ptr, int64_t value) { - if( ((uint32_t)ptr&7u)==0 ) { - // Aligned store - __asm__ __volatile__ ( "fildq %1\n\t" - "fistpq %0" : "=m"(*(__TBB_VOLATILE int64_t*)ptr) : "m"(value) : "memory" ); - } else { - // Unaligned store -#if TBB_USE_PERFORMANCE_WARNINGS - __TBB_machine_store8_slow_perf_warning(ptr); -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - __TBB_machine_store8_slow(ptr,value); - } -} - -// Machine specific atomic operations - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd4(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore4(P,V) - -#define __TBB_Store8(P,V) __TBB_machine_store8(P,V) -#define __TBB_Load8(P) __TBB_machine_load8(P) - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - - -// Those we chose not to implement (they will be implemented generically using CMPSWP8) -#undef __TBB_FetchAndAdd8 -#undef __TBB_FetchAndStore8 - -// Definition of other functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Special atomic functions -#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia64.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia64.h deleted file mode 100644 index 502f8d04f6..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_ia64.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. 
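The per-size cmpswp/fetchadd/fetchstore primitives defined above are consumed by higher layers as the classic compare-and-swap retry loop: take a snapshot, compute the new value from it, and retry if another thread modified the word in between. A stand-alone illustration of that loop follows; it uses the GCC/Clang __sync builtin in place of the __TBB_CompareAndSwap4 macro, since the macro only exists inside tbb_machine.h.

    #include <cstdio>
    #include <stdint.h>

    // Fetch-and-add built from compare-and-swap alone.
    static int32_t fetch_and_add_via_cas(volatile int32_t* p, int32_t addend) {
        int32_t snapshot;
        do {
            snapshot = *p;                                    // value the update is based on
            // __sync_val_compare_and_swap returns the value found in memory;
            // the store happened only if that value equals our snapshot.
        } while (__sync_val_compare_and_swap(p, snapshot, snapshot + addend) != snapshot);
        return snapshot;                                      // value before the addition
    }

    int main() {
        volatile int32_t counter = 0;
        fetch_and_add_via_cas(&counter, 5);
        std::printf("counter = %d\n", (int)counter);          // prints 5
        return 0;
    }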
- - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#include -#include - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 0 -#define __TBB_DECL_FENCED_ATOMICS 1 - -// Most of the functions will be in a .s file - -extern "C" { - int8_t __TBB_machine_cmpswp1__TBB_full_fence (volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_fetchadd1__TBB_full_fence (volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1acquire(volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchadd1release(volatile void *ptr, int8_t addend); - int8_t __TBB_machine_fetchstore1acquire(volatile void *ptr, int8_t value); - int8_t __TBB_machine_fetchstore1release(volatile void *ptr, int8_t value); - - int16_t __TBB_machine_cmpswp2__TBB_full_fence (volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_fetchadd2__TBB_full_fence (volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2acquire(volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchadd2release(volatile void *ptr, int16_t addend); - int16_t __TBB_machine_fetchstore2acquire(volatile void *ptr, int16_t value); - int16_t __TBB_machine_fetchstore2release(volatile void *ptr, int16_t value); - - int32_t __TBB_machine_fetchstore4__TBB_full_fence (volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4acquire(volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchstore4release(volatile void *ptr, int32_t value); - int32_t __TBB_machine_fetchadd4acquire(volatile void *ptr, int32_t addend); - int32_t __TBB_machine_fetchadd4release(volatile void *ptr, int32_t addend); - - int64_t __TBB_machine_cmpswp8__TBB_full_fence (volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8acquire(volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchstore8release(volatile void *ptr, int64_t value); - int64_t __TBB_machine_fetchadd8acquire(volatile void *ptr, int64_t addend); - int64_t __TBB_machine_fetchadd8release(volatile void *ptr, int64_t addend); - - int8_t __TBB_machine_cmpswp1acquire(volatile void *ptr, 
int8_t value, int8_t comparand); - int8_t __TBB_machine_cmpswp1release(volatile void *ptr, int8_t value, int8_t comparand); - int8_t __TBB_machine_fetchstore1__TBB_full_fence (volatile void *ptr, int8_t value); - - int16_t __TBB_machine_cmpswp2acquire(volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_cmpswp2release(volatile void *ptr, int16_t value, int16_t comparand); - int16_t __TBB_machine_fetchstore2__TBB_full_fence (volatile void *ptr, int16_t value); - - int32_t __TBB_machine_cmpswp4__TBB_full_fence (volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4acquire(volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_cmpswp4release(volatile void *ptr, int32_t value, int32_t comparand); - int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value); - - int64_t __TBB_machine_cmpswp8acquire(volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_cmpswp8release(volatile void *ptr, int64_t value, int64_t comparand); - int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value); - - int64_t __TBB_machine_lg(uint64_t value); - void __TBB_machine_pause(int32_t delay); - bool __TBB_machine_trylockbyte( volatile unsigned char &ptr ); - int64_t __TBB_machine_lockbyte( volatile unsigned char &ptr ); - - //! Retrieves the current RSE backing store pointer. IA64 specific. - void* __TBB_get_bsp(); -} - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1__TBB_full_fence(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2__TBB_full_fence(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1__TBB_full_fence(P,V) -#define __TBB_FetchAndAdd1acquire(P,V) __TBB_machine_fetchadd1acquire(P,V) -#define __TBB_FetchAndAdd1release(P,V) __TBB_machine_fetchadd1release(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2__TBB_full_fence(P,V) -#define __TBB_FetchAndAdd2acquire(P,V) __TBB_machine_fetchadd2acquire(P,V) -#define __TBB_FetchAndAdd2release(P,V) __TBB_machine_fetchadd2release(P,V) -#define __TBB_FetchAndAdd4acquire(P,V) __TBB_machine_fetchadd4acquire(P,V) -#define __TBB_FetchAndAdd4release(P,V) __TBB_machine_fetchadd4release(P,V) -#define __TBB_FetchAndAdd8acquire(P,V) __TBB_machine_fetchadd8acquire(P,V) -#define __TBB_FetchAndAdd8release(P,V) __TBB_machine_fetchadd8release(P,V) - -#define __TBB_FetchAndStore1acquire(P,V) __TBB_machine_fetchstore1acquire(P,V) -#define __TBB_FetchAndStore1release(P,V) __TBB_machine_fetchstore1release(P,V) -#define __TBB_FetchAndStore2acquire(P,V) __TBB_machine_fetchstore2acquire(P,V) -#define __TBB_FetchAndStore2release(P,V) __TBB_machine_fetchstore2release(P,V) -#define __TBB_FetchAndStore4acquire(P,V) __TBB_machine_fetchstore4acquire(P,V) -#define __TBB_FetchAndStore4release(P,V) __TBB_machine_fetchstore4release(P,V) -#define __TBB_FetchAndStore8acquire(P,V) __TBB_machine_fetchstore8acquire(P,V) -#define __TBB_FetchAndStore8release(P,V) __TBB_machine_fetchstore8release(P,V) - -#define __TBB_CompareAndSwap1acquire(P,V,C) __TBB_machine_cmpswp1acquire(P,V,C) -#define __TBB_CompareAndSwap1release(P,V,C) __TBB_machine_cmpswp1release(P,V,C) -#define __TBB_CompareAndSwap2acquire(P,V,C) __TBB_machine_cmpswp2acquire(P,V,C) -#define __TBB_CompareAndSwap2release(P,V,C) __TBB_machine_cmpswp2release(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4__TBB_full_fence(P,V,C) -#define __TBB_CompareAndSwap4acquire(P,V,C) __TBB_machine_cmpswp4acquire(P,V,C) -#define 
__TBB_CompareAndSwap4release(P,V,C) __TBB_machine_cmpswp4release(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8__TBB_full_fence(P,V,C) -#define __TBB_CompareAndSwap8acquire(P,V,C) __TBB_machine_cmpswp8acquire(P,V,C) -#define __TBB_CompareAndSwap8release(P,V,C) __TBB_machine_cmpswp8release(P,V,C) - -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4__TBB_full_fence(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8__TBB_full_fence(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1__TBB_full_fence(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2__TBB_full_fence(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4__TBB_full_fence(P,V) -#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8__TBB_full_fence(P,V) - -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAdd8acquire(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAdd8release(P,-1) - -#ifndef __INTEL_COMPILER -/* Even though GCC imbues volatile loads with acquire semantics, - it sometimes moves loads over the acquire fence. The - fences defined here stop such incorrect code motion. */ -#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory") -#define __TBB_full_memory_fence() __asm__ __volatile__("mf": : :"memory") -#else -#define __TBB_release_consistency_helper() -#define __TBB_full_memory_fence() __mf() -#endif /* __INTEL_COMPILER */ - -// Special atomic functions -#define __TBB_CompareAndSwapW(P,V,C) __TBB_CompareAndSwap8(P,V,C) -#define __TBB_FetchAndStoreW(P,V) __TBB_FetchAndStore8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_FetchAndAdd8(P,V) -#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAdd8release(P,V) - -// Not needed -#undef __TBB_Store8 -#undef __TBB_Load8 - -// Definition of Lock functions -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) -#define __TBB_LockByte(P) __TBB_machine_lockbyte(P) - -// Definition of other utility functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_intel64.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_intel64.h deleted file mode 100644 index d7b94fcc95..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/linux_intel64.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#include - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 0 - -#define __TBB_release_consistency_helper() __asm__ __volatile__("": : :"memory") - -// __TBB_full_memory_fence can be predefined -#ifndef __TBB_full_memory_fence -#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory") -#endif - -#define __MACHINE_DECL_ATOMICS(S,T,X) \ -static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \ -{ \ - T result; \ - \ - __asm__ __volatile__("lock\ncmpxchg" X " %2,%1" \ - : "=a"(result), "=m"(*(volatile T*)ptr) \ - : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxadd" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(addend), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - \ -static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value) \ -{ \ - T result; \ - __asm__ __volatile__("lock\nxchg" X " %0,%1" \ - : "=r"(result),"=m"(*(volatile T*)ptr) \ - : "0"(value), "m"(*(volatile T*)ptr) \ - : "memory"); \ - return result; \ -} \ - -__MACHINE_DECL_ATOMICS(1,int8_t,"") -__MACHINE_DECL_ATOMICS(2,int16_t,"") -__MACHINE_DECL_ATOMICS(4,int32_t,"") -__MACHINE_DECL_ATOMICS(8,int64_t,"q") - -static inline int64_t __TBB_machine_lg( uint64_t x ) { - int64_t j; - __asm__ ("bsr %1,%0" : "=r"(j) : "r"(x)); - return j; -} - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(addend), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(addend), "m"(*(volatile uint64_t*)ptr) : "memory"); -} - -static inline void __TBB_machine_pause( int32_t delay ) { - for (int32_t i = 0; i < delay; i++) { - __asm__ __volatile__("pause;"); - } - return; -} - -// Machine specific atomic operations - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -#define 
__TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8(P,V) -#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V) - -#undef __TBB_Store8 -#undef __TBB_Load8 - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -// Definition of other functions -#ifndef __TBB_Pause -#define __TBB_Pause(V) __TBB_machine_pause(V) -#endif -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Special atomic functions -#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/mac_ppc.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/mac_ppc.h deleted file mode 100644 index d5b1364b21..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/mac_ppc.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#include - -inline int32_t __TBB_machine_cmpswp4 (volatile void *ptr, int32_t value, int32_t comparand ) -{ - int32_t result; - - __asm__ __volatile__("lwsync\n" - "0: lwarx %0,0,%2\n\t" /* load w/ reservation */ - "cmpw %0,%4\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stwcx. %3,0,%2\n\t" /* store new_value */ - "bne- 0b\n" /* retry if reservation lost */ - "1: lwsync" /* the exit */ - : "=&r"(result), "=m"(* (int32_t*) ptr) - : "r"(ptr), "r"(value), "r"(comparand), "m"(* (int32_t*) ptr) - : "cr0"); - return result; -} - -inline int64_t __TBB_machine_cmpswp8 (volatile void *ptr, int64_t value, int64_t comparand ) -{ - int64_t result; - __asm__ __volatile__("lwsync\n" - "0: ldarx %0,0,%2\n\t" /* load w/ reservation */ - "cmpd %0,%4\n\t" /* compare against comparand */ - "bne- 1f\n\t" /* exit if not same */ - "stdcx. 
%3,0,%2\n\t" /* store new_value */ - "bne- 0b\n" /* retry if reservation lost */ - "1: lwsync" /* the exit */ - : "=&b"(result), "=m"(* (int64_t*) ptr) - : "r"(ptr), "r"(value), "r"(comparand), "m"(* (int64_t*) ptr) - : "cr0"); - return result; -} - -#define __TBB_BIG_ENDIAN 1 - -#if defined(powerpc64) || defined(__powerpc64__) || defined(__ppc64__) -#define __TBB_WORDSIZE 8 -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#else -#define __TBB_WORDSIZE 4 -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#endif - -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_full_memory_fence() __asm__ __volatile__("sync": : :"memory") -#define __TBB_release_consistency_helper() __asm__ __volatile__("lwsync": : :"memory") diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/macos_common.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/macos_common.h deleted file mode 100644 index ee2412d788..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/macos_common.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#define __TBB_Yield() sched_yield() - - -// __TBB_DetectNumberOfWorkers - -#include -#include - -static inline int __TBB_macos_available_cpu() { - int name[2] = {CTL_HW, HW_AVAILCPU}; - int ncpu; - size_t size = sizeof(ncpu); - sysctl( name, 2, &ncpu, &size, NULL, 0 ); - return ncpu; -} - -#define __TBB_DetectNumberOfWorkers() __TBB_macos_available_cpu() - - -#ifndef __TBB_WORDSIZE -#define __TBB_WORDSIZE 4 -#endif - -#ifndef __TBB_BIG_ENDIAN -#if __BIG_ENDIAN__ -#define __TBB_BIG_ENDIAN 1 -#else -#define __TBB_BIG_ENDIAN 0 -#endif -#endif - - -#if !defined(__TBB_CompareAndSwap4) || !defined(__TBB_CompareAndSwap8) - -// Implementation of atomic operations based on OS provided primitives -#include - -#define __TBB_release_consistency_helper() OSMemoryBarrier() -#define __TBB_full_memory_fence() OSMemoryBarrier() - -static inline int32_t __TBB_macos_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand) -{ - __TBB_ASSERT( !((uintptr_t)ptr&0x3), "address not properly aligned for Mac OS atomics"); - int32_t* address = (int32_t*)ptr; - while( !OSAtomicCompareAndSwap32Barrier(comparand, value, address) ){ - int32_t snapshot = *address; - if( snapshot!=comparand ) return snapshot; - } - return comparand; -} - -static inline int64_t __TBB_macos_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand) -{ - __TBB_ASSERT( !((uintptr_t)ptr&0x7), "address not properly aligned for Mac OS atomics"); - int64_t* address = (int64_t*)ptr; - while( !OSAtomicCompareAndSwap64Barrier(comparand, value, address) ){ -#if __TBB_WORDSIZE==8 - int64_t snapshot = *address; -#else - int64_t snapshot = OSAtomicAdd64( 0, address ); -#endif - if( snapshot!=comparand ) return snapshot; - } - return comparand; -} - -#define __TBB_CompareAndSwap4(P,V,C) __TBB_macos_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_macos_cmpswp8(P,V,C) - -static inline int32_t __TBB_macos_fetchadd4(volatile void *ptr, int32_t addend) -{ - __TBB_ASSERT( !((uintptr_t)ptr&0x3), "address not properly aligned for Mac OS atomics"); - return OSAtomicAdd32Barrier(addend, (int32_t*)ptr) - addend; -} - -static inline int64_t __TBB_macos_fetchadd8(volatile void *ptr, int64_t addend) -{ - __TBB_ASSERT( !((uintptr_t)ptr&0x7), "address not properly aligned for Mac OS atomics"); - return OSAtomicAdd64Barrier(addend, (int64_t*)ptr) - addend; -} - -#define __TBB_FetchAndAdd4(P,V) __TBB_macos_fetchadd4(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_macos_fetchadd8(P,V) - -#if __TBB_WORDSIZE==4 -#define __TBB_CompareAndSwapW(P,V,C) __TBB_CompareAndSwap4(P,V,C) -#define __TBB_FetchAndAddW(P,V) __TBB_FetchAndAdd4(P,V) -#else -#define __TBB_CompareAndSwapW(P,V,C) __TBB_CompareAndSwap8(P,V,C) -#define __TBB_FetchAndAddW(P,V) __TBB_FetchAndAdd8(P,V) -#endif - -#endif /* !defined(__TBB_CompareAndSwap4) || !defined(__TBB_CompareAndSwap8) */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/sunos_sparc.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/sunos_sparc.h deleted file mode 100644 index f6176894e4..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/sunos_sparc.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
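The __TBB_DetectNumberOfWorkers hook in the Mac OS header above boils down to a single sysctl query. The same query as a stand-alone program is shown below; it is macOS-only, and the fallback to one CPU on failure is a defensive addition that is not in the original code.

    #include <cstdio>
    #include <cstddef>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int main() {
        int name[2] = { CTL_HW, HW_AVAILCPU };   // "CPUs available to this process"
        int ncpu = 0;
        size_t size = sizeof(ncpu);
        if (sysctl(name, 2, &ncpu, &size, NULL, 0) != 0 || ncpu < 1)
            ncpu = 1;                            // defensive fallback, not in the original
        std::printf("available cpus: %d\n", ncpu);
        return 0;
    }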
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#include - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 1 - -#define __TBB_release_consistency_helper() __asm__ __volatile__ ("": : :"memory") -#define __TBB_full_memory_fence() __asm__ __volatile__("membar #LoadLoad|#LoadStore|#StoreStore|#StoreLoad": : : "memory") - -//-------------------------------------------------- -// Compare and swap -//-------------------------------------------------- - -/** - * Atomic CAS for 32 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - ( @return value originally in memory at ptr, regardless of success -*/ -static inline int32_t __TBB_machine_cmpswp4(volatile void *ptr, int32_t value, int32_t comparand ){ - int32_t result; - __asm__ __volatile__( - "cas\t[%5],%4,%1" - : "=m"(*(int32_t *)ptr), "=r"(result) - : "m"(*(int32_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) - : "memory"); - return result; -} - -/** - * Atomic CAS for 64 bit values, if *ptr==comparand, then *ptr=value, returns *ptr - * @param ptr pointer to value in memory to be swapped with value if *ptr==comparand - * @param value value to assign *ptr to if *ptr==comparand - * @param comparand value to compare with *ptr - ( @return value originally in memory at ptr, regardless of success - */ -static inline int64_t __TBB_machine_cmpswp8(volatile void *ptr, int64_t value, int64_t comparand ){ - int64_t result; - __asm__ __volatile__( - "casx\t[%5],%4,%1" - : "=m"(*(int64_t *)ptr), "=r"(result) - : "m"(*(int64_t *)ptr), "1"(value), "r"(comparand), "r"(ptr) - : "memory"); - return result; -} - -//--------------------------------------------------- -// Fetch and add -//--------------------------------------------------- - -/** - * Atomic fetch and add for 32 bit values, in this case implemented by continuously checking success of atomicity - * @param ptr pointer to value to add addend to - * @param addened value to add to *ptr - * @return value at ptr before addened was added - */ -static inline int32_t __TBB_machine_fetchadd4(volatile void *ptr, int32_t addend){ 
- int32_t result; - __asm__ __volatile__ ( - "0:\t add\t %3, %4, %0\n" // do addition - "\t cas\t [%2], %3, %0\n" // cas to store result in memory - "\t cmp\t %3, %0\n" // check if value from memory is original - "\t bne,a,pn\t %%icc, 0b\n" // if not try again - "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added - : "=&r"(result), "=m"(*(int32_t *)ptr) - : "r"(ptr), "r"(*(int32_t *)ptr), "r"(addend), "m"(*(int32_t *)ptr) - : "ccr", "memory"); - return result; -} - -/** - * Atomic fetch and add for 64 bit values, in this case implemented by continuously checking success of atomicity - * @param ptr pointer to value to add addend to - * @param addened value to add to *ptr - * @return value at ptr before addened was added - */ -static inline int64_t __TBB_machine_fetchadd8(volatile void *ptr, int64_t addend){ - int64_t result; - __asm__ __volatile__ ( - "0:\t add\t %3, %4, %0\n" // do addition - "\t casx\t [%2], %3, %0\n" // cas to store result in memory - "\t cmp\t %3, %0\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %0, %3\n" // use branch delay slot to move new value in memory to be added - : "=&r"(result), "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "memory"); - return result; -} - -//-------------------------------------------------------- -// Logarithm (base two, integer) -//-------------------------------------------------------- - -static inline int64_t __TBB_machine_lg( uint64_t x ) { - uint64_t count; - // one hot encode - x |= (x >> 1); - x |= (x >> 2); - x |= (x >> 4); - x |= (x >> 8); - x |= (x >> 16); - x |= (x >> 32); - // count 1's - __asm__ ("popc %1, %0" : "=r"(count) : "r"(x) ); - return count-1; -} - -//-------------------------------------------------------- - -static inline void __TBB_machine_or( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__ ( - "0:\t or\t %2, %3, %%g1\n" // do addition - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - -static inline void __TBB_machine_and( volatile void *ptr, uint64_t addend ) { - __asm__ __volatile__ ( - "0:\t and\t %2, %3, %%g1\n" // do addition - "\t casx\t [%1], %2, %%g1\n" // cas to store result in memory - "\t cmp\t %2, %%g1\n" // check if value from memory is original - "\t bne,a,pn\t %%xcc, 0b\n" // if not try again - "\t mov %%g1, %2\n" // use branch delay slot to move new value in memory to be added - : "=m"(*(int64_t *)ptr) - : "r"(ptr), "r"(*(int64_t *)ptr), "r"(addend), "m"(*(int64_t *)ptr) - : "ccr", "g1", "memory"); -} - - -static inline void __TBB_machine_pause( int32_t delay ) { - // do nothing, inlined, doesnt matter -} - -// put 0xff in memory location, return memory value, -// generic trylockbyte puts 0x01, however this is fine -// because all that matters is that 0 is unlocked -static inline bool __TBB_machine_trylockbyte(unsigned char &flag){ - unsigned char result; - __asm__ __volatile__ ( - "ldstub\t [%2], %0\n" - : "=r"(result), "=m"(flag) - : "r"(&flag), "m"(flag) - : "memory"); - return result == 0; -} - - -// Machine specific atomic operations - -//#define __TBB_CompareAndSwap1(P,V,C) 
__TBB_machine_cmpswp1(P,V,C) // use generic version in tbb_machine.h -//#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) // use generic version in tbb_machine.h -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) __TBB_machine_cmpswp8(P,V,C) - -//#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) // use generic version in tbb_machine.h -//#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) // use generic version in tbb_machine.h -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchadd8(P,V) - -// use generic version in tbb_machine.h -//#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -//#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -//#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -//#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8(P,V) -//#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstore8(P,V) - -#undef __TBB_Store8 -#undef __TBB_Load8 - -#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V) - -// Definition of other functions -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Special atomic functions -#define __TBB_FetchAndAddWrelease(P,V) __TBB_FetchAndAddW(P,V) -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,-1) - -// Definition of Lock functions -// Repeatedly runs TryLockByte, no need to implement -#undef __TBB_LockByte - -#define __TBB_TryLockByte(P) __TBB_machine_trylockbyte(P) diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_api.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_api.h deleted file mode 100644 index 072dc0f74f..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_api.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
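The SPARC __TBB_machine_lg above computes floor(log2 x) by smearing the highest set bit into every lower position and then counting one bits with popc. The same technique in portable C++ is shown below, only to illustrate the trick; other ports use bsr or compiler intrinsics instead.

    #include <stdint.h>
    #include <cassert>

    // floor(log2(x)) for x > 0: after the shifts, every bit at or below the
    // highest set bit is 1, so popcount minus one is that bit's index.
    static int64_t log2_floor(uint64_t x) {
        x |= (x >> 1);
        x |= (x >> 2);
        x |= (x >> 4);
        x |= (x >> 8);
        x |= (x >> 16);
        x |= (x >> 32);
        int64_t count = 0;
        for (; x; x &= x - 1) ++count;    // portable popcount: clear lowest set bit each step
        return count - 1;
    }

    int main() {
        assert(log2_floor(1) == 0);
        assert(log2_floor(2) == 1);
        assert(log2_floor(1000) == 9);    // 512 <= 1000 < 1024
        return 0;
    }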
-*/ - -#ifndef __TBB_machine_windows_api_H -#define __TBB_machine_windows_api_H - -#if _WIN32 || _WIN64 - -#if _XBOX - -#define NONET -#define NOD3D -#include - -#else // Assume "usual" Windows - -#include - -#endif // _XBOX - -#if !defined(_WIN32_WINNT) -// The following Windows API function is declared explicitly; -// otherwise any user would have to specify /D_WIN32_WINNT=0x0400 -extern "C" BOOL WINAPI TryEnterCriticalSection( LPCRITICAL_SECTION ); -#endif - -#else -#error tbb/machine/windows_api.h should only be used for Windows based platforms -#endif // _WIN32 || _WIN64 - -#endif // __TBB_machine_windows_api_H diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_ia32.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_ia32.h deleted file mode 100644 index f4afd7cdbf..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_ia32.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#if defined(__INTEL_COMPILER) -#define __TBB_release_consistency_helper() __asm { __asm nop } -#elif _MSC_VER >= 1300 -extern "C" void _ReadWriteBarrier(); -#pragma intrinsic(_ReadWriteBarrier) -#define __TBB_release_consistency_helper() _ReadWriteBarrier() -#else -#error Unsupported compiler - need to define __TBB_release_consistency_helper to support it -#endif - -#define __TBB_full_memory_fence() __asm { __asm mfence } - -#define __TBB_WORDSIZE 4 -#define __TBB_BIG_ENDIAN 0 - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4244 4267) -#endif - -extern "C" { - __int64 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp8 (volatile void *ptr, __int64 value, __int64 comparand ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd8 (volatile void *ptr, __int64 addend ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore8 (volatile void *ptr, __int64 value ); - void __TBB_EXPORTED_FUNC __TBB_machine_store8 (volatile void *ptr, __int64 value ); - __int64 __TBB_EXPORTED_FUNC __TBB_machine_load8 (const volatile void *ptr); -} - -#define __TBB_DEFINE_ATOMICS(S,T,U,A,C) \ -static inline T __TBB_machine_cmpswp##S ( volatile void * ptr, U value, U comparand ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __TBB_release_consistency_helper(); \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov C , value \ - __asm mov A , comparand \ - __asm lock cmpxchg [edx], C \ - __asm mov result, A \ - } \ - __TBB_release_consistency_helper(); \ - return result; \ -} \ -\ -static inline T __TBB_machine_fetchadd##S ( volatile void * ptr, U addend ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __TBB_release_consistency_helper(); \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, addend \ - __asm lock xadd [edx], A \ - __asm mov result, A \ - } \ - __TBB_release_consistency_helper(); \ - return result; \ -}\ -\ -static inline T __TBB_machine_fetchstore##S ( volatile void * ptr, U value ) { \ - T result; \ - volatile T *p = (T *)ptr; \ - __TBB_release_consistency_helper(); \ - __asm \ - { \ - __asm mov edx, p \ - __asm mov A, value \ - __asm lock xchg [edx], A \ - __asm mov result, A \ - } \ - __TBB_release_consistency_helper(); \ - return result; \ -} - -__TBB_DEFINE_ATOMICS(1, __int8, __int8, al, cl) -__TBB_DEFINE_ATOMICS(2, __int16, __int16, ax, cx) -__TBB_DEFINE_ATOMICS(4, __int32, __int32, eax, ecx) -__TBB_DEFINE_ATOMICS(W, ptrdiff_t, ptrdiff_t, eax, ecx) - -static inline __int32 __TBB_machine_lg( unsigned __int64 i ) { - unsigned __int32 j; - __asm - { - bsr eax, i - mov j, eax - } - return j; -} - -static inline void __TBB_machine_OR( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock or [edx], eax - } -} - -static inline void __TBB_machine_AND( volatile void *operand, __int32 addend ) { - __asm - { - mov eax, addend - mov edx, [operand] - lock and [edx], eax - } -} - -static inline void __TBB_machine_pause (__int32 delay ) { - _asm - { - mov eax, delay - L1: - pause - add eax, -1 - jne L1 - } - return; -} - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) __TBB_machine_cmpswp4(P,V,C) -#define __TBB_CompareAndSwap8(P,V,C) __TBB_machine_cmpswp8(P,V,C) -#define __TBB_CompareAndSwapW(P,V,C) 
__TBB_machine_cmpswpW(P,V,C) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) __TBB_machine_fetchadd4(P,V) -#define __TBB_FetchAndAdd8(P,V) __TBB_machine_fetchadd8(P,V) -#define __TBB_FetchAndAddW(P,V) __TBB_machine_fetchaddW(P,V) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) __TBB_machine_fetchstore4(P,V) -#define __TBB_FetchAndStore8(P,V) __TBB_machine_fetchstore8(P,V) -#define __TBB_FetchAndStoreW(P,V) __TBB_machine_fetchstoreW(P,V) - -// Should define this: -#define __TBB_Store8(P,V) __TBB_machine_store8(P,V) -#define __TBB_Load8(P) __TBB_machine_load8(P) -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -// Definition of other functions -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte - -#if defined(_MSC_VER)&&_MSC_VER<1400 - static inline void* __TBB_machine_get_current_teb () { - void* pteb; - __asm mov eax, fs:[0x18] - __asm mov pteb, eax - return pteb; - } -#endif - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warnings 4244, 4267 are back - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_intel64.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_intel64.h deleted file mode 100644 index 1da213ba53..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/windows_intel64.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#include -#if !defined(__INTEL_COMPILER) -#pragma intrinsic(_InterlockedOr64) -#pragma intrinsic(_InterlockedAnd64) -#pragma intrinsic(_InterlockedCompareExchange) -#pragma intrinsic(_InterlockedCompareExchange64) -#pragma intrinsic(_InterlockedExchangeAdd) -#pragma intrinsic(_InterlockedExchangeAdd64) -#pragma intrinsic(_InterlockedExchange) -#pragma intrinsic(_InterlockedExchange64) -#endif /* !defined(__INTEL_COMPILER) */ - -#if defined(__INTEL_COMPILER) -#define __TBB_release_consistency_helper() __asm { __asm nop } -#define __TBB_full_memory_fence() __asm { __asm mfence } -#elif _MSC_VER >= 1300 -extern "C" void _ReadWriteBarrier(); -#pragma intrinsic(_ReadWriteBarrier) -#define __TBB_release_consistency_helper() _ReadWriteBarrier() -#pragma intrinsic(_mm_mfence) -#define __TBB_full_memory_fence() _mm_mfence() -#endif - -#define __TBB_WORDSIZE 8 -#define __TBB_BIG_ENDIAN 0 - -// ATTENTION: if you ever change argument types in machine-specific primitives, -// please take care of atomic_word<> specializations in tbb/atomic.h -extern "C" { - __int8 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp1 (volatile void *ptr, __int8 value, __int8 comparand ); - __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd1 (volatile void *ptr, __int8 addend ); - __int8 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore1 (volatile void *ptr, __int8 value ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_cmpswp2 (volatile void *ptr, __int16 value, __int16 comparand ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchadd2 (volatile void *ptr, __int16 addend ); - __int16 __TBB_EXPORTED_FUNC __TBB_machine_fetchstore2 (volatile void *ptr, __int16 value ); - void __TBB_EXPORTED_FUNC __TBB_machine_pause (__int32 delay ); -} - - -#if !__INTEL_COMPILER -extern "C" unsigned char _BitScanReverse64( unsigned long* i, unsigned __int64 w ); -#pragma intrinsic(_BitScanReverse64) -#endif - -inline __int64 __TBB_machine_lg( unsigned __int64 i ) { -#if __INTEL_COMPILER - unsigned __int64 j; - __asm - { - bsr rax, i - mov j, rax - } -#else - unsigned long j; - _BitScanReverse64( &j, i ); -#endif - return j; -} - -inline void __TBB_machine_OR( volatile void *operand, intptr_t addend ) { - _InterlockedOr64((__int64*)operand, addend); -} - -inline void __TBB_machine_AND( volatile void *operand, intptr_t addend ) { - _InterlockedAnd64((__int64*)operand, addend); -} - -#define __TBB_CompareAndSwap1(P,V,C) __TBB_machine_cmpswp1(P,V,C) -#define __TBB_CompareAndSwap2(P,V,C) __TBB_machine_cmpswp2(P,V,C) -#define __TBB_CompareAndSwap4(P,V,C) _InterlockedCompareExchange( (long*) P , V , C ) -#define __TBB_CompareAndSwap8(P,V,C) _InterlockedCompareExchange64( (__int64*) P , V , C ) -#define __TBB_CompareAndSwapW(P,V,C) _InterlockedCompareExchange64( (__int64*) P , V , C ) - -#define __TBB_FetchAndAdd1(P,V) __TBB_machine_fetchadd1(P,V) -#define __TBB_FetchAndAdd2(P,V) __TBB_machine_fetchadd2(P,V) -#define __TBB_FetchAndAdd4(P,V) _InterlockedExchangeAdd((long*) P , V ) -#define __TBB_FetchAndAdd8(P,V) _InterlockedExchangeAdd64((__int64*) P , V ) -#define __TBB_FetchAndAddW(P,V) _InterlockedExchangeAdd64((__int64*) P , V ) - -#define __TBB_FetchAndStore1(P,V) __TBB_machine_fetchstore1(P,V) -#define __TBB_FetchAndStore2(P,V) __TBB_machine_fetchstore2(P,V) -#define __TBB_FetchAndStore4(P,V) _InterlockedExchange((long*) P , V ) -#define __TBB_FetchAndStore8(P,V) _InterlockedExchange64((__int64*) P , V ) -#define __TBB_FetchAndStoreW(P,V) 
_InterlockedExchange64((__int64*) P , V ) - -// Not used if wordsize == 8 -#undef __TBB_Store8 -#undef __TBB_Load8 - -#define __TBB_AtomicOR(P,V) __TBB_machine_OR(P,V) -#define __TBB_AtomicAND(P,V) __TBB_machine_AND(P,V) - -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#define __TBB_Pause(V) __TBB_machine_pause(V) -#define __TBB_Log2(V) __TBB_machine_lg(V) - -// Use generic definitions from tbb_machine.h -#undef __TBB_TryLockByte -#undef __TBB_LockByte diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/machine/xbox360_ppc.h b/deal.II/bundled/tbb30_104oss/include/tbb/machine/xbox360_ppc.h deleted file mode 100644 index ed2529c4a6..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/machine/xbox360_ppc.h +++ /dev/null @@ -1,121 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#error Do not include this file directly; include tbb_machine.h instead -#endif - -#define NONET -#define NOD3D -#include "xtl.h" -#include "ppcintrinsics.h" - -#if _MSC_VER >= 1300 -extern "C" void _MemoryBarrier(); -#pragma intrinsic(_MemoryBarrier) -#define __TBB_release_consistency_helper() _MemoryBarrier() -#endif - -#define __TBB_full_memory_fence() __sync() - -#define __TBB_WORDSIZE 4 -#define __TBB_BIG_ENDIAN 1 - -//todo: define __TBB_DECL_FENCED_ATOMICS and define acquire/release primitives to maximize performance - -typedef __int64 int64_t; //required for definition of Store8/Load8 in atomic.h -typedef unsigned char uint8_t; //same reason - -inline __int32 __TBB_machine_cmpswp4(volatile void *ptr, __int32 value, __int32 comparand ) -{ - __lwsync(); - __int32 result = InterlockedCompareExchange((volatile LONG*)ptr, value, comparand); - __lwsync(); - return result; -} - -inline __int64 __TBB_machine_cmpswp8(volatile void *ptr, __int64 value, __int64 comparand ) -{ - __lwsync(); - __int64 result = InterlockedCompareExchange64((volatile LONG64*)ptr, value, comparand); - __lwsync(); - return result; -} - -#pragma optimize( "", off ) -inline void __TBB_machine_pause (__int32 delay ) -{ - for (__int32 i=0; i> 0) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 1) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 2) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 3) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 4) & 1) + - ((__TBB_XBOX360_HARDWARE_THREAD_MASK >> 5) & 1) + 1; // +1 accomodates for the master thread -} - -static inline int __TBB_XBOX360_GetHardwareThreadIndex(int workerThreadIndex) -{ - workerThreadIndex %= __TBB_XBOX360_DetectNumberOfWorkers()-1; - int m = __TBB_XBOX360_HARDWARE_THREAD_MASK; - int index = 0; - int skipcount = workerThreadIndex; - while (true) - { - if ((m & 1)!=0) - { - if (skipcount==0) break; - skipcount--; - } - m >>= 1; - index++; - } - return index; -} - -#define __TBB_DetectNumberOfWorkers() __TBB_XBOX360_DetectNumberOfWorkers() diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/mutex.h deleted file mode 100644 index dfa806f230..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/mutex.h +++ /dev/null @@ -1,240 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_mutex_H -#define __TBB_mutex_H - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif /* _WIN32||_WIN64 */ - -#include -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Wrapper around the platform's native reader-writer lock. -/** For testing purposes only. - @ingroup synchronization */ -class mutex { -public: - //! Construct unacquired mutex. - mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSection(&impl); - #else - int error_code = pthread_mutex_init(&impl,NULL); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { - public: - //! Construct lock that has not acquired a mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( mutex& mutex ) { - acquire( mutex ); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) - release(); - } - - //! Acquire lock on given mutex. - void acquire( mutex& mutex ) { -#if TBB_USE_ASSERT - internal_acquire(mutex); -#else - mutex.lock(); - my_mutex = &mutex; -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquire lock on given mutex. - bool try_acquire( mutex& mutex ) { -#if TBB_USE_ASSERT - return internal_try_acquire (mutex); -#else - bool result = mutex.try_lock(); - if( result ) - my_mutex = &mutex; - return result; -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void release() { -#if TBB_USE_ASSERT - internal_release (); -#else - my_mutex->unlock(); - my_mutex = NULL; -#endif /* TBB_USE_ASSERT */ - } - - private: - //! The pointer to the current mutex to work - mutex* my_mutex; - - //! All checks from acquire using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_acquire( mutex& m ); - - //! All checks from try_acquire using mutex.state were moved here - bool __TBB_EXPORTED_METHOD internal_try_acquire( mutex& m ); - - //! All checks from release using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_release(); - - friend class mutex; - }; - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - new(tmp.begin()) scoped_lock(*this); -#else - #if _WIN32||_WIN64 - EnterCriticalSection(&impl); - #else - pthread_mutex_lock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. 
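For orientation, a minimal usage sketch of the scoped-lock pattern documented in this header; the counter, the functions, and their names are illustrative and are not part of the patch:

    #include "tbb/mutex.h"

    static tbb::mutex counter_mutex;   // protects shared_counter (hypothetical)
    static long shared_counter = 0;

    void increment_counter() {
        // Acquired here, released automatically when 'lock' leaves scope,
        // even if an exception is thrown.
        tbb::mutex::scoped_lock lock(counter_mutex);
        ++shared_counter;
    }

    bool try_increment_counter() {
        tbb::mutex::scoped_lock lock;          // not yet attached to any mutex
        if (!lock.try_acquire(counter_mutex))  // non-blocking attempt
            return false;
        ++shared_counter;
        return true;                           // released in ~scoped_lock()
    }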
*/ - bool try_lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = NULL; - return s.internal_try_acquire(*this); -#else - #if _WIN32||_WIN64 - return TryEnterCriticalSection(&impl)!=0; - #else - return pthread_mutex_trylock(&impl)==0; - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void unlock() { -#if TBB_USE_ASSERT - aligned_space tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - #if _WIN32||_WIN64 - LeaveCriticalSection(&impl); - #else - pthread_mutex_unlock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Return native_handle - #if _WIN32||_WIN64 - typedef LPCRITICAL_SECTION native_handle_type; - #else - typedef pthread_mutex_t* native_handle_type; - #endif - native_handle_type native_handle() { return (native_handle_type) &impl; } - - enum state_t { - INITIALIZED=0x1234, - DESTROYED=0x789A, - HELD=0x56CD - }; -private: -#if _WIN32||_WIN64 - CRITICAL_SECTION impl; - enum state_t state; -#else - pthread_mutex_t impl; -#endif /* _WIN32||_WIN64 */ - - //! All checks from mutex constructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_construct(); - - //! All checks from mutex destructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_destroy(); - -#if _WIN32||_WIN64 -public: - //! Set the internal state - void set_state( state_t to ) { state = to; } -#endif -}; - -__TBB_DEFINE_PROFILING_SET_NAME(mutex) - -} // namespace tbb - -#endif /* __TBB_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/null_mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/null_mutex.h deleted file mode 100644 index 67aabd56ad..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/null_mutex.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_null_mutex_H -#define __TBB_null_mutex_H - -namespace tbb { - -//! A mutex which does nothing -/** A null_mutex does no operation and simulates success. - @ingroup synchronization */ -class null_mutex { - //! Deny assignment and copy construction - null_mutex( const null_mutex& ); - void operator=( const null_mutex& ); -public: - //! 
Represents acquisition of a mutex. - class scoped_lock { - public: - scoped_lock() {} - scoped_lock( null_mutex& ) {} - ~scoped_lock() {} - void acquire( null_mutex& ) {} - bool try_acquire( null_mutex& ) { return true; } - void release() {} - }; - - null_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} - -#endif /* __TBB_null_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/null_rw_mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/null_rw_mutex.h deleted file mode 100644 index 24623896c7..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/null_rw_mutex.h +++ /dev/null @@ -1,65 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_null_rw_mutex_H -#define __TBB_null_rw_mutex_H - -namespace tbb { - -//! A rw mutex which does nothing -/** A null_rw_mutex is a rw mutex that does nothing and simulates successful operation. - @ingroup synchronization */ -class null_rw_mutex { - //! Deny assignment and copy construction - null_rw_mutex( const null_rw_mutex& ); - void operator=( const null_rw_mutex& ); -public: - //! Represents acquisition of a mutex. - class scoped_lock { - public: - scoped_lock() {} - scoped_lock( null_rw_mutex& , bool = true ) {} - ~scoped_lock() {} - void acquire( null_rw_mutex& , bool = true ) {} - bool upgrade_to_writer() { return true; } - bool downgrade_to_reader() { return true; } - bool try_acquire( null_rw_mutex& , bool = true ) { return true; } - void release() {} - }; - - null_rw_mutex() {} - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = true; -}; - -} - -#endif /* __TBB_null_rw_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_do.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_do.h deleted file mode 100644 index 6f91f72e4b..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_do.h +++ /dev/null @@ -1,508 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
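A sketch, not taken from the patch, of why the null mutexes removed here exist: they expose the same scoped_lock interface as the real mutexes, so a component templated on its mutex type can have locking compiled away. The Counter class and typedef names below are hypothetical.

    #include "tbb/mutex.h"
    #include "tbb/null_mutex.h"

    // Hypothetical component whose locking policy is a template parameter.
    template<typename MutexType>
    class Counter {
        MutexType my_mutex;
        long my_value;
    public:
        Counter() : my_value(0) {}
        void increment() {
            typename MutexType::scoped_lock lock(my_mutex); // no-op for null_mutex
            ++my_value;
        }
    };

    typedef Counter<tbb::mutex>      ThreadSafeCounter;    // real critical section
    typedef Counter<tbb::null_mutex> SingleThreadCounter;  // locking compiled away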
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_do_H -#define __TBB_parallel_do_H - -#include "task.h" -#include "aligned_space.h" -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - template class parallel_do_feeder_impl; - template class do_group_task; - - //! Strips its template type argument from 'cv' and '&' qualifiers - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - // Most of the compilers remove cv-qualifiers from non-reference function argument types. - // But unfortunately there are those that don't. - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; - template - struct strip { typedef T type; }; -} // namespace internal -//! @endcond - -//! Class the user supplied algorithm body uses to add new tasks -/** \param Item Work item type **/ -template -class parallel_do_feeder: internal::no_copy -{ - parallel_do_feeder() {} - virtual ~parallel_do_feeder () {} - virtual void internal_add( const Item& item ) = 0; - template friend class internal::parallel_do_feeder_impl; -public: - //! Add a work item to a running parallel_do. - void add( const Item& item ) {internal_add(item);} -}; - -//! @cond INTERNAL -namespace internal { - //! For internal use only. - /** Selects one of the two possible forms of function call member operator. - @ingroup algorithms **/ - template - class parallel_do_operator_selector - { - typedef parallel_do_feeder Feeder; - template - static void internal_call( const Body& obj, A1& arg1, A2&, void (Body::*)(CvItem) const ) { - obj(arg1); - } - template - static void internal_call( const Body& obj, A1& arg1, A2& arg2, void (Body::*)(CvItem, parallel_do_feeder&) const ) { - obj(arg1, arg2); - } - - public: - template - static void call( const Body& obj, A1& arg1, A2& arg2 ) - { - internal_call( obj, arg1, arg2, &Body::operator() ); - } - }; - - //! For internal use only. - /** Executes one iteration of a do. 
- @ingroup algorithms */ - template - class do_iteration_task: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Item my_value; - feeder_type& my_feeder; - - do_iteration_task( const Item& value, feeder_type& feeder ) : - my_value(value), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, my_value, my_feeder); - return NULL; - } - - template friend class parallel_do_feeder_impl; - }; // class do_iteration_task - - template - class do_iteration_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - Iterator my_iter; - feeder_type& my_feeder; - - do_iteration_task_iter( const Iterator& iter, feeder_type& feeder ) : - my_iter(iter), my_feeder(feeder) - {} - - /*override*/ - task* execute() - { - parallel_do_operator_selector::call(*my_feeder.my_body, *my_iter, my_feeder); - return NULL; - } - - template friend class do_group_task_forward; - template friend class do_group_task_input; - template friend class do_task_iter; - }; // class do_iteration_task_iter - - //! For internal use only. - /** Implements new task adding procedure. - @ingroup algorithms **/ - template - class parallel_do_feeder_impl : public parallel_do_feeder - { - /*override*/ - void internal_add( const Item& item ) - { - typedef do_iteration_task iteration_type; - - iteration_type& t = *new (task::allocate_additional_child_of(*my_barrier)) iteration_type(item, *this); - - t.spawn( t ); - } - public: - const Body* my_body; - empty_task* my_barrier; - - parallel_do_feeder_impl() - { - my_barrier = new( task::allocate_root() ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } - -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl(tbb::task_group_context &context) - { - my_barrier = new( task::allocate_root(context) ) empty_task(); - __TBB_ASSERT(my_barrier, "root task allocation failed"); - } -#endif - - ~parallel_do_feeder_impl() - { - my_barrier->destroy(*my_barrier); - } - }; // class parallel_do_feeder_impl - - - //! For internal use only - /** Unpacks a block of iterations. 
- @ingroup algorithms */ - - template - class do_group_task_forward: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - Iterator my_first; - size_t my_size; - - do_group_task_forward( Iterator first, size_t size, feeder_type& feeder ) - : my_feeder(feeder), my_first(first), my_size(size) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_first, my_feeder ); - ++my_first; - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - template friend class do_task_iter; - }; // class do_group_task_forward - - template - class do_group_task_input: public task - { - static const size_t max_arg_size = 4; - - typedef parallel_do_feeder_impl feeder_type; - - feeder_type& my_feeder; - size_t my_size; - aligned_space my_arg; - - do_group_task_input( feeder_type& feeder ) - : my_feeder(feeder), my_size(0) - {} - - /*override*/ task* execute() - { - typedef do_iteration_task_iter iteration_type; - __TBB_ASSERT( my_size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type( my_arg.begin() + k, my_feeder ); - if( ++k==my_size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - - ~do_group_task_input(){ - for( size_t k=0; k~Item(); - } - - template friend class do_task_iter; - }; // class do_group_task_input - - //! For internal use only. - /** Gets block of iterations and packages them into a do_group_task. - @ingroup algorithms */ - template - class do_task_iter: public task - { - typedef parallel_do_feeder_impl feeder_type; - - public: - do_task_iter( Iterator first, Iterator last , feeder_type& feeder ) : - my_first(first), my_last(last), my_feeder(feeder) - {} - - private: - Iterator my_first; - Iterator my_last; - feeder_type& my_feeder; - - /* Do not merge run(xxx) and run_xxx() methods. They are separated in order - to make sure that compilers will eliminate unused argument of type xxx - (that is will not put it on stack). The sole purpose of this argument - is overload resolution. - - An alternative could be using template functions, but explicit specialization - of member function templates is not supported for non specialized class - templates. Besides template functions would always fall back to the least - efficient variant (the one for input iterators) in case of iterators having - custom tags derived from basic ones. */ - /*override*/ task* execute() - { - typedef typename std::iterator_traits::iterator_category iterator_tag; - return run( (iterator_tag*)NULL ); - } - - /** This is the most restricted variant that operates on input iterators or - iterators with unknown tags (tags not derived from the standard ones). 
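The run(xxx)/run_xxx() split described in the comment above is iterator-category tag dispatch; the header passes a null pointer to the tag type purely for overload resolution. A stand-alone illustration of the same idiom (names are mine, not TBB's), here using tag objects rather than tag pointers:

    #include <iterator>

    template<typename Iterator>
    void process_impl(Iterator first, Iterator last, std::input_iterator_tag) {
        // Most restricted path: single pass, distance unknown in advance.
        for ( ; first != last; ++first) { /* ... */ }
    }

    template<typename Iterator>
    void process_impl(Iterator first, Iterator last, std::random_access_iterator_tag) {
        // Cheap distance and splitting are available here.
        typename std::iterator_traits<Iterator>::difference_type n = last - first;
        (void)n; /* ... */
    }

    template<typename Iterator>
    void process(Iterator first, Iterator last) {
        typedef typename std::iterator_traits<Iterator>::iterator_category tag;
        process_impl(first, last, tag());  // overload resolution selects the variant
    }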
**/ - inline task* run( void* ) { return run_for_input_iterator(); } - - task* run_for_input_iterator() { - typedef do_group_task_input block_type; - - block_type& t = *new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(my_feeder); - size_t k=0; - while( !(my_first == my_last) ) { - new (t.my_arg.begin() + k) Item(*my_first); - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first == my_last) ) - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.my_size = k; - return &t; - } - } - - inline task* run( std::forward_iterator_tag* ) { return run_for_forward_iterator(); } - - task* run_for_forward_iterator() { - typedef do_group_task_forward block_type; - - Iterator first = my_first; - size_t k=0; - while( !(my_first==my_last) ) { - ++my_first; - if( ++k==block_type::max_arg_size ) { - if ( !(my_first==my_last) ) - recycle_to_reexecute(); - break; - } - } - return k==0 ? NULL : new( allocate_additional_child_of(*my_feeder.my_barrier) ) block_type(first, k, my_feeder); - } - - inline task* run( std::random_access_iterator_tag* ) { return run_for_random_access_iterator(); } - - task* run_for_random_access_iterator() { - typedef do_group_task_forward block_type; - typedef do_iteration_task_iter iteration_type; - - size_t k = static_cast(my_last-my_first); - if( k > block_type::max_arg_size ) { - Iterator middle = my_first + k/2; - - empty_task& c = *new( allocate_continuation() ) empty_task; - do_task_iter& b = *new( c.allocate_child() ) do_task_iter(middle, my_last, my_feeder); - recycle_as_child_of(c); - - my_last = middle; - c.set_ref_count(2); - c.spawn(b); - return this; - }else if( k != 0 ) { - task_list list; - task* t; - size_t k1=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_first, my_feeder); - ++my_first; - if( ++k1==k ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - } - return NULL; - } - }; // class do_task_iter - - //! For internal use only. - /** Implements parallel iteration over a range. - @ingroup algorithms */ - template - void run_parallel_do( Iterator first, Iterator last, const Body& body -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif - ) - { - typedef do_task_iter root_iteration_task; -#if __TBB_TASK_GROUP_CONTEXT - parallel_do_feeder_impl feeder(context); -#else - parallel_do_feeder_impl feeder; -#endif - feeder.my_body = &body; - - root_iteration_task &t = *new( feeder.my_barrier->allocate_child() ) root_iteration_task(first, last, feeder); - - feeder.my_barrier->set_ref_count(2); - feeder.my_barrier->spawn_and_wait_for_all(t); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. - @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - - //! For internal use only. - /** Detects types of Body's operator function arguments. 
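An illustration, not part of the patch, of the two Body::operator() forms that the select_parallel_do overloads dispatch on; the public parallel_do entry points defined further down in this header take an iterator range plus such a body. The Item type and the work done are placeholders.

    #include "tbb/parallel_do.h"
    #include <list>

    struct Item { int value; };

    // Form 1: plain body -- processes each item.
    struct PlainBody {
        void operator()(Item& item) const { item.value *= 2; }
    };

    // Form 2: feeder body -- may add more work while running.
    struct FeedingBody {
        void operator()(Item& item, tbb::parallel_do_feeder<Item>& feeder) const {
            if (item.value > 0) {
                Item child = { item.value - 1 };
                feeder.add(child);        // routed through parallel_do_feeder::add
            }
        }
    };

    void example(std::list<Item>& items) {
        tbb::parallel_do(items.begin(), items.end(), PlainBody());
        tbb::parallel_do(items.begin(), items.end(), FeedingBody());
    }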
- @ingroup algorithms **/ - template - void select_parallel_do( Iterator first, Iterator last, const Body& body, void (Body::*)(Item, parallel_do_feeder<_Item>&) const -#if __TBB_TASK_GROUP_CONTEXT - , task_group_context& context -#endif // __TBB_TASK_GROUP_CONTEXT - ) - { - run_parallel_do::type>( first, last, body -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); - } - -} // namespace internal -//! @endcond - - -/** \page parallel_do_body_req Requirements on parallel_do body - Class \c Body implementing the concept of parallel_do body must define: - - \code - B::operator()( - cv_item_type item, - parallel_do_feeder& feeder - ) const - - OR - - B::operator()( cv_item_type& item ) const - \endcode Process item. - May be invoked concurrently for the same \c this but different \c item. - - - \code item_type( const item_type& ) \endcode - Copy a work item. - - \code ~item_type() \endcode Destroy a work item -**/ - -/** \name parallel_do - See also requirements on \ref parallel_do_body_req "parallel_do Body". **/ -//@{ -//! Parallel iteration over a range, with optional addition of more work. -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body ) -{ - if ( first == last ) - return; -#if __TBB_TASK_GROUP_CONTEXT - task_group_context context; -#endif // __TBB_TASK_GROUP_CONTEXT - internal::select_parallel_do( first, last, body, &Body::operator() -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif // __TBB_TASK_GROUP_CONTEXT - ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration over a range, with optional addition of more work and user-supplied context -/** @ingroup algorithms */ -template -void parallel_do( Iterator first, Iterator last, const Body& body, task_group_context& context ) -{ - if ( first == last ) - return; - internal::select_parallel_do( first, last, body, &Body::operator(), context ); -} -#endif // __TBB_TASK_GROUP_CONTEXT - -//@} - -} // namespace - -#endif /* __TBB_parallel_do_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_for.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_for.h deleted file mode 100644 index 3cb911aa53..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_for.h +++ /dev/null @@ -1,241 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_H -#define __TBB_parallel_for_H - -#include "task.h" -#include "partitioner.h" -#include "blocked_range.h" -#include -#include "tbb_exception.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - //! Task type used in parallel_for - /** @ingroup algorithms */ - template - class start_for: public task { - Range my_range; - const Body my_body; - typename Partitioner::partition_type my_partition; - /*override*/ task* execute(); - - //! Constructor for root task. - start_for( const Range& range, const Body& body, Partitioner& partitioner ) : - my_range(range), - my_body(body), - my_partition(partitioner) - { - } - //! Splitting constructor used to generate children. - /** this becomes left child. Newly constructed object is right child. */ - start_for( start_for& parent_, split ) : - my_range(parent_.my_range,split()), - my_body(parent_.my_body), - my_partition(parent_.my_partition,split()) - { - my_partition.set_affinity(*this); - } - //! Update affinity info, if any. - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - public: - static void run( const Range& range, const Body& body, const Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - start_for& a = *new(task::allocate_root()) start_for(range,body,const_cast(partitioner)); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. - task_group_context context; - start_for& a = *new(task::allocate_root(context)) start_for(range,body,const_cast(partitioner)); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - task::spawn_root_and_wait(a); - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, const Body& body, const Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) { - start_for& a = *new(task::allocate_root(context)) start_for(range,body,const_cast(partitioner)); - task::spawn_root_and_wait(a); - } - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - }; - - template - task* start_for::execute() { - if( !my_range.is_divisible() || my_partition.should_execute_range(*this) ) { - my_body( my_range ); - return my_partition.continue_after_execute_range(); - } else { - empty_task& c = *new( this->allocate_continuation() ) empty_task; - recycle_as_child_of(c); - c.set_ref_count(2); - bool delay = my_partition.decide_whether_to_delay(); - start_for& b = *new( c.allocate_child() ) start_for(*this,split()); - my_partition.spawn_or_delay(delay,b); - return this; - } - } -} // namespace internal -//! @endcond - - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_for_body_req Requirements on parallel_for body - Class \c Body implementing the concept of parallel_for body must define: - - \code Body::Body( const Body& ); \endcode Copy constructor - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ) const; \endcode Function call operator applying the body to range \c r. -**/ - -/** \name parallel_for - See also requirements on \ref range_req "Range" and \ref parallel_for_body_req "parallel_for Body". **/ -//@{ - -//! Parallel iteration over range with default partitioner. 
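For reference (mine, not code from the deleted header): a body satisfying the requirements listed above, applied over a blocked_range with the default partitioner; the array and functor names are illustrative.

    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"
    #include <cstddef>

    // Meets the parallel_for body requirements: copy constructor,
    // destructor, and a const function-call operator taking the range.
    class ScaleVector {
        float* my_data;
        float  my_factor;
    public:
        ScaleVector(float* data, float factor) : my_data(data), my_factor(factor) {}
        void operator()(const tbb::blocked_range<std::size_t>& r) const {
            for (std::size_t i = r.begin(); i != r.end(); ++i)
                my_data[i] *= my_factor;
        }
    };

    void scale(float* data, std::size_t n, float factor) {
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, n),
                          ScaleVector(data, factor));   // default partitioner
    }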
-/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body ) { - internal::start_for::run(range,body,__TBB_DEFAULT_PARTITIONER()); -} - -//! Parallel iteration over range with simple partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -//! Parallel iteration over range with auto_partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -//! Parallel iteration over range with affinity_partitioner. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner ) { - internal::start_for::run(range,body,partitioner); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration over range with simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const simple_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range, body, partitioner, context); -} - -//! Parallel iteration over range with auto_partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, const auto_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range, body, partitioner, context); -} - -//! Parallel iteration over range with affinity_partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -void parallel_for( const Range& range, const Body& body, affinity_partitioner& partitioner, task_group_context& context ) { - internal::start_for::run(range,body,partitioner, context); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -//! @cond INTERNAL -namespace internal { - //! Calls the function with values from range [begin, end) with a step provided -template -class parallel_for_body : internal::no_assign { - const Function &my_func; - const Index my_begin; - const Index my_step; -public: - parallel_for_body( const Function& _func, Index& _begin, Index& _step) - : my_func(_func), my_begin(_begin), my_step(_step) {} - - void operator()( tbb::blocked_range& r ) const { - for( Index i = r.begin(), k = my_begin + i * my_step; i < r.end(); i++, k = k + my_step) - my_func( k ); - } -}; -} // namespace internal -//! @endcond - -namespace strict_ppl { - -//@{ -//! Parallel iteration over a range of integers with a step provided -template -void parallel_for(Index first, Index last, Index step, const Function& f) { - tbb::task_group_context context; - parallel_for(first, last, step, f, context); -} -template -void parallel_for(Index first, Index last, Index step, const Function& f, tbb::task_group_context &context) { - if (step <= 0 ) - internal::throw_exception(internal::eid_nonpositive_step); // throws std::invalid_argument - else if (last > first) { - // Above "else" avoids "potential divide by zero" warning on some platforms - Index end = (last - first - Index(1)) / step + Index(1); - tbb::blocked_range range(static_cast(0), end); - internal::parallel_for_body body(f, first, step); - tbb::parallel_for(range, body, tbb::auto_partitioner(), context); - } -} -//! 
Parallel iteration over a range of integers with a default step value -template -void parallel_for(Index first, Index last, const Function& f) { - tbb::task_group_context context; - parallel_for(first, last, static_cast(1), f, context); -} -template -void parallel_for(Index first, Index last, const Function& f, tbb::task_group_context &context) { - parallel_for(first, last, static_cast(1), f, context); -} - -//@} - -} // namespace strict_ppl - -using strict_ppl::parallel_for; - -} // namespace tbb - -#endif /* __TBB_parallel_for_H */ - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_for_each.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_for_each.h deleted file mode 100644 index 6b8d862c07..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_for_each.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_for_each_H -#define __TBB_parallel_for_each_H - -#include "parallel_do.h" - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - // The class calls user function in operator() - template - class parallel_for_each_body : internal::no_assign { - const Function &my_func; - public: - parallel_for_each_body(const Function &_func) : my_func(_func) {} - parallel_for_each_body(const parallel_for_each_body &_caller) : my_func(_caller.my_func) {} - - void operator() ( typename std::iterator_traits::value_type& value ) const { - my_func(value); - } - }; -} // namespace internal -//! @endcond - -/** \name parallel_for_each - **/ -//@{ -//! Calls function f for all items from [first, last) interval using user-supplied context -/** @ingroup algorithms */ -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f, task_group_context &context) { - internal::parallel_for_each_body body(f); - - tbb::parallel_do (first, last, body, context); -} - -//! 
Uses default context -template -void parallel_for_each(InputIterator first, InputIterator last, const Function& f) { - internal::parallel_for_each_body body(f); - - tbb::parallel_do (first, last, body); -} - -//@} - -} // namespace - -#endif /* __TBB_parallel_for_each_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_invoke.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_invoke.h deleted file mode 100644 index 02c3e80ef1..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_invoke.h +++ /dev/null @@ -1,359 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_invoke_H -#define __TBB_parallel_invoke_H - -#include "task.h" - -namespace tbb { - -//! 
@cond INTERNAL -namespace internal { - // Simple task object, executing user method - template - class function_invoker : public task{ - public: - function_invoker(const function& _function) : my_function(_function) {} - private: - const function &my_function; - /*override*/ - task* execute() - { - my_function(); - return NULL; - } - }; - - // The class spawns two or three child tasks - template - class spawner : public task { - private: - const function1& my_func1; - const function2& my_func2; - const function3& my_func3; - bool is_recycled; - - task* execute (){ - if(is_recycled){ - return NULL; - }else{ - __TBB_ASSERT(N==2 || N==3, "Number of arguments passed to spawner is wrong"); - set_ref_count(N); - recycle_as_safe_continuation(); - internal::function_invoker* invoker2 = new (allocate_child()) internal::function_invoker(my_func2); - __TBB_ASSERT(invoker2, "Child task allocation failed"); - spawn(*invoker2); - size_t n = N; // To prevent compiler warnings - if (n>2) { - internal::function_invoker* invoker3 = new (allocate_child()) internal::function_invoker(my_func3); - __TBB_ASSERT(invoker3, "Child task allocation failed"); - spawn(*invoker3); - } - my_func1(); - is_recycled = true; - return NULL; - } - } // execute - - public: - spawner(const function1& _func1, const function2& _func2, const function3& _func3) : my_func1(_func1), my_func2(_func2), my_func3(_func3), is_recycled(false) {} - }; - - // Creates and spawns child tasks - class parallel_invoke_helper : public empty_task { - public: - // Dummy functor class - class parallel_invoke_noop { - public: - void operator() () const {} - }; - // Creates a helper object with user-defined number of children expected - parallel_invoke_helper(int number_of_children) - { - set_ref_count(number_of_children + 1); - } - // Adds child task and spawns it - template - void add_child (const function &_func) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(_func); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn(*invoker); - } - - // Adds a task with multiple child tasks and spawns it - // two arguments - template - void add_children (const function1& _func1, const function2& _func2) - { - // The third argument is dummy, it is ignored actually. 
- parallel_invoke_noop noop; - internal::spawner<2, function1, function2, parallel_invoke_noop>& sub_root = *new(allocate_child())internal::spawner<2, function1, function2, parallel_invoke_noop>(_func1, _func2, noop); - spawn(sub_root); - } - // three arguments - template - void add_children (const function1& _func1, const function2& _func2, const function3& _func3) - { - internal::spawner<3, function1, function2, function3>& sub_root = *new(allocate_child())internal::spawner<3, function1, function2, function3>(_func1, _func2, _func3); - spawn(sub_root); - } - - // Waits for all child tasks - template - void run_and_finish(const F0& f0) - { - internal::function_invoker* invoker = new (allocate_child()) internal::function_invoker(f0); - __TBB_ASSERT(invoker, "Child task allocation failed"); - spawn_and_wait_for_all(*invoker); - } - }; - // The class destroys root if exception occured as well as in normal case - class parallel_invoke_cleaner: internal::no_copy { - public: - parallel_invoke_cleaner(int number_of_children, tbb::task_group_context& context) : root(*new(task::allocate_root(context)) internal::parallel_invoke_helper(number_of_children)) - {} - ~parallel_invoke_cleaner(){ - root.destroy(root); - } - internal::parallel_invoke_helper& root; - }; -} // namespace internal -//! @endcond - -/** \name parallel_invoke - **/ -//@{ -//! Executes a list of tasks in parallel and waits for all tasks to complete. -/** @ingroup algorithms */ - -// parallel_invoke with user-defined context -// two arguments -template -void parallel_invoke(const F0& f0, const F1& f1, tbb::task_group_context& context) { - internal::parallel_invoke_cleaner cleaner(2, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_child(f1); - - root.run_and_finish(f0); -} - -// three arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, tbb::task_group_context& context) { - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_child(f2); - root.add_child(f1); - - root.run_and_finish(f0); -} - -// four arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_child(f3); - root.add_child(f2); - root.add_child(f1); - - root.run_and_finish(f0); -} - -// five arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// six arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f5, f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// seven arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(3, context); - internal::parallel_invoke_helper& root = cleaner.root; - - 
root.add_children(f6, f5, f4); - root.add_children(f3, f2, f1); - - root.run_and_finish(f0); -} - -// eight arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f7, f6, f5); - root.add_children(f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// nine arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f8, f7, f6); - root.add_children(f5, f4, f3); - root.add_children(f2, f1); - - root.run_and_finish(f0); -} - -// ten arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9, - tbb::task_group_context& context) -{ - internal::parallel_invoke_cleaner cleaner(4, context); - internal::parallel_invoke_helper& root = cleaner.root; - - root.add_children(f9, f8, f7); - root.add_children(f6, f5, f4); - root.add_children(f3, f2, f1); - - root.run_and_finish(f0); -} - -// two arguments -template -void parallel_invoke(const F0& f0, const F1& f1) { - task_group_context context; - parallel_invoke(f0, f1, context); -} -// three arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2) { - task_group_context context; - parallel_invoke(f0, f1, f2, context); -} -// four arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, context); -} -// five arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, context); -} -// six arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, const F5& f5) { - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, context); -} -// seven arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, context); -} -// eigth arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, context); -} -// nine arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, context); -} -// ten arguments -template -void parallel_invoke(const F0& f0, const F1& f1, const F2& f2, const F3& f3, const F4& f4, - const F5& f5, const F6& f6, const F7& f7, const F8& f8, const F9& f9) -{ - task_group_context context; - parallel_invoke(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, context); -} - -//@} - -} // namespace - -#endif /* 
__TBB_parallel_invoke_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_reduce.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_reduce.h deleted file mode 100644 index 670b626de4..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_reduce.h +++ /dev/null @@ -1,387 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_reduce_H -#define __TBB_parallel_reduce_H - -#include "task.h" -#include "aligned_space.h" -#include "partitioner.h" -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - //! ITT instrumented routine that stores src into location pointed to by dst. - void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src ); - - //! ITT instrumented routine that loads pointer from location pointed to by src. - void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src ); - - template inline void parallel_reduce_store_body( T*& dst, T* src ) { -#if TBB_USE_THREADING_TOOLS - itt_store_pointer_with_release_v3(&dst,src); -#else - __TBB_store_with_release(dst,src); -#endif /* TBB_USE_THREADING_TOOLS */ - } - - template inline T* parallel_reduce_load_body( T*& src ) { -#if TBB_USE_THREADING_TOOLS - return static_cast(itt_load_pointer_with_acquire_v3(&src)); -#else - return __TBB_load_with_acquire(src); -#endif /* TBB_USE_THREADING_TOOLS */ - } - - //! 0 if root, 1 if a left child, 2 if a right child. - /** Represented as a char, not enum, for compactness. */ - typedef char reduction_context; - - //! Task type use to combine the partial results of parallel_reduce. - /** @ingroup algorithms */ - template - class finish_reduce: public task { - //! Pointer to body, or NULL if the left child has not yet finished. - Body* my_body; - bool has_right_zombie; - const reduction_context my_context; - aligned_space zombie_space; - finish_reduce( char context_ ) : - my_body(NULL), - has_right_zombie(false), - my_context(context_) - { - } - task* execute() { - if( has_right_zombie ) { - // Right child was stolen. 
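(Editorial sketch, not part of the patch: the hunk that ends just above removes the bundled parallel_invoke.h in its entirety. For orientation, a minimal use of the interface being deleted could look as follows; the functor names are illustrative only, and the sketch assumes the bundled TBB headers are on the include path.)

    #include "tbb/parallel_invoke.h"
    #include <cstdio>

    // Three independent pieces of work, expressed as functors so the
    // sketch stays C++03-compatible like the header above.
    struct PrintA { void operator()() const { std::printf("A\n"); } };
    struct PrintB { void operator()() const { std::printf("B\n"); } };
    struct PrintC { void operator()() const { std::printf("C\n"); } };

    int main() {
        // Runs the three callables potentially in parallel and
        // returns only after all of them have completed.
        tbb::parallel_invoke(PrintA(), PrintB(), PrintC());
        return 0;
    }
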
- Body* s = zombie_space.begin(); - my_body->join( *s ); - s->~Body(); - } - if( my_context==1 ) - parallel_reduce_store_body( static_cast(parent())->my_body, my_body ); - return NULL; - } - template - friend class start_reduce; - }; - - //! Task type used to split the work of parallel_reduce. - /** @ingroup algorithms */ - template - class start_reduce: public task { - typedef finish_reduce finish_type; - Body* my_body; - Range my_range; - typename Partitioner::partition_type my_partition; - reduction_context my_context; - /*override*/ task* execute(); - template - friend class finish_reduce; - - //! Constructor used for root task - start_reduce( const Range& range, Body* body, Partitioner& partitioner ) : - my_body(body), - my_range(range), - my_partition(partitioner), - my_context(0) - { - } - //! Splitting constructor used to generate children. - /** this becomes left child. Newly constructed object is right child. */ - start_reduce( start_reduce& parent_, split ) : - my_body(parent_.my_body), - my_range(parent_.my_range,split()), - my_partition(parent_.my_partition,split()), - my_context(2) - { - my_partition.set_affinity(*this); - parent_.my_context = 1; - } - //! Update affinity info, if any - /*override*/ void note_affinity( affinity_id id ) { - my_partition.note_affinity( id ); - } - -public: - static void run( const Range& range, Body& body, Partitioner& partitioner ) { - if( !range.empty() ) { -#if !__TBB_TASK_GROUP_CONTEXT || TBB_JOIN_OUTER_TASK_GROUP - task::spawn_root_and_wait( *new(task::allocate_root()) start_reduce(range,&body,partitioner) ); -#else - // Bound context prevents exceptions from body to affect nesting or sibling algorithms, - // and allows users to handle exceptions safely by wrapping parallel_for in the try-block. - task_group_context context; - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); -#endif /* __TBB_TASK_GROUP_CONTEXT && !TBB_JOIN_OUTER_TASK_GROUP */ - } - } -#if __TBB_TASK_GROUP_CONTEXT - static void run( const Range& range, Body& body, Partitioner& partitioner, task_group_context& context ) { - if( !range.empty() ) - task::spawn_root_and_wait( *new(task::allocate_root(context)) start_reduce(range,&body,partitioner) ); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - }; - - template - task* start_reduce::execute() { - if( my_context==2 ) { - finish_type* p = static_cast(parent() ); - if( !parallel_reduce_load_body(p->my_body) ) { - my_body = new( p->zombie_space.begin() ) Body(*my_body,split()); - p->has_right_zombie = true; - } - } - if( !my_range.is_divisible() || my_partition.should_execute_range(*this) ) { - (*my_body)( my_range ); - if( my_context==1 ) - parallel_reduce_store_body(static_cast(parent())->my_body, my_body ); - return my_partition.continue_after_execute_range(); - } else { - finish_type& c = *new( allocate_continuation()) finish_type(my_context); - recycle_as_child_of(c); - c.set_ref_count(2); - bool delay = my_partition.decide_whether_to_delay(); - start_reduce& b = *new( c.allocate_child() ) start_reduce(*this,split()); - my_partition.spawn_or_delay(delay,b); - return this; - } - } - - //! Auxiliary class for parallel_reduce; for internal use only. - /** The adaptor class that implements \ref parallel_reduce_body_req "parallel_reduce Body" - using given \ref parallel_reduce_lambda_req "anonymous function objects". 
- **/ - /** @ingroup algorithms */ - template - class lambda_reduce_body { - -//FIXME: decide if my_real_body, my_reduction, and identity_element should be copied or referenced -// (might require some performance measurements) - - const Value& identity_element; - const RealBody& my_real_body; - const Reduction& my_reduction; - Value my_value; - lambda_reduce_body& operator= ( const lambda_reduce_body& other ); - public: - lambda_reduce_body( const Value& identity, const RealBody& body, const Reduction& reduction ) - : identity_element(identity) - , my_real_body(body) - , my_reduction(reduction) - , my_value(identity) - { } - lambda_reduce_body( const lambda_reduce_body& other ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.my_value) - { } - lambda_reduce_body( lambda_reduce_body& other, tbb::split ) - : identity_element(other.identity_element) - , my_real_body(other.my_real_body) - , my_reduction(other.my_reduction) - , my_value(other.identity_element) - { } - void operator()(Range& range) { - my_value = my_real_body(range, const_cast(my_value)); - } - void join( lambda_reduce_body& rhs ) { - my_value = my_reduction(const_cast(my_value), const_cast(rhs.my_value)); - } - Value result() const { - return my_value; - } - }; - -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_reduce_body_req Requirements on parallel_reduce body - Class \c Body implementing the concept of parallel_reduce body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Must be able to run concurrently with operator() and method \c join - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( Range& r ); \endcode Function call operator applying body to range \c r - and accumulating the result - - \code void Body::join( Body& b ); \endcode Join results. - The result in \c b should be merged into the result of \c this -**/ - -/** \page parallel_reduce_lambda_req Requirements on parallel_reduce anonymous function objects (lambda functions) - TO BE DOCUMENTED -**/ - -/** \name parallel_reduce - See also requirements on \ref range_req "Range" and \ref parallel_reduce_body_req "parallel_reduce Body". **/ -//@{ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body ) { - internal::start_reduce::run( range, body, __TBB_DEFAULT_PARTITIONER() ); -} - -//! Parallel iteration with reduction and simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner ) { - internal::start_reduce::run( range, body, partitioner ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. 
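(Editorial sketch, not part of the patch: the requirements page quoted above describes the Body concept that the deleted parallel_reduce overloads expect. A minimal summing Body under those rules might look like this; SumBody and parallel_sum are illustrative names, not part of the removed header.)

    #include "tbb/parallel_reduce.h"
    #include "tbb/blocked_range.h"
    #include <cstddef>

    // Body following the documented protocol: splitting constructor,
    // operator() over a range, and join() to merge partial results.
    struct SumBody {
        const float* my_a;
        float my_sum;

        SumBody( const float* a ) : my_a(a), my_sum(0) {}
        SumBody( SumBody& other, tbb::split ) : my_a(other.my_a), my_sum(0) {}

        void operator()( const tbb::blocked_range<std::size_t>& r ) {
            for( std::size_t i = r.begin(); i != r.end(); ++i )
                my_sum += my_a[i];
        }
        void join( SumBody& rhs ) { my_sum += rhs.my_sum; }
    };

    float parallel_sum( const float* a, std::size_t n ) {
        SumBody body(a);
        tbb::parallel_reduce( tbb::blocked_range<std::size_t>(0, n), body );
        return body.my_sum;
    }

The functional-form overloads deleted below accept an identity value, a range body, and a reduction callable instead of a single Body object.
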
-/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const simple_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, const auto_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -void parallel_reduce( const Range& range, Body& body, affinity_partitioner& partitioner, task_group_context& context ) { - internal::start_reduce::run( range, body, partitioner, context ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/** parallel_reduce overloads that work with anonymous function objects - (see also \ref parallel_reduce_lambda_req "requirements on parallel_reduce anonymous function objects"). **/ - -//! Parallel iteration with reduction and default partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const __TBB_DEFAULT_PARTITIONER> - ::run(range, body, __TBB_DEFAULT_PARTITIONER() ); - return body.result(); -} - -//! Parallel iteration with reduction and simple_partitioner. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run(range, body, partitioner ); - return body.result(); -} - -//! Parallel iteration with reduction and auto_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -//! Parallel iteration with reduction and affinity_partitioner -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner ); - return body.result(); -} - -#if __TBB_TASK_GROUP_CONTEXT -//! Parallel iteration with reduction, simple partitioner and user-supplied context. -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const simple_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const simple_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! 
Parallel iteration with reduction, auto_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - const auto_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,const auto_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} - -//! Parallel iteration with reduction, affinity_partitioner and user-supplied context -/** @ingroup algorithms **/ -template -Value parallel_reduce( const Range& range, const Value& identity, const RealBody& real_body, const Reduction& reduction, - affinity_partitioner& partitioner, task_group_context& context ) { - internal::lambda_reduce_body body(identity, real_body, reduction); - internal::start_reduce,affinity_partitioner> - ::run( range, body, partitioner, context ); - return body.result(); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ -//@} - -} // namespace tbb - -#endif /* __TBB_parallel_reduce_H */ - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_scan.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_scan.h deleted file mode 100644 index 3a1963f471..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_scan.h +++ /dev/null @@ -1,351 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_scan_H -#define __TBB_parallel_scan_H - -#include "task.h" -#include "aligned_space.h" -#include -#include "partitioner.h" - -namespace tbb { - -//! Used to indicate that the initial scan is being performed. -/** @ingroup algorithms */ -struct pre_scan_tag { - static bool is_final_scan() {return false;} -}; - -//! Used to indicate that the final scan is being performed. -/** @ingroup algorithms */ -struct final_scan_tag { - static bool is_final_scan() {return true;} -}; - -//! @cond INTERNAL -namespace internal { - - //! Performs final scan for a leaf - /** @ingroup algorithms */ - template - class final_sum: public task { - public: - Body body; - private: - aligned_space range; - //! Where to put result of last subrange, or NULL if not last subrange. 
- Body* stuff_last; - public: - final_sum( Body& body_ ) : - body(body_,split()) - { - poison_pointer(stuff_last); - } - ~final_sum() { - range.begin()->~Range(); - } - void finish_construction( const Range& range_, Body* stuff_last_ ) { - new( range.begin() ) Range(range_); - stuff_last = stuff_last_; - } - private: - /*override*/ task* execute() { - body( *range.begin(), final_scan_tag() ); - if( stuff_last ) - stuff_last->assign(body); - return NULL; - } - }; - - //! Split work to be done in the scan. - /** @ingroup algorithms */ - template - class sum_node: public task { - typedef final_sum final_sum_type; - public: - final_sum_type *incoming; - final_sum_type *body; - Body *stuff_last; - private: - final_sum_type *left_sum; - sum_node *left; - sum_node *right; - bool left_is_final; - Range range; - sum_node( const Range range_, bool left_is_final_ ) : - left_sum(NULL), - left(NULL), - right(NULL), - left_is_final(left_is_final_), - range(range_) - { - // Poison fields that will be set by second pass. - poison_pointer(body); - poison_pointer(incoming); - } - task* create_child( const Range& range_, final_sum_type& f, sum_node* n, final_sum_type* incoming_, Body* stuff_last_ ) { - if( !n ) { - f.recycle_as_child_of( *this ); - f.finish_construction( range_, stuff_last_ ); - return &f; - } else { - n->body = &f; - n->incoming = incoming_; - n->stuff_last = stuff_last_; - return n; - } - } - /*override*/ task* execute() { - if( body ) { - if( incoming ) - left_sum->body.reverse_join( incoming->body ); - recycle_as_continuation(); - sum_node& c = *this; - task* b = c.create_child(Range(range,split()),*left_sum,right,left_sum,stuff_last); - task* a = left_is_final ? NULL : c.create_child(range,*body,left,incoming,NULL); - set_ref_count( (a!=NULL)+(b!=NULL) ); - body = NULL; - if( a ) spawn(*b); - else a = b; - return a; - } else { - return NULL; - } - } - template - friend class start_scan; - - template - friend class finish_scan; - }; - - //! Combine partial results - /** @ingroup algorithms */ - template - class finish_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type** const sum; - sum_node_type*& return_slot; - public: - final_sum_type* right_zombie; - sum_node_type& result; - - /*override*/ task* execute() { - __TBB_ASSERT( result.ref_count()==(result.left!=NULL)+(result.right!=NULL), NULL ); - if( result.left ) - result.left_is_final = false; - if( right_zombie && sum ) - ((*sum)->body).reverse_join(result.left_sum->body); - __TBB_ASSERT( !return_slot, NULL ); - if( right_zombie || result.right ) { - return_slot = &result; - } else { - destroy( result ); - } - if( right_zombie && !sum && !result.right ) destroy(*right_zombie); - return NULL; - } - - finish_scan( sum_node_type*& return_slot_, final_sum_type** sum_, sum_node_type& result_ ) : - sum(sum_), - return_slot(return_slot_), - right_zombie(NULL), - result(result_) - { - __TBB_ASSERT( !return_slot, NULL ); - } - }; - - //! Initial task to split the work - /** @ingroup algorithms */ - template - class start_scan: public task { - typedef sum_node sum_node_type; - typedef final_sum final_sum_type; - final_sum_type* body; - /** Non-null if caller is requesting total. */ - final_sum_type** sum; - sum_node_type** return_slot; - /** Null if computing root. 
*/ - sum_node_type* parent_sum; - bool is_final; - bool is_right_child; - Range range; - typename Partitioner::partition_type partition; - /*override*/ task* execute(); - public: - start_scan( sum_node_type*& return_slot_, start_scan& parent_, sum_node_type* parent_sum_ ) : - body(parent_.body), - sum(parent_.sum), - return_slot(&return_slot_), - parent_sum(parent_sum_), - is_final(parent_.is_final), - is_right_child(false), - range(parent_.range,split()), - partition(parent_.partition,split()) - { - __TBB_ASSERT( !*return_slot, NULL ); - } - - start_scan( sum_node_type*& return_slot_, const Range& range_, final_sum_type& body_, const Partitioner& partitioner_) : - body(&body_), - sum(NULL), - return_slot(&return_slot_), - parent_sum(NULL), - is_final(true), - is_right_child(false), - range(range_), - partition(partitioner_) - { - __TBB_ASSERT( !*return_slot, NULL ); - } - - static void run( const Range& range, Body& body, const Partitioner& partitioner ) { - if( !range.empty() ) { - typedef internal::start_scan start_pass1_type; - internal::sum_node* root = NULL; - typedef internal::final_sum final_sum_type; - final_sum_type* temp_body = new(task::allocate_root()) final_sum_type( body ); - start_pass1_type& pass1 = *new(task::allocate_root()) start_pass1_type( - /*return_slot=*/root, - range, - *temp_body, - partitioner ); - task::spawn_root_and_wait( pass1 ); - if( root ) { - root->body = temp_body; - root->incoming = NULL; - root->stuff_last = &body; - task::spawn_root_and_wait( *root ); - } else { - body.assign(temp_body->body); - temp_body->finish_construction( range, NULL ); - temp_body->destroy(*temp_body); - } - } - } - }; - - template - task* start_scan::execute() { - typedef internal::finish_scan finish_pass1_type; - finish_pass1_type* p = parent_sum ? static_cast( parent() ) : NULL; - // Inspecting p->result.left_sum would ordinarily be a race condition. - // But we inspect it only if we are not a stolen task, in which case we - // know that task assigning to p->result.left_sum has completed. - bool treat_as_stolen = is_right_child && (is_stolen_task() || body!=p->result.left_sum); - if( treat_as_stolen ) { - // Invocation is for right child that has been really stolen or needs to be virtually stolen - p->right_zombie = body = new( allocate_root() ) final_sum_type(body->body); - is_final = false; - } - task* next_task = NULL; - if( (is_right_child && !treat_as_stolen) || !range.is_divisible() || partition.should_execute_range(*this) ) { - if( is_final ) - (body->body)( range, final_scan_tag() ); - else if( sum ) - (body->body)( range, pre_scan_tag() ); - if( sum ) - *sum = body; - __TBB_ASSERT( !*return_slot, NULL ); - } else { - sum_node_type* result; - if( parent_sum ) - result = new(allocate_additional_child_of(*parent_sum)) sum_node_type(range,/*left_is_final=*/is_final); - else - result = new(task::allocate_root()) sum_node_type(range,/*left_is_final=*/is_final); - finish_pass1_type& c = *new( allocate_continuation()) finish_pass1_type(*return_slot,sum,*result); - // Split off right child - start_scan& b = *new( c.allocate_child() ) start_scan( /*return_slot=*/result->right, *this, result ); - b.is_right_child = true; - // Left child is recycling of *this. Must recycle this before spawning b, - // otherwise b might complete and decrement c.ref_count() to zero, which - // would cause c.execute() to run prematurely. 
- recycle_as_child_of(c); - c.set_ref_count(2); - c.spawn(b); - sum = &result->left_sum; - return_slot = &result->left; - is_right_child = false; - next_task = this; - parent_sum = result; - __TBB_ASSERT( !*return_slot, NULL ); - } - return next_task; - } -} // namespace internal -//! @endcond - -// Requirements on Range concept are documented in blocked_range.h - -/** \page parallel_scan_body_req Requirements on parallel_scan body - Class \c Body implementing the concept of parallel_reduce body must define: - - \code Body::Body( Body&, split ); \endcode Splitting constructor. - Split \c b so that \c this and \c b can accumulate separately - - \code Body::~Body(); \endcode Destructor - - \code void Body::operator()( const Range& r, pre_scan_tag ); \endcode - Preprocess iterations for range \c r - - \code void Body::operator()( const Range& r, final_scan_tag ); \endcode - Do final processing for iterations of range \c r - - \code void Body::reverse_join( Body& a ); \endcode - Merge preprocessing state of \c a into \c this, where \c a was - created earlier from \c b by b's splitting constructor -**/ - -/** \name parallel_scan - See also requirements on \ref range_req "Range" and \ref parallel_scan_body_req "parallel_scan Body". **/ -//@{ - -//! Parallel prefix with default partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body ) { - internal::start_scan::run(range,body,__TBB_DEFAULT_PARTITIONER()); -} - -//! Parallel prefix with simple_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const simple_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} - -//! Parallel prefix with auto_partitioner -/** @ingroup algorithms **/ -template -void parallel_scan( const Range& range, Body& body, const auto_partitioner& partitioner ) { - internal::start_scan::run(range,body,partitioner); -} -//@} - -} // namespace tbb - -#endif /* __TBB_parallel_scan_H */ - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_sort.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_sort.h deleted file mode 100644 index 6fbbe8073c..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_sort.h +++ /dev/null @@ -1,227 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
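(Editorial sketch, not part of the patch: the parallel_scan.h removal completed above documents a Body protocol with a splitting constructor, pre_scan and final_scan passes, reverse_join, and assign. A minimal running-sum Body under those rules might look like this; PrefixSumBody and parallel_prefix_sum are illustrative names.)

    #include "tbb/parallel_scan.h"
    #include "tbb/blocked_range.h"
    #include <cstddef>

    // Running-sum Body: the pre_scan pass only accumulates, the
    // final_scan pass also writes the prefix sums into y.
    class PrefixSumBody {
        float sum;
        float* const y;
        const float* const x;
    public:
        PrefixSumBody( float* y_, const float* x_ ) : sum(0), y(y_), x(x_) {}
        PrefixSumBody( PrefixSumBody& b, tbb::split ) : sum(0), y(b.y), x(b.x) {}

        template<typename Tag>
        void operator()( const tbb::blocked_range<std::size_t>& r, Tag ) {
            float temp = sum;
            for( std::size_t i = r.begin(); i < r.end(); ++i ) {
                temp += x[i];
                if( Tag::is_final_scan() )
                    y[i] = temp;
            }
            sum = temp;
        }
        void reverse_join( PrefixSumBody& a ) { sum = a.sum + sum; }
        void assign( PrefixSumBody& b ) { sum = b.sum; }
    };

    void parallel_prefix_sum( const float* x, float* y, std::size_t n ) {
        PrefixSumBody body(y, x);
        tbb::parallel_scan( tbb::blocked_range<std::size_t>(0, n), body );
    }
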
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_sort_H -#define __TBB_parallel_sort_H - -#include "parallel_for.h" -#include "blocked_range.h" -#include -#include -#include - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - -//! Range used in quicksort to split elements into subranges based on a value. -/** The split operation selects a splitter and places all elements less than or equal - to the value in the first range and the remaining elements in the second range. - @ingroup algorithms */ -template -class quick_sort_range: private no_assign { - - inline size_t median_of_three(const RandomAccessIterator &array, size_t l, size_t m, size_t r) const { - return comp(array[l], array[m]) ? ( comp(array[m], array[r]) ? m : ( comp( array[l], array[r]) ? r : l ) ) - : ( comp(array[r], array[m]) ? m : ( comp( array[r], array[l] ) ? r : l ) ); - } - - inline size_t pseudo_median_of_nine( const RandomAccessIterator &array, const quick_sort_range &range ) const { - size_t offset = range.size/8u; - return median_of_three(array, - median_of_three(array, 0, offset, offset*2), - median_of_three(array, offset*3, offset*4, offset*5), - median_of_three(array, offset*6, offset*7, range.size - 1) ); - - } - -public: - - static const size_t grainsize = 500; - const Compare ∁ - RandomAccessIterator begin; - size_t size; - - quick_sort_range( RandomAccessIterator begin_, size_t size_, const Compare &comp_ ) : - comp(comp_), begin(begin_), size(size_) {} - - bool empty() const {return size==0;} - bool is_divisible() const {return size>=grainsize;} - - quick_sort_range( quick_sort_range& range, split ) : comp(range.comp) { - RandomAccessIterator array = range.begin; - RandomAccessIterator key0 = range.begin; - size_t m = pseudo_median_of_nine(array, range); - if (m) std::swap ( array[0], array[m] ); - - size_t i=0; - size_t j=range.size; - // Partition interval [i+1,j-1] with key *key0. - for(;;) { - __TBB_ASSERT( i -class quick_sort_pretest_body : internal::no_assign { - const Compare ∁ - -public: - quick_sort_pretest_body(const Compare &_comp) : comp(_comp) {} - - void operator()( const blocked_range& range ) const { - task &my_task = task::self(); - RandomAccessIterator my_end = range.end(); - - int i = 0; - for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i) { - if ( i%64 == 0 && my_task.is_cancelled() ) break; - - // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1 - if ( comp( *(k), *(k-1) ) ) { - my_task.cancel_group_execution(); - break; - } - } - } - -}; - -//! Body class used to sort elements in a range that is smaller than the grainsize. -/** @ingroup algorithms */ -template -struct quick_sort_body { - void operator()( const quick_sort_range& range ) const { - //SerialQuickSort( range.begin, range.size, range.comp ); - std::sort( range.begin, range.begin + range.size, range.comp ); - } -}; - -//! Wrapper method to initiate the sort by calling parallel_for. -/** @ingroup algorithms */ -template -void parallel_quick_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp ) { - task_group_context my_context; - const int serial_cutoff = 9; - - __TBB_ASSERT( begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?" 
); - RandomAccessIterator k; - for ( k = begin ; k != begin + serial_cutoff; ++k ) { - if ( comp( *(k+1), *k ) ) { - goto do_parallel_quick_sort; - } - } - - parallel_for( blocked_range(k+1, end), - quick_sort_pretest_body(comp), - auto_partitioner(), - my_context); - - if (my_context.is_group_execution_cancelled()) -do_parallel_quick_sort: - parallel_for( quick_sort_range(begin, end-begin, comp ), - quick_sort_body(), - auto_partitioner() ); -} - -} // namespace internal -//! @endcond - -/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort - Requirements on value type \c T of \c RandomAccessIterator for \c parallel_sort: - - \code void swap( T& x, T& y ) \endcode Swaps \c x and \c y - - \code bool Compare::operator()( const T& x, const T& y ) \endcode - True if x comes before y; -**/ - -/** \name parallel_sort - See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/ -//@{ - -//! Sorts the data in [begin,end) using the given comparator -/** The compare function object is used for all comparisons between elements during sorting. - The compare object must define a bool operator() function. - @ingroup algorithms **/ -template -void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp) { - const int min_parallel_size = 500; - if( end > begin ) { - if (end - begin < min_parallel_size) { - std::sort(begin, end, comp); - } else { - internal::parallel_quick_sort(begin, end, comp); - } - } -} - -//! Sorts the data in [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( RandomAccessIterator begin, RandomAccessIterator end ) { - parallel_sort( begin, end, std::less< typename std::iterator_traits::value_type >() ); -} - -//! Sorts the data in the range \c [begin,end) with a default comparator \c std::less -/** @ingroup algorithms **/ -template -inline void parallel_sort( T * begin, T * end ) { - parallel_sort( begin, end, std::less< T >() ); -} -//@} - - -} // namespace tbb - -#endif - diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_while.h b/deal.II/bundled/tbb30_104oss/include/tbb/parallel_while.h deleted file mode 100644 index 21c2bc185b..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/parallel_while.h +++ /dev/null @@ -1,194 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
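(Editorial sketch, not part of the patch: the parallel_sort.h removal completed above exposes iterator overloads with and without a comparator. A minimal call could look like this; sort_descending is an illustrative name.)

    #include "tbb/parallel_sort.h"
    #include <vector>
    #include <functional>

    void sort_descending( std::vector<int>& v ) {
        // Iterator overload with an explicit comparator; as the deleted
        // header shows, inputs below the 500-element cutoff fall back
        // to a plain std::sort.
        tbb::parallel_sort( v.begin(), v.end(), std::greater<int>() );
    }
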
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_parallel_while -#define __TBB_parallel_while - -#include "task.h" -#include - -namespace tbb { - -template -class parallel_while; - -//! @cond INTERNAL -namespace internal { - - template class while_task; - - //! For internal use only. - /** Executes one iteration of a while. - @ingroup algorithms */ - template - class while_iteration_task: public task { - const Body& my_body; - typename Body::argument_type my_value; - /*override*/ task* execute() { - my_body(my_value); - return NULL; - } - while_iteration_task( const typename Body::argument_type& value, const Body& body ) : - my_body(body), my_value(value) - {} - template friend class while_group_task; - friend class tbb::parallel_while; - }; - - //! For internal use only - /** Unpacks a block of iterations. - @ingroup algorithms */ - template - class while_group_task: public task { - static const size_t max_arg_size = 4; - const Body& my_body; - size_t size; - typename Body::argument_type my_arg[max_arg_size]; - while_group_task( const Body& body ) : my_body(body), size(0) {} - /*override*/ task* execute() { - typedef while_iteration_task iteration_type; - __TBB_ASSERT( size>0, NULL ); - task_list list; - task* t; - size_t k=0; - for(;;) { - t = new( allocate_child() ) iteration_type(my_arg[k],my_body); - if( ++k==size ) break; - list.push_back(*t); - } - set_ref_count(int(k+1)); - spawn(list); - spawn_and_wait_for_all(*t); - return NULL; - } - template friend class while_task; - }; - - //! For internal use only. - /** Gets block of iterations from a stream and packages them into a while_group_task. - @ingroup algorithms */ - template - class while_task: public task { - Stream& my_stream; - const Body& my_body; - empty_task& my_barrier; - /*override*/ task* execute() { - typedef while_group_task block_type; - block_type& t = *new( allocate_additional_child_of(my_barrier) ) block_type(my_body); - size_t k=0; - while( my_stream.pop_if_present(t.my_arg[k]) ) { - if( ++k==block_type::max_arg_size ) { - // There might be more iterations. - recycle_to_reexecute(); - break; - } - } - if( k==0 ) { - destroy(t); - return NULL; - } else { - t.size = k; - return &t; - } - } - while_task( Stream& stream, const Body& body, empty_task& barrier ) : - my_stream(stream), - my_body(body), - my_barrier(barrier) - {} - friend class tbb::parallel_while; - }; - -} // namespace internal -//! @endcond - -//! Parallel iteration over a stream, with optional addition of more work. -/** The Body b has the requirement: \n - "b(v)" \n - "b.argument_type" \n - where v is an argument_type - @ingroup algorithms */ -template -class parallel_while: internal::no_copy { -public: - //! Construct empty non-running parallel while. - parallel_while() : my_body(NULL), my_barrier(NULL) {} - - //! Destructor cleans up data members before returning. - ~parallel_while() { - if( my_barrier ) { - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - } - } - - //! Type of items - typedef typename Body::argument_type value_type; - - //! Apply body.apply to each item in the stream. - /** A Stream s has the requirements \n - "S::value_type" \n - "s.pop_if_present(value) is convertible to bool */ - template - void run( Stream& stream, const Body& body ); - - //! Add a work item while running. - /** Should be executed only by body.apply or a thread spawned therefrom. 
*/ - void add( const value_type& item ); - -private: - const Body* my_body; - empty_task* my_barrier; -}; - -template -template -void parallel_while::run( Stream& stream, const Body& body ) { - using namespace internal; - empty_task& barrier = *new( task::allocate_root() ) empty_task(); - my_body = &body; - my_barrier = &barrier; - my_barrier->set_ref_count(2); - while_task& w = *new( my_barrier->allocate_child() ) while_task( stream, body, barrier ); - my_barrier->spawn_and_wait_for_all(w); - my_barrier->destroy(*my_barrier); - my_barrier = NULL; - my_body = NULL; -} - -template -void parallel_while::add( const value_type& item ) { - __TBB_ASSERT(my_barrier,"attempt to add to parallel_while that is not running"); - typedef internal::while_iteration_task iteration_type; - iteration_type& i = *new( task::allocate_additional_child_of(*my_barrier) ) iteration_type(item,*my_body); - task::self().spawn( i ); -} - -} // namespace - -#endif /* __TBB_parallel_while */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/partitioner.h b/deal.II/bundled/tbb30_104oss/include/tbb/partitioner.h deleted file mode 100644 index 98db3ac4dc..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/partitioner.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_partitioner_H -#define __TBB_partitioner_H - -#include "task.h" - -namespace tbb { -class affinity_partitioner; - -//! @cond INTERNAL -namespace internal { -size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor(); - -//! Defines entry points into tbb run-time library; -/** The entry points are the constructor and destructor. */ -class affinity_partitioner_base_v3: no_copy { - friend class tbb::affinity_partitioner; - //! Array that remembers affinities of tree positions to affinity_id. - /** NULL if my_size==0. */ - affinity_id* my_array; - //! Number of elements in my_array. - size_t my_size; - //! Zeros the fields. - affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {} - //! Deallocates my_array. - ~affinity_partitioner_base_v3() {resize(0);} - //! Resize my_array. - /** Retains values if resulting size is the same. 
*/ - void __TBB_EXPORTED_METHOD resize( unsigned factor ); - friend class affinity_partition_type; -}; - -//! Provides default methods for partition objects without affinity. -class partition_type_base { -public: - void set_affinity( task & ) {} - void note_affinity( task::affinity_id ) {} - task* continue_after_execute_range() {return NULL;} - bool decide_whether_to_delay() {return false;} - void spawn_or_delay( bool, task& b ) { - task::spawn(b); - } -}; - -class affinity_partition_type; - -template class start_for; -template class start_reduce; -template class start_reduce_with_affinity; -template class start_scan; - -} // namespace internal -//! @endcond - -//! A simple partitioner -/** Divides the range until the range is not divisible. - @ingroup algorithms */ -class simple_partitioner { -public: - simple_partitioner() {} -private: - template friend class internal::start_for; - template friend class internal::start_reduce; - template friend class internal::start_scan; - - class partition_type: public internal::partition_type_base { - public: - bool should_execute_range(const task& ) {return false;} - partition_type( const simple_partitioner& ) {} - partition_type( const partition_type&, split ) {} - }; -}; - -//! An auto partitioner -/** The range is initial divided into several large chunks. - Chunks are further subdivided into VICTIM_CHUNKS pieces if they are stolen and divisible. - @ingroup algorithms */ -class auto_partitioner { -public: - auto_partitioner() {} - -private: - template friend class internal::start_for; - template friend class internal::start_reduce; - template friend class internal::start_scan; - - class partition_type: public internal::partition_type_base { - size_t num_chunks; - static const size_t VICTIM_CHUNKS = 4; -public: - bool should_execute_range(const task &t) { - if( num_chunks friend class internal::start_for; - template friend class internal::start_reduce; - template friend class internal::start_reduce_with_affinity; - template friend class internal::start_scan; - - typedef internal::affinity_partition_type partition_type; - friend class internal::affinity_partition_type; -}; - -//! @cond INTERNAL -namespace internal { - -class affinity_partition_type: public no_copy { - //! Must be power of two - static const unsigned factor = 16; - static const size_t VICTIM_CHUNKS = 4; - - internal::affinity_id* my_array; - task_list delay_list; - unsigned map_begin, map_end; - size_t num_chunks; -public: - affinity_partition_type( affinity_partitioner& ap ) { - __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" ); - ap.resize(factor); - my_array = ap.my_array; - map_begin = 0; - map_end = unsigned(ap.my_size); - num_chunks = internal::get_initial_auto_partitioner_divisor(); - } - affinity_partition_type(affinity_partition_type& p, split) : my_array(p.my_array) { - __TBB_ASSERT( p.map_end-p.map_beginfactor ) - d &= 0u-factor; - map_end = e; - map_begin = p.map_end = e-d; - } - - bool should_execute_range(const task &t) { - if( num_chunks < VICTIM_CHUNKS && t.is_stolen_task() ) - num_chunks = VICTIM_CHUNKS; - return num_chunks == 1; - } - - void set_affinity( task &t ) { - if( map_begin - -namespace tbb { - -class pipeline; -class filter; - -//! 
@cond INTERNAL -namespace internal { - -// The argument for PIPELINE_VERSION should be an integer between 2 and 9 -#define __TBB_PIPELINE_VERSION(x) (unsigned char)(x-2)<<1 - -typedef unsigned long Token; -typedef long tokendiff_t; -class stage_task; -class input_buffer; -class pipeline_root_task; -class pipeline_cleaner; - -} // namespace internal - -namespace interface5 { - template class filter_t; - - namespace internal { - class pipeline_proxy; - } -} - -//! @endcond - -//! A stage in a pipeline. -/** @ingroup algorithms */ -class filter: internal::no_copy { -private: - //! Value used to mark "not in pipeline" - static filter* not_in_pipeline() {return reinterpret_cast(intptr_t(-1));} - - //! The lowest bit 0 is for parallel vs. serial - static const unsigned char filter_is_serial = 0x1; - - //! 4th bit distinguishes ordered vs unordered filters. - /** The bit was not set for parallel filters in TBB 2.1 and earlier, - but is_ordered() function always treats parallel filters as out of order. */ - static const unsigned char filter_is_out_of_order = 0x1<<4; - - //! 5th bit distinguishes thread-bound and regular filters. - static const unsigned char filter_is_bound = 0x1<<5; - - //! 7th bit defines exception propagation mode expected by the application. - static const unsigned char exact_exception_propagation = -#if TBB_USE_CAPTURED_EXCEPTION - 0x0; -#else - 0x1<<7; -#endif /* TBB_USE_CAPTURED_EXCEPTION */ - - static const unsigned char current_version = __TBB_PIPELINE_VERSION(5); - static const unsigned char version_mask = 0x7<<1; // bits 1-3 are for version -public: - enum mode { - //! processes multiple items in parallel and in no particular order - parallel = current_version | filter_is_out_of_order, - //! processes items one at a time; all such filters process items in the same order - serial_in_order = current_version | filter_is_serial, - //! processes items one at a time and in no particular order - serial_out_of_order = current_version | filter_is_serial | filter_is_out_of_order, - //! @deprecated use serial_in_order instead - serial = serial_in_order - }; -protected: - filter( bool is_serial_ ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast((is_serial_ ? serial : parallel) | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - - filter( mode filter_mode ) : - next_filter_in_pipeline(not_in_pipeline()), - my_input_buffer(NULL), - my_filter_mode(static_cast(filter_mode | exact_exception_propagation)), - prev_filter_in_pipeline(not_in_pipeline()), - my_pipeline(NULL), - next_segment(NULL) - {} - -public: - //! True if filter is serial. - bool is_serial() const { - return bool( my_filter_mode & filter_is_serial ); - } - - //! True if filter must receive stream in order. - bool is_ordered() const { - return (my_filter_mode & (filter_is_out_of_order|filter_is_serial))==filter_is_serial; - } - - //! True if filter is thread-bound. - bool is_bound() const { - return ( my_filter_mode & filter_is_bound )==filter_is_bound; - } - - //! Operate on an item from the input stream, and return item for output stream. - /** Returns NULL if filter is a sink. */ - virtual void* operator()( void* item ) = 0; - - //! Destroy filter. - /** If the filter was added to a pipeline, the pipeline must be destroyed first. */ - virtual __TBB_EXPORTED_METHOD ~filter(); - -#if __TBB_TASK_GROUP_CONTEXT - //! Destroys item if pipeline was cancelled. - /** Required to prevent memory leaks. 
- Note it can be called concurrently even for serial filters.*/ - virtual void finalize( void* /*item*/ ) {}; -#endif - -private: - //! Pointer to next filter in the pipeline. - filter* next_filter_in_pipeline; - - //! has the filter not yet processed all the tokens it will ever see? - // (pipeline has not yet reached end_of_input or this filter has not yet - // seen the last token produced by input_filter) - bool has_more_work(); - - //! Buffer for incoming tokens, or NULL if not required. - /** The buffer is required if the filter is serial or follows a thread-bound one. */ - internal::input_buffer* my_input_buffer; - - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class pipeline; - friend class thread_bound_filter; - - //! Storage for filter mode and dynamically checked implementation version. - const unsigned char my_filter_mode; - - //! Pointer to previous filter in the pipeline. - filter* prev_filter_in_pipeline; - - //! Pointer to the pipeline. - pipeline* my_pipeline; - - //! Pointer to the next "segment" of filters, or NULL if not required. - /** In each segment, the first filter is not thread-bound but follows a thread-bound one. */ - filter* next_segment; -}; - -//! A stage in a pipeline served by a user thread. -/** @ingroup algorithms */ -class thread_bound_filter: public filter { -public: - enum result_type { - // item was processed - success, - // item is currently not available - item_not_available, - // there are no more items to process - end_of_stream - }; -protected: - thread_bound_filter(mode filter_mode): - filter(static_cast(filter_mode | filter::filter_is_bound | filter::exact_exception_propagation)) - {} -public: - //! If a data item is available, invoke operator() on that item. - /** This interface is non-blocking. - Returns 'success' if an item was processed. - Returns 'item_not_available' if no item can be processed now - but more may arrive in the future, or if token limit is reached. - Returns 'end_of_stream' if there are no more items to process. */ - result_type __TBB_EXPORTED_METHOD try_process_item(); - - //! Wait until a data item becomes available, and invoke operator() on that item. - /** This interface is blocking. - Returns 'success' if an item was processed. - Returns 'end_of_stream' if there are no more items to process. - Never returns 'item_not_available', as it blocks until another return condition applies. */ - result_type __TBB_EXPORTED_METHOD process_item(); - -private: - //! Internal routine for item processing - result_type internal_process_item(bool is_blocking); -}; - -//! A processing pipeline that applies filters to items. -/** @ingroup algorithms */ -class pipeline { -public: - //! Construct empty pipeline. - __TBB_EXPORTED_METHOD pipeline(); - - /** Though the current implementation declares the destructor virtual, do not rely on this - detail. The virtualness is deprecated and may disappear in future versions of TBB. */ - virtual __TBB_EXPORTED_METHOD ~pipeline(); - - //! Add filter to end of pipeline. - void __TBB_EXPORTED_METHOD add_filter( filter& filter_ ); - - //! Run the pipeline to completion. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Run the pipeline to completion with user-supplied context. - void __TBB_EXPORTED_METHOD run( size_t max_number_of_live_tokens, tbb::task_group_context& context ); -#endif - - //! Remove all filters from the pipeline. 
- void __TBB_EXPORTED_METHOD clear(); - -private: - friend class internal::stage_task; - friend class internal::pipeline_root_task; - friend class filter; - friend class thread_bound_filter; - friend class internal::pipeline_cleaner; - friend class tbb::interface5::internal::pipeline_proxy; - - //! Pointer to first filter in the pipeline. - filter* filter_list; - - //! Pointer to location where address of next filter to be added should be stored. - filter* filter_end; - - //! task who's reference count is used to determine when all stages are done. - task* end_counter; - - //! Number of idle tokens waiting for input stage. - atomic input_tokens; - - //! Global counter of tokens - atomic token_counter; - - //! False until fetch_input returns NULL. - bool end_of_input; - - //! True if the pipeline contains a thread-bound filter; false otherwise. - bool has_thread_bound_filters; - - //! Remove filter from pipeline. - void remove_filter( filter& filter_ ); - - //! Not used, but retained to satisfy old export files. - void __TBB_EXPORTED_METHOD inject_token( task& self ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Does clean up if pipeline is cancelled or exception occured - void clear_filters(); -#endif -}; - -//------------------------------------------------------------------------ -// Support for lambda-friendly parallel_pipeline interface -//------------------------------------------------------------------------ - -namespace interface5 { - -namespace internal { - template class concrete_filter; -} - -//! input_filter control to signal end-of-input for parallel_pipeline -class flow_control { - bool is_pipeline_stopped; - flow_control() { is_pipeline_stopped = false; } - template friend class internal::concrete_filter; -public: - void stop() { is_pipeline_stopped = true; } -}; - -//! 
@cond INTERNAL -namespace internal { - -template -class concrete_filter: public tbb::filter { - const Body& my_body; - - typedef typename tbb::tbb_allocator u_allocator; - typedef typename tbb::tbb_allocator t_allocator; - - /*override*/ void* operator()(void* input) { - T* temp_input = (T*)input; - // Call user's operator()() here - U* output_u = u_allocator().allocate(1); - void* output = (void*) new (output_u) U(my_body(*temp_input)); - t_allocator().destroy(temp_input); - t_allocator().deallocate(temp_input,1); - return output; - } - -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -template -class concrete_filter: public filter { - const Body& my_body; - - typedef typename tbb::tbb_allocator u_allocator; - - /*override*/void* operator()(void*) { - flow_control control; - U* output_u = u_allocator().allocate(1); - (void) new (output_u) U(my_body(control)); - if(control.is_pipeline_stopped) { - u_allocator().destroy(output_u); - u_allocator().deallocate(output_u,1); - output_u = NULL; - } - return (void*)output_u; - } -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -template -class concrete_filter: public filter { - const Body& my_body; - - typedef typename tbb::tbb_allocator t_allocator; - - /*override*/ void* operator()(void* input) { - T* temp_input = (T*)input; - my_body(*temp_input); - t_allocator().destroy(temp_input); - t_allocator().deallocate(temp_input,1); - return NULL; - } -public: - concrete_filter(tbb::filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -template -class concrete_filter: public filter { - const Body& my_body; - - /** Override privately because it is always called virtually */ - /*override*/ void* operator()(void*) { - flow_control control; - my_body(control); - void* output = control.is_pipeline_stopped ? NULL : (void*)(intptr_t)-1; - return output; - } -public: - concrete_filter(filter::mode filter_mode, const Body& body) : filter(filter_mode), my_body(body) {} -}; - -//! The class that represents an object of the pipeline for parallel_pipeline(). -/** It primarily serves as RAII class that deletes heap-allocated filter instances. */ -class pipeline_proxy { - tbb::pipeline my_pipe; -public: - pipeline_proxy( const filter_t& filter_chain ); - ~pipeline_proxy() { - while( filter* f = my_pipe.filter_list ) - delete f; // filter destructor removes it from the pipeline - } - tbb::pipeline* operator->() { return &my_pipe; } -}; - -//! Abstract base class that represents a node in a parse tree underlying a filter_t. -/** These nodes are always heap-allocated and can be shared by filter_t objects. */ -class filter_node: tbb::internal::no_copy { - /** Count must be atomic because it is hidden state for user, but might be shared by threads. */ - tbb::atomic ref_count; -protected: - filter_node() { - ref_count = 0; -#ifdef __TBB_TEST_FILTER_NODE_COUNT - ++(__TBB_TEST_FILTER_NODE_COUNT); -#endif - } -public: - //! Add concrete_filter to pipeline - virtual void add_to( pipeline& ) = 0; - //! Increment reference count - void add_ref() {++ref_count;} - //! Decrement reference count and delete if it becomes zero. - void remove_ref() { - __TBB_ASSERT(ref_count>0,"ref_count underflow"); - if( --ref_count==0 ) - delete this; - } - virtual ~filter_node() { -#ifdef __TBB_TEST_FILTER_NODE_COUNT - --(__TBB_TEST_FILTER_NODE_COUNT); -#endif - } -}; - -//! 
Node in parse tree representing result of make_filter. -template -class filter_node_leaf: public filter_node { - const tbb::filter::mode mode; - const Body body; - /*override*/void add_to( pipeline& p ) { - concrete_filter* f = new concrete_filter(mode,body); - p.add_filter( *f ); - } -public: - filter_node_leaf( tbb::filter::mode m, const Body& b ) : mode(m), body(b) {} -}; - -//! Node in parse tree representing join of two filters. -class filter_node_join: public filter_node { - friend class filter_node; // to suppress GCC 3.2 warnings - filter_node& left; - filter_node& right; - /*override*/~filter_node_join() { - left.remove_ref(); - right.remove_ref(); - } - /*override*/void add_to( pipeline& p ) { - left.add_to(p); - right.add_to(p); - } -public: - filter_node_join( filter_node& x, filter_node& y ) : left(x), right(y) { - left.add_ref(); - right.add_ref(); - } -}; - -} // namespace internal -//! @endcond - -//! Create a filter to participate in parallel_pipeline -template -filter_t make_filter(tbb::filter::mode mode, const Body& body) { - return new internal::filter_node_leaf(mode, body); -} - -template -filter_t operator& (const filter_t& left, const filter_t& right) { - __TBB_ASSERT(left.root,"cannot use default-constructed filter_t as left argument of '&'"); - __TBB_ASSERT(right.root,"cannot use default-constructed filter_t as right argument of '&'"); - return new internal::filter_node_join(*left.root,*right.root); -} - -//! Class representing a chain of type-safe pipeline filters -template -class filter_t { - typedef internal::filter_node filter_node; - filter_node* root; - filter_t( filter_node* root_ ) : root(root_) { - root->add_ref(); - } - friend class internal::pipeline_proxy; - template - friend filter_t make_filter(tbb::filter::mode, const Body& ); - template - friend filter_t operator& (const filter_t& , const filter_t& ); -public: - filter_t() : root(NULL) {} - filter_t( const filter_t& rhs ) : root(rhs.root) { - if( root ) root->add_ref(); - } - template - filter_t( tbb::filter::mode mode, const Body& body ) : - root( new internal::filter_node_leaf(mode, body) ) { - root->add_ref(); - } - - void operator=( const filter_t& rhs ) { - // Order of operations below carefully chosen so that reference counts remain correct - // in unlikely event that remove_ref throws exception. - filter_node* old = root; - root = rhs.root; - if( root ) root->add_ref(); - if( old ) old->remove_ref(); - } - ~filter_t() { - if( root ) root->remove_ref(); - } - void clear() { - // Like operator= with filter_t() on right side. 
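(Editorial sketch, not part of the patch: the filter_t / make_filter machinery above, together with the parallel_pipeline entry point that follows, forms the lambda-friendly pipeline interface being deleted. A three-stage chain might be assembled like this; Source, Square, Print, and run_pipeline are illustrative names.)

    #include "tbb/pipeline.h"
    #include <cstdio>

    // Generates the integers 0..99, squares them, prints them.
    struct Source {
        mutable int i;
        Source() : i(0) {}
        int operator()( tbb::flow_control& fc ) const {
            if( i >= 100 ) { fc.stop(); return 0; }   // signal end of input
            return i++;
        }
    };
    struct Square {
        int operator()( int x ) const { return x * x; }
    };
    struct Print {
        void operator()( int x ) const { std::printf("%d\n", x); }
    };

    void run_pipeline() {
        tbb::parallel_pipeline( /*max_number_of_live_tokens=*/8,
            tbb::make_filter<void,int>( tbb::filter::serial_in_order, Source() ) &
            tbb::make_filter<int,int>( tbb::filter::parallel,        Square() ) &
            tbb::make_filter<int,void>( tbb::filter::serial_in_order, Print() ) );
    }
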
- if( root ) { - filter_node* old = root; - root = NULL; - old->remove_ref(); - } - } -}; - -inline internal::pipeline_proxy::pipeline_proxy( const filter_t& filter_chain ) : my_pipe() { - __TBB_ASSERT( filter_chain.root, "cannot apply parallel_pipeline to default-constructed filter_t" ); - filter_chain.root->add_to(my_pipe); -} - -inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain -#if __TBB_TASK_GROUP_CONTEXT - , tbb::task_group_context& context -#endif - ) { - internal::pipeline_proxy pipe(filter_chain); - // tbb::pipeline::run() is called via the proxy - pipe->run(max_number_of_live_tokens -#if __TBB_TASK_GROUP_CONTEXT - , context -#endif - ); -} - -#if __TBB_TASK_GROUP_CONTEXT -inline void parallel_pipeline(size_t max_number_of_live_tokens, const filter_t& filter_chain) { - tbb::task_group_context context; - parallel_pipeline(max_number_of_live_tokens, filter_chain, context); -} -#endif // __TBB_TASK_GROUP_CONTEXT - -} // interface5 - -using interface5::flow_control; -using interface5::filter_t; -using interface5::make_filter; -using interface5::parallel_pipeline; - -} // tbb - -#endif /* __TBB_pipeline_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/queuing_mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/queuing_mutex.h deleted file mode 100644 index ec9832c784..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/queuing_mutex.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_queuing_mutex_H -#define __TBB_queuing_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Queuing lock with local-only spinning. -/** @ingroup synchronization */ -class queuing_mutex { -public: - //! Construct unacquired mutex. - queuing_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! 
The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields to mean "no lock held". - void initialize() { - mutex = NULL; -#if TBB_USE_ASSERT - internal::poison_pointer(next); -#endif /* TBB_USE_ASSERT */ - } - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_mutex& m ) { - initialize(); - acquire(m); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void __TBB_EXPORTED_METHOD acquire( queuing_mutex& m ); - - //! Acquire lock on given mutex if free (i.e. non-blocking) - bool __TBB_EXPORTED_METHOD try_acquire( queuing_mutex& m ); - - //! Release lock. - void __TBB_EXPORTED_METHOD release(); - - private: - //! The pointer to the mutex owned, or NULL if not holding a mutex. - queuing_mutex* mutex; - - //! The pointer to the next competitor for a mutex - scoped_lock *next; - - //! The local spin-wait variable - /** Inverted (0 - blocked, 1 - acquired the mutex) for the sake of - zero-initialization. Defining it as an entire word instead of - a byte seems to help performance slightly. */ - uintptr_t going; - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - - friend class scoped_lock; -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_mutex) - -} // namespace tbb - -#endif /* __TBB_queuing_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/queuing_rw_mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/queuing_rw_mutex.h deleted file mode 100644 index 5e35478077..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/queuing_rw_mutex.h +++ /dev/null @@ -1,173 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
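// Editorial sketch, not part of the patch: the scoped locking pattern described
// above, applied to tbb::queuing_mutex. Only the TBB names are real; the counter
// and the function are illustrative.
#include "tbb/queuing_mutex.h"

static tbb::queuing_mutex counter_mutex;
static long shared_counter = 0;

void increment_counter() {
    // The lock is released automatically when 'lock' goes out of scope,
    // even if the protected code throws.
    tbb::queuing_mutex::scoped_lock lock(counter_mutex);
    ++shared_counter;
}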
-*/ - -#ifndef __TBB_queuing_rw_mutex_H -#define __TBB_queuing_rw_mutex_H - -#include "tbb_config.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#include "atomic.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! Reader-writer lock with local-only spinning. -/** Adapted from Krieger, Stumm, et al. pseudocode at - http://www.eecg.toronto.edu/parallel/pubs_abs.html#Krieger_etal_ICPP93 - @ingroup synchronization */ -class queuing_rw_mutex { -public: - //! Construct unacquired mutex. - queuing_rw_mutex() { - q_tail = NULL; -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! Destructor asserts if the mutex is acquired, i.e. q_tail is non-NULL - ~queuing_rw_mutex() { -#if TBB_USE_ASSERT - __TBB_ASSERT( !q_tail, "destruction of an acquired mutex"); -#endif - } - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - //! Initialize fields - void initialize() { - mutex = NULL; -#if TBB_USE_ASSERT - state = 0xFF; // Set to invalid state - internal::poison_pointer(next); - internal::poison_pointer(prev); -#endif /* TBB_USE_ASSERT */ - } - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() {initialize();} - - //! Acquire lock on given mutex. - scoped_lock( queuing_rw_mutex& m, bool write=true ) { - initialize(); - acquire(m,write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( queuing_rw_mutex& m, bool write=true ); - - //! Try acquire lock on given mutex. - bool try_acquire( queuing_rw_mutex& m, bool write=true ); - - //! Release lock. - void release(); - - //! Upgrade reader to become a writer. - /** Returns true if the upgrade happened without re-acquiring the lock and false if opposite */ - bool upgrade_to_writer(); - - //! Downgrade writer to become a reader. - bool downgrade_to_reader(); - - private: - //! The pointer to the current mutex to work - queuing_rw_mutex* mutex; - - //! The pointer to the previous and next competitors for a mutex - scoped_lock * prev, * next; - - typedef unsigned char state_t; - - //! State of the request: reader, writer, active reader, other service states - atomic state; - - //! The local spin-wait variable - /** Corresponds to "spin" in the pseudocode but inverted for the sake of zero-initialization */ - unsigned char going; - - //! A tiny internal lock - unsigned char internal_lock; - - //! Acquire the internal lock - void acquire_internal_lock(); - - //! Try to acquire the internal lock - /** Returns true if lock was successfully acquired. */ - bool try_acquire_internal_lock(); - - //! Release the internal lock - void release_internal_lock(); - - //! Wait for internal lock to be released - void wait_for_release_of_internal_lock(); - - //! 
A helper function - void unblock_or_wait_on_internal_lock( uintptr_t ); - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = true; - -private: - //! The last competitor requesting the lock - atomic q_tail; - -}; - -__TBB_DEFINE_PROFILING_SET_NAME(queuing_rw_mutex) - -} // namespace tbb - -#endif /* __TBB_queuing_rw_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/reader_writer_lock.h b/deal.II/bundled/tbb30_104oss/include/tbb/reader_writer_lock.h deleted file mode 100644 index 3a639693b1..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/reader_writer_lock.h +++ /dev/null @@ -1,240 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_reader_writer_lock_H -#define __TBB_reader_writer_lock_H - -#include "tbb_thread.h" -#include "tbb_allocator.h" -#include "atomic.h" - -namespace tbb { -namespace interface5 { -//! Writer-preference reader-writer lock with local-only spinning on readers. -/** Loosely adapted from Mellor-Crummey and Scott pseudocode at - http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp - @ingroup synchronization */ - class reader_writer_lock : tbb::internal::no_copy { - public: - friend class scoped_lock; - friend class scoped_lock_read; - //! Status type for nodes associated with lock instances - /** waiting_nonblocking: the wait state for nonblocking lock - instances; for writes, these transition straight to active - states; for reads, these are unused. - - waiting: the start and spin state for all lock instances; these will - transition to active state when appropriate. Non-blocking write locks - transition from this state to waiting_nonblocking immediately. - - active: the active state means that the lock instance holds - the lock; it will transition to invalid state during node deletion - - invalid: the end state for all nodes; this is set in the - destructor so if we encounter this state, we are looking at - memory that has already been freed - - The state diagrams below describe the status transitions. 
- Single arrows indicate that the thread that owns the node is - responsible for the transition; double arrows indicate that - any thread could make the transition. - - State diagram for scoped_lock status: - - waiting ----------> waiting_nonblocking - | _____________/ | - V V V - active -----------------> invalid - - State diagram for scoped_lock_read status: - - waiting - | - V - active ----------------->invalid - - */ - enum status_t { waiting_nonblocking, waiting, active, invalid }; - - //! Constructs a new reader_writer_lock - reader_writer_lock() { - internal_construct(); - } - - //! Destructs a reader_writer_lock object - ~reader_writer_lock() { - internal_destroy(); - } - - //! The scoped lock pattern for write locks - /** Scoped locks help avoid the common problem of forgetting to release the lock. - This type is also serves as the node for queuing locks. */ - class scoped_lock : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire write lock on the passed-in lock - scoped_lock(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the write lock - ~scoped_lock() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock* next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock that is not holding lock - scoped_lock(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! The scoped lock pattern for read locks - class scoped_lock_read : tbb::internal::no_copy { - public: - friend class reader_writer_lock; - - //! Construct with blocking attempt to acquire read lock on the passed-in lock - scoped_lock_read(reader_writer_lock& lock) { - internal_construct(lock); - } - - //! Destructor, releases the read lock - ~scoped_lock_read() { - internal_destroy(); - } - - void* operator new(size_t s) { - return tbb::internal::allocate_via_handler_v3(s); - } - void operator delete(void* p) { - tbb::internal::deallocate_via_handler_v3(p); - } - - private: - //! The pointer to the mutex to lock - reader_writer_lock *mutex; - //! The next queued competitor for the mutex - scoped_lock_read *next; - //! Status flag of the thread associated with this node - atomic status; - - //! Construct scoped_lock_read that is not holding lock - scoped_lock_read(); - - void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&); - void __TBB_EXPORTED_METHOD internal_destroy(); - }; - - //! Acquires the reader_writer_lock for write. - /** If the lock is currently held in write mode by another - context, the writer will block by spinning on a local - variable. Exceptions thrown: improper_lock The context tries - to acquire a reader_writer_lock that it already has write - ownership of.*/ - void __TBB_EXPORTED_METHOD lock(); - - //! Tries to acquire the reader_writer_lock for write. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. If the lock - is already held by this acquiring context, try_lock() returns - false. */ - bool __TBB_EXPORTED_METHOD try_lock(); - - //! Acquires the reader_writer_lock for read. 
- /** If the lock is currently held by a writer, this reader will - block and wait until the writers are done. Exceptions thrown: - improper_lock The context tries to acquire a - reader_writer_lock that it already has write ownership of. */ - void __TBB_EXPORTED_METHOD lock_read(); - - //! Tries to acquire the reader_writer_lock for read. - /** This function does not block. Return Value: True or false, - depending on whether the lock is acquired or not. */ - bool __TBB_EXPORTED_METHOD try_lock_read(); - - //! Releases the reader_writer_lock - void __TBB_EXPORTED_METHOD unlock(); - - private: - void __TBB_EXPORTED_METHOD internal_construct(); - void __TBB_EXPORTED_METHOD internal_destroy(); - - //! Attempts to acquire write lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - bool start_write(scoped_lock *); - //! Sets writer_head to w and attempts to unblock - void set_next_writer(scoped_lock *w); - //! Relinquishes write lock to next waiting writer or group of readers - void end_write(scoped_lock *); - //! Checks if current thread holds write lock - bool is_current_writer(); - - //! Attempts to acquire read lock - /** If unavailable, spins in blocking case, returns false in non-blocking case. */ - void start_read(scoped_lock_read *); - //! Unblocks pending readers - void unblock_readers(); - //! Relinquishes read lock by decrementing counter; last reader wakes pending writer - void end_read(); - - //! The list of pending readers - atomic reader_head; - //! The list of pending writers - atomic writer_head; - //! The last node in the list of pending writers - atomic writer_tail; - //! Writer that owns the mutex; tbb_thread::id() otherwise. - tbb_thread::id my_current_writer; - //! Status of mutex - atomic rdr_count_and_flags; -}; - -} // namespace interface5 - -using interface5::reader_writer_lock; - -} // namespace tbb - -#endif /* __TBB_reader_writer_lock_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/recursive_mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/recursive_mutex.h deleted file mode 100644 index f5ae5ed621..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/recursive_mutex.h +++ /dev/null @@ -1,240 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
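// Editorial sketch, not part of the patch: tbb::reader_writer_lock with a read
// lock for lookups and a write lock for updates. The map and the two functions
// are illustrative, not part of TBB.
#include "tbb/reader_writer_lock.h"
#include <map>
#include <string>

static tbb::reader_writer_lock table_lock;
static std::map<int,std::string> table;

std::string lookup( int key ) {
    // Several readers may hold the lock concurrently.
    tbb::reader_writer_lock::scoped_lock_read guard(table_lock);
    std::map<int,std::string>::const_iterator it = table.find(key);
    return it==table.end() ? std::string() : it->second;
}

void update( int key, const std::string& value ) {
    // Writers are preferred over newly arriving readers.
    tbb::reader_writer_lock::scoped_lock guard(table_lock);
    table[key] = value;
}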
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_recursive_mutex_H -#define __TBB_recursive_mutex_H - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#else -#include -#endif /* _WIN32||_WIN64 */ - -#include -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_profiling.h" - -namespace tbb { -//! Mutex that allows recursive mutex acquisition. -/** Mutex that allows recursive mutex acquisition. - @ingroup synchronization */ -class recursive_mutex { -public: - //! Construct unacquired recursive_mutex. - recursive_mutex() { -#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS - internal_construct(); -#else - #if _WIN32||_WIN64 - InitializeCriticalSection(&impl); - #else - pthread_mutexattr_t mtx_attr; - int error_code = pthread_mutexattr_init( &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); - - pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); - error_code = pthread_mutex_init( &impl, &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); - - pthread_mutexattr_destroy( &mtx_attr ); - #endif /* _WIN32||_WIN64*/ -#endif /* TBB_USE_ASSERT */ - }; - - ~recursive_mutex() { -#if TBB_USE_ASSERT - internal_destroy(); -#else - #if _WIN32||_WIN64 - DeleteCriticalSection(&impl); - #else - pthread_mutex_destroy(&impl); - - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - }; - - class scoped_lock; - friend class scoped_lock; - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock: internal::no_copy { - public: - //! Construct lock that has not acquired a recursive_mutex. - scoped_lock() : my_mutex(NULL) {}; - - //! Acquire lock on given mutex. - scoped_lock( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - my_mutex = &mutex; -#endif /* TBB_USE_ASSERT */ - acquire( mutex ); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( my_mutex ) - release(); - } - - //! Acquire lock on given mutex. - void acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - internal_acquire( mutex ); -#else - my_mutex = &mutex; - mutex.lock(); -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquire lock on given recursive_mutex. - bool try_acquire( recursive_mutex& mutex ) { -#if TBB_USE_ASSERT - return internal_try_acquire( mutex ); -#else - bool result = mutex.try_lock(); - if( result ) - my_mutex = &mutex; - return result; -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void release() { -#if TBB_USE_ASSERT - internal_release(); -#else - my_mutex->unlock(); - my_mutex = NULL; -#endif /* TBB_USE_ASSERT */ - } - - private: - //! The pointer to the current recursive_mutex to work - recursive_mutex* my_mutex; - - //! All checks from acquire using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m ); - - //! All checks from try_acquire using mutex.state were moved here - bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m ); - - //! 
All checks from release using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_release(); - - friend class recursive_mutex; - }; - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = true; - static const bool is_fair_mutex = false; - - // C++0x compatibility interface - - //! Acquire lock - void lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - new(tmp.begin()) scoped_lock(*this); -#else - #if _WIN32||_WIN64 - EnterCriticalSection(&impl); - #else - pthread_mutex_lock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { -#if TBB_USE_ASSERT - aligned_space tmp; - return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); -#else - #if _WIN32||_WIN64 - return TryEnterCriticalSection(&impl)!=0; - #else - return pthread_mutex_trylock(&impl)==0; - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Release lock - void unlock() { -#if TBB_USE_ASSERT - aligned_space tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.internal_release(); -#else - #if _WIN32||_WIN64 - LeaveCriticalSection(&impl); - #else - pthread_mutex_unlock(&impl); - #endif /* _WIN32||_WIN64 */ -#endif /* TBB_USE_ASSERT */ - } - - //! Return native_handle - #if _WIN32||_WIN64 - typedef LPCRITICAL_SECTION native_handle_type; - #else - typedef pthread_mutex_t* native_handle_type; - #endif - native_handle_type native_handle() { return (native_handle_type) &impl; } - -private: -#if _WIN32||_WIN64 - CRITICAL_SECTION impl; - enum state_t { - INITIALIZED=0x1234, - DESTROYED=0x789A, - } state; -#else - pthread_mutex_t impl; -#endif /* _WIN32||_WIN64 */ - - //! All checks from mutex constructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_construct(); - - //! All checks from mutex destructor using mutex.state were moved here - void __TBB_EXPORTED_METHOD internal_destroy(); -}; - -__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex) - -} // namespace tbb - -#endif /* __TBB_recursive_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/scalable_allocator.h b/deal.II/bundled/tbb30_104oss/include/tbb/scalable_allocator.h deleted file mode 100644 index 2293803a72..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/scalable_allocator.h +++ /dev/null @@ -1,205 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
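// Editorial sketch, not part of the patch: tbb::recursive_mutex allows the same
// thread to re-acquire a lock it already holds. The function is illustrative.
#include "tbb/recursive_mutex.h"

static tbb::recursive_mutex tree_mutex;

void visit( int level ) {
    // Re-entering from the same thread does not deadlock.
    tbb::recursive_mutex::scoped_lock lock(tree_mutex);
    if( level>0 )
        visit(level-1);   // recursive call re-acquires the mutex already held
}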
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_scalable_allocator_H -#define __TBB_scalable_allocator_H -/** @file */ - -#include /* Need ptrdiff_t and size_t from here. */ - -#if !defined(__cplusplus) && __ICC==1100 - #pragma warning (push) - #pragma warning (disable: 991) -#endif - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -#if _MSC_VER >= 1400 -#define __TBB_EXPORTED_FUNC __cdecl -#else -#define __TBB_EXPORTED_FUNC -#endif - -/** The "malloc" analogue to allocate block of memory of size bytes. - * @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_malloc (size_t size); - -/** The "free" analogue to discard a previously allocated piece of memory. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_free (void* ptr); - -/** The "realloc" analogue complementing scalable_malloc. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_realloc (void* ptr, size_t size); - -/** The "calloc" analogue complementing scalable_malloc. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_calloc (size_t nobj, size_t size); - -/** The "posix_memalign" analogue. - @ingroup memory_allocation */ -int __TBB_EXPORTED_FUNC scalable_posix_memalign (void** memptr, size_t alignment, size_t size); - -/** The "_aligned_malloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_malloc (size_t size, size_t alignment); - -/** The "_aligned_realloc" analogue. - @ingroup memory_allocation */ -void * __TBB_EXPORTED_FUNC scalable_aligned_realloc (void* ptr, size_t size, size_t alignment); - -/** The "_aligned_free" analogue. - @ingroup memory_allocation */ -void __TBB_EXPORTED_FUNC scalable_aligned_free (void* ptr); - -/** The analogue of _msize/malloc_size/malloc_usable_size. - Returns the usable size of a memory block previously allocated by scalable_*, - or 0 (zero) if ptr does not point to such a block. - @ingroup memory_allocation */ -size_t __TBB_EXPORTED_FUNC scalable_msize (void* ptr); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ - -#ifdef __cplusplus - -#include /* To use new with the placement argument */ - -/* Ensure that including this header does not cause implicit linkage with TBB */ -#ifndef __TBB_NO_IMPLICIT_LINKAGE - #define __TBB_NO_IMPLICIT_LINKAGE 1 - #include "tbb_stddef.h" - #undef __TBB_NO_IMPLICIT_LINKAGE -#else - #include "tbb_stddef.h" -#endif - - -namespace tbb { - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. 
- @ingroup memory_allocation */ -template -class scalable_allocator { -public: - typedef typename internal::allocator_type::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template struct rebind { - typedef scalable_allocator other; - }; - - scalable_allocator() throw() {} - scalable_allocator( const scalable_allocator& ) throw() {} - template scalable_allocator(const scalable_allocator&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ =0 ) { - return static_cast( scalable_malloc( n * sizeof(value_type) ) ); - } - - //! Free previously allocated block of memory - void deallocate( pointer p, size_type ) { - scalable_free( p ); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - size_type absolutemax = static_cast(-1) / sizeof (value_type); - return (absolutemax > 0 ? absolutemax : 1); - } - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} - void destroy( pointer p ) {p->~value_type();} -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class scalable_allocator { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template struct rebind { - typedef scalable_allocator other; - }; -}; - -template -inline bool operator==( const scalable_allocator&, const scalable_allocator& ) {return true;} - -template -inline bool operator!=( const scalable_allocator&, const scalable_allocator& ) {return false;} - -} // namespace tbb - -#if _MSC_VER - #if __TBB_BUILD && !defined(__TBBMALLOC_NO_IMPLICIT_LINKAGE) - #define __TBBMALLOC_NO_IMPLICIT_LINKAGE 1 - #endif - - #if !__TBBMALLOC_NO_IMPLICIT_LINKAGE - #ifdef _DEBUG - #pragma comment(lib, "tbbmalloc_debug.lib") - #else - #pragma comment(lib, "tbbmalloc.lib") - #endif - #endif - - -#endif - -#endif /* __cplusplus */ - -#if !defined(__cplusplus) && __ICC==1100 - #pragma warning (pop) -#endif // ICC 11.0 warning 991 is back - -#endif /* __TBB_scalable_allocator_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/spin_mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/spin_mutex.h deleted file mode 100644 index 5d475138da..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/spin_mutex.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
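// Editorial sketch, not part of the patch: the two interfaces of the scalable
// allocator deleted above, assuming the tbbmalloc library is linked in.
#include "tbb/scalable_allocator.h"
#include <vector>

int main() {
    // C interface: analogues of malloc/free.
    void* raw = scalable_malloc(1024);
    scalable_free(raw);

    // C++ interface: an STL-compatible allocator template.
    std::vector<int, tbb::scalable_allocator<int> > v;
    v.push_back(42);
    return 0;
}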
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_spin_mutex_H -#define __TBB_spin_mutex_H - -#include -#include -#include "aligned_space.h" -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" - -namespace tbb { - -//! A lock that occupies a single byte. -/** A spin_mutex is a spin mutex that fits in a single byte. - It should be used only for locking short critical sections - (typically less than 20 instructions) when fairness is not an issue. - If zero-initialized, the mutex is considered unheld. - @ingroup synchronization */ -class spin_mutex { - //! 0 if lock is released, 1 if lock is acquired. - unsigned char flag; - -public: - //! Construct unacquired lock. - /** Equivalent to zero-initialization of *this. */ - spin_mutex() : flag(0) { -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - - //! Represents acquisition of a mutex. - class scoped_lock : internal::no_copy { - private: - //! Points to currently held mutex, or NULL if no lock is held. - spin_mutex* my_mutex; - - //! Value to store into spin_mutex::flag to unlock the mutex. - uintptr_t my_unlock_value; - - //! Like acquire, but with ITT instrumentation. - void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m ); - - //! Like try_acquire, but with ITT instrumentation. - bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m ); - - //! Like release, but with ITT instrumentation. - void __TBB_EXPORTED_METHOD internal_release(); - - friend class spin_mutex; - - public: - //! Construct without acquiring a mutex. - scoped_lock() : my_mutex(NULL), my_unlock_value(0) {} - - //! Construct and acquire lock on a mutex. - scoped_lock( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - my_mutex=NULL; - internal_acquire(m); -#else - my_unlock_value = __TBB_LockByte(m.flag); - my_mutex=&m; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Acquire lock. - void acquire( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_acquire(m); -#else - my_unlock_value = __TBB_LockByte(m.flag); - my_mutex = &m; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_acquire( spin_mutex& m ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - return internal_try_acquire(m); -#else - bool result = __TBB_TryLockByte(m.flag); - if( result ) { - my_unlock_value = 0; - my_mutex = &m; - } - return result; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/ - } - - //! Release lock - void release() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_release(); -#else - __TBB_store_with_release(my_mutex->flag, static_cast(my_unlock_value)); - my_mutex = NULL; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! 
Destroy lock. If holding a lock, releases the lock first. - ~scoped_lock() { - if( my_mutex ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_release(); -#else - __TBB_store_with_release(my_mutex->flag, static_cast(my_unlock_value)); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - } - }; - - void __TBB_EXPORTED_METHOD internal_construct(); - - // Mutex traits - static const bool is_rw_mutex = false; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire lock - void lock() { -#if TBB_USE_THREADING_TOOLS - aligned_space tmp; - new(tmp.begin()) scoped_lock(*this); -#else - __TBB_LockByte(flag); -#endif /* TBB_USE_THREADING_TOOLS*/ - } - - //! Try acquiring lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() { -#if TBB_USE_THREADING_TOOLS - aligned_space tmp; - return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this); -#else - return __TBB_TryLockByte(flag); -#endif /* TBB_USE_THREADING_TOOLS*/ - } - - //! Release lock - void unlock() { -#if TBB_USE_THREADING_TOOLS - aligned_space tmp; - scoped_lock& s = *tmp.begin(); - s.my_mutex = this; - s.my_unlock_value = 0; - s.internal_release(); -#else - __TBB_store_with_release(flag, 0); -#endif /* TBB_USE_THREADING_TOOLS */ - } - - friend class scoped_lock; -}; - -__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex) - -} // namespace tbb - -#endif /* __TBB_spin_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/spin_rw_mutex.h b/deal.II/bundled/tbb30_104oss/include/tbb/spin_rw_mutex.h deleted file mode 100644 index 38b3a1fb18..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/spin_rw_mutex.h +++ /dev/null @@ -1,228 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_spin_rw_mutex_H -#define __TBB_spin_rw_mutex_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" -#include "tbb_profiling.h" - -namespace tbb { - -class spin_rw_mutex_v3; -typedef spin_rw_mutex_v3 spin_rw_mutex; - -//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference -/** @ingroup synchronization */ -class spin_rw_mutex_v3 { - //! @cond INTERNAL - - //! Internal acquire write lock. 
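// Editorial sketch, not part of the patch: tbb::spin_mutex guarding a very short
// critical section, as recommended above. The queue and function are illustrative.
#include "tbb/spin_mutex.h"
#include <deque>

static tbb::spin_mutex queue_mutex;
static std::deque<int> work_queue;

void push_work( int item ) {
    // Keep the critical section tiny; the holder of a spin_mutex must not block.
    tbb::spin_mutex::scoped_lock lock(queue_mutex);
    work_queue.push_back(item);
}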
- bool __TBB_EXPORTED_METHOD internal_acquire_writer(); - - //! Out of line code for releasing a write lock. - /** This code is has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_release_writer(); - - //! Internal acquire read lock. - void __TBB_EXPORTED_METHOD internal_acquire_reader(); - - //! Internal upgrade reader to become a writer. - bool __TBB_EXPORTED_METHOD internal_upgrade(); - - //! Out of line code for downgrading a writer to a reader. - /** This code is has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */ - void __TBB_EXPORTED_METHOD internal_downgrade(); - - //! Internal release read lock. - void __TBB_EXPORTED_METHOD internal_release_reader(); - - //! Internal try_acquire write lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_writer(); - - //! Internal try_acquire read lock. - bool __TBB_EXPORTED_METHOD internal_try_acquire_reader(); - - //! @endcond -public: - //! Construct unacquired mutex. - spin_rw_mutex_v3() : state(0) { -#if TBB_USE_THREADING_TOOLS - internal_construct(); -#endif - } - -#if TBB_USE_ASSERT - //! Destructor asserts if the mutex is acquired, i.e. state is zero. - ~spin_rw_mutex_v3() { - __TBB_ASSERT( !state, "destruction of an acquired mutex"); - }; -#endif /* TBB_USE_ASSERT */ - - //! The scoped locking pattern - /** It helps to avoid the common problem of forgetting to release lock. - It also nicely provides the "node" for queuing locks. */ - class scoped_lock : internal::no_copy { - public: - //! Construct lock that has not acquired a mutex. - /** Equivalent to zero-initialization of *this. */ - scoped_lock() : mutex(NULL), is_writer(false) {} - - //! Acquire lock on given mutex. - scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - is_writer = write; - mutex = &m; - if( write ) mutex->internal_acquire_writer(); - else mutex->internal_acquire_reader(); - } - - //! Upgrade reader to become a writer. - /** Returns true if the upgrade happened without re-acquiring the lock and false if opposite */ - bool upgrade_to_writer() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( !is_writer, "not a reader" ); - is_writer = true; - return mutex->internal_upgrade(); - } - - //! Release lock. - void release() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - spin_rw_mutex *m = mutex; - mutex = NULL; -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( is_writer ) m->internal_release_writer(); - else m->internal_release_reader(); -#else - if( is_writer ) __TBB_AtomicAND( &m->state, READERS ); - else __TBB_FetchAndAddWrelease( &m->state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Downgrade writer to become a reader. - bool downgrade_to_reader() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( is_writer, "not a writer" ); - mutex->internal_downgrade(); -#else - __TBB_FetchAndAddW( &mutex->state, ((intptr_t)ONE_READER-WRITER)); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - is_writer = false; - - return true; - } - - //! Try acquire lock on given mutex. 
- bool try_acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - bool result; - is_writer = write; - result = write? m.internal_try_acquire_writer() - : m.internal_try_acquire_reader(); - if( result ) - mutex = &m; - return result; - } - - private: - //! The pointer to the current mutex that is held, or NULL if no mutex is held. - spin_rw_mutex* mutex; - - //! If mutex!=NULL, then is_writer is true if holding a writer lock, false if holding a reader lock. - /** Not defined if not holding a lock. */ - bool is_writer; - }; - - // Mutex traits - static const bool is_rw_mutex = true; - static const bool is_recursive_mutex = false; - static const bool is_fair_mutex = false; - - // ISO C++0x compatibility methods - - //! Acquire writer lock - void lock() {internal_acquire_writer();} - - //! Try acquiring writer lock (non-blocking) - /** Return true if lock acquired; false otherwise. */ - bool try_lock() {return internal_try_acquire_writer();} - - //! Release lock - void unlock() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - if( state&WRITER ) internal_release_writer(); - else internal_release_reader(); -#else - if( state&WRITER ) __TBB_AtomicAND( &state, READERS ); - else __TBB_FetchAndAddWrelease( &state, -(intptr_t)ONE_READER); -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - // Methods for reader locks that resemble ISO C++0x compatibility methods. - - //! Acquire reader lock - void lock_read() {internal_acquire_reader();} - - //! Try acquiring reader lock (non-blocking) - /** Return true if reader lock acquired; false otherwise. */ - bool try_lock_read() {return internal_try_acquire_reader();} - -private: - typedef intptr_t state_t; - static const state_t WRITER = 1; - static const state_t WRITER_PENDING = 2; - static const state_t READERS = ~(WRITER | WRITER_PENDING); - static const state_t ONE_READER = 4; - static const state_t BUSY = WRITER | READERS; - //! State of lock - /** Bit 0 = writer is holding lock - Bit 1 = request by a writer to acquire lock (hint to readers to wait) - Bit 2..N = number of readers holding lock */ - state_t state; - - void __TBB_EXPORTED_METHOD internal_construct(); -}; - -__TBB_DEFINE_PROFILING_SET_NAME(spin_rw_mutex) - -} // namespace tbb - -#endif /* __TBB_spin_rw_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/task.h b/deal.II/bundled/tbb30_104oss/include/tbb/task.h deleted file mode 100644 index 0f876b0f39..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/task.h +++ /dev/null @@ -1,838 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
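// Editorial sketch, not part of the patch: a reader on tbb::spin_rw_mutex that
// upgrades to a writer. The set and the function are illustrative.
#include "tbb/spin_rw_mutex.h"
#include <set>

static tbb::spin_rw_mutex rw_mutex;
static std::set<int> seen;

void record_if_new( int value ) {
    // Acquire as a reader first (write=false).
    tbb::spin_rw_mutex::scoped_lock lock(rw_mutex, /*write=*/false);
    if( seen.count(value)==0 ) {
        // upgrade_to_writer() returns false if the lock had to be released and
        // re-acquired during the upgrade, so the condition must be re-checked.
        bool upgraded_in_place = lock.upgrade_to_writer();
        if( upgraded_in_place || seen.count(value)==0 )
            seen.insert(value);
    }
}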
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_H -#define __TBB_task_H - -#include "tbb_stddef.h" -#include "tbb_machine.h" - -typedef struct ___itt_caller *__itt_caller; - -namespace tbb { - -class task; -class task_list; - -#if __TBB_TASK_GROUP_CONTEXT -class task_group_context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -// MSVC does not allow taking the address of a member that was defined -// privately in task_base and made public in class task via a using declaration. -#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) -#define __TBB_TASK_BASE_ACCESS public -#else -#define __TBB_TASK_BASE_ACCESS private -#endif - -namespace internal { - - class allocate_additional_child_of_proxy: no_assign { - //! No longer used, but retained for binary layout compatibility. Always NULL. - task* self; - task& parent; - public: - explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - -} - -namespace interface5 { - namespace internal { - //! Base class for methods that became static in TBB 3.0. - /** TBB's evolution caused the "this" argument for several methods to become obsolete. - However, for backwards binary compatibility, the new methods need distinct names, - otherwise the One Definition Rule would be broken. Hence the new methods are - defined in this private base class, and then exposed in class task via - using declarations. */ - class task_base: tbb::internal::no_copy { - __TBB_TASK_BASE_ACCESS: - friend class tbb::task; - - //! Schedule task for execution when a worker becomes available. - static void spawn( task& t ); - - //! Spawn multiple tasks and clear list. - static void spawn( task_list& list ); - - //! Like allocate_child, except that task's parent becomes "t", not this. - /** Typically used in conjunction with schedule_to_reexecute to implement while loops. - Atomically increments the reference count of t.parent() */ - static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) { - return tbb::internal::allocate_additional_child_of_proxy(t); - } - - //! Destroy a task. - /** Usually, calling this method is unnecessary, because a task is - implicitly deleted after its execute() method runs. However, - sometimes a task needs to be explicitly deallocated, such as - when a root task is used as the parent in spawn_and_wait_for_all. */ - static void __TBB_EXPORTED_FUNC destroy( task& victim ); - }; - } // internal -} // interface5 - -//! @cond INTERNAL -namespace internal { - - class scheduler: no_copy { - public: - //! For internal use only - virtual void spawn( task& first, task*& next ) = 0; - - //! For internal use only - virtual void wait_for_all( task& parent, task* child ) = 0; - - //! For internal use only - virtual void spawn_root_and_wait( task& first, task*& next ) = 0; - - //! Pure virtual destructor; - // Have to have it just to shut up overzealous compilation warnings - virtual ~scheduler() = 0; -#if __TBB_ARENA_PER_MASTER - - //! 
For internal use only - virtual void enqueue( task& t, void* reserved ) = 0; -#endif /* __TBB_ARENA_PER_MASTER */ - }; - - //! A reference count - /** Should always be non-negative. A signed type is used so that underflow can be detected. */ - typedef intptr_t reference_count; - - //! An id as used for specifying affinity. - typedef unsigned short affinity_id; - -#if __TBB_TASK_GROUP_CONTEXT - struct context_list_node_t { - context_list_node_t *my_prev, - *my_next; - }; - - class allocate_root_with_context_proxy: no_assign { - task_group_context& my_context; - public: - allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {} - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - class allocate_root_proxy: no_assign { - public: - static task& __TBB_EXPORTED_FUNC allocate( size_t size ); - static void __TBB_EXPORTED_FUNC free( task& ); - }; - - class allocate_continuation_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - class allocate_child_proxy: no_assign { - public: - task& __TBB_EXPORTED_METHOD allocate( size_t size ) const; - void __TBB_EXPORTED_METHOD free( task& ) const; - }; - - //! Memory prefix to a task object. - /** This class is internal to the library. - Do not reference it directly, except within the library itself. - Fields are ordered in way that preserves backwards compatibility and yields - good packing on typical 32-bit and 64-bit platforms. - @ingroup task_scheduling */ - class task_prefix { - private: - friend class tbb::task; - friend class tbb::interface5::internal::task_base; - friend class tbb::task_list; - friend class internal::scheduler; - friend class internal::allocate_root_proxy; - friend class internal::allocate_child_proxy; - friend class internal::allocate_continuation_proxy; - friend class internal::allocate_additional_child_of_proxy; - -#if __TBB_TASK_GROUP_CONTEXT - //! Shared context that is used to communicate asynchronous state changes - /** Currently it is used to broadcast cancellation requests generated both - by users and as the result of unhandled exceptions in the task::execute() - methods. */ - task_group_context *context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! The scheduler that allocated the task, or NULL if the task is big. - /** Small tasks are pooled by the scheduler that allocated the task. - If a scheduler needs to free a small task allocated by another scheduler, - it returns the task to that other scheduler. This policy avoids - memory space blowup issues for memory allocators that allocate from - thread-specific pools. */ - scheduler* origin; - - //! The scheduler that owns the task. - scheduler* owner; - - //! The task whose reference count includes me. - /** In the "blocking style" of programming, this field points to the parent task. - In the "continuation-passing style" of programming, this field points to the - continuation of the parent. */ - tbb::task* parent; - - //! Reference count used for synchronization. - /** In the "continuation-passing style" of programming, this field is - the difference of the number of allocated children minus the - number of children that have completed. - In the "blocking style" of programming, this field is one more than the difference. */ - reference_count ref_count; - - //! Obsolete. 
Used to be scheduling depth before TBB 2.2 - /** Retained only for the sake of backward binary compatibility. **/ - int depth; - - //! A task::state_type, stored as a byte for compactness. - /** This state is exposed to users via method task::state(). */ - unsigned char state; - - //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness. - /** 0x0 -> version 1.0 task - 0x1 -> version >=2.1 task - 0x20 -> task_proxy - 0x40 -> task has live ref_count - 0x80 -> a stolen task */ - unsigned char extra_state; - - affinity_id affinity; - - //! "next" field for list of task - tbb::task* next; - - //! The task corresponding to this task_prefix. - tbb::task& task() {return *reinterpret_cast(this+1);} - }; - -} // namespace internal -//! @endcond - -#if __TBB_TASK_GROUP_CONTEXT - -#if TBB_USE_CAPTURED_EXCEPTION - class tbb_exception; -#else - namespace internal { - class tbb_exception_ptr; - } -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -//! Used to form groups of tasks -/** @ingroup task_scheduling - The context services explicit cancellation requests from user code, and unhandled - exceptions intercepted during tasks execution. Intercepting an exception results - in generating internal cancellation requests (which is processed in exactly the - same way as external ones). - - The context is associated with one or more root tasks and defines the cancellation - group that includes all the descendants of the corresponding root task(s). Association - is established when a context object is passed as an argument to the task::allocate_root() - method. See task_group_context::task_group_context for more details. - - The context can be bound to another one, and other contexts can be bound to it, - forming a tree-like structure: parent -> this -> children. Arrows here designate - cancellation propagation direction. If a task in a cancellation group is canceled - all the other tasks in this group and groups bound to it (as children) get canceled too. - - IMPLEMENTATION NOTE: - When adding new members to task_group_context or changing types of existing ones, - update the size of both padding buffers (_leading_padding and _trailing_padding) - appropriately. See also VERSIONING NOTE at the constructor definition below. **/ -class task_group_context : internal::no_copy { -private: -#if TBB_USE_CAPTURED_EXCEPTION - typedef tbb_exception exception_container_type; -#else - typedef internal::tbb_exception_ptr exception_container_type; -#endif - - enum version_traits_word_layout { - traits_offset = 16, - version_mask = 0xFFFF, - traits_mask = 0xFFFFul << traits_offset - }; - -public: - enum kind_type { - isolated, - bound - }; - - enum traits_type { - exact_exception = 0x0001ul << traits_offset, - concurrent_wait = 0x0004ul << traits_offset, -#if TBB_USE_CAPTURED_EXCEPTION - default_traits = 0 -#else - default_traits = exact_exception -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - }; - -private: - union { - //! Flavor of this context: bound or isolated. - kind_type my_kind; - uintptr_t _my_kind_aligner; - }; - - //! Pointer to the context of the parent cancellation group. NULL for isolated contexts. - task_group_context *my_parent; - - //! Used to form the thread specific list of contexts without additional memory allocation. - /** A context is included into the list of the current thread when its binding to - its parent happens. Any context can be present in the list of one thread only. **/ - internal::context_list_node_t my_node; - - //! 
Used to set and maintain stack stitching point for Intel Performance Tools. - __itt_caller itt_caller; - - //! Leading padding protecting accesses to frequently used members from false sharing. - /** Read accesses to the field my_cancellation_requested are on the hot path inside - the scheduler. This padding ensures that this field never shares the same cache - line with a local variable that is frequently written to. **/ - char _leading_padding[internal::NFS_MaxLineSize - - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t) - - sizeof(__itt_caller)]; - - //! Specifies whether cancellation was request for this task group. - uintptr_t my_cancellation_requested; - - //! Version for run-time checks and behavioral traits of the context. - /** Version occupies low 16 bits, and traits (zero or more ORed enumerators - from the traits_type enumerations) take the next 16 bits. - Original (zeroth) version of the context did not support any traits. **/ - uintptr_t my_version_and_traits; - - //! Pointer to the container storing exception being propagated across this task group. - exception_container_type *my_exception; - - //! Scheduler that registered this context in its thread specific list. - /** This field is not terribly necessary, but it allows to get a small performance - benefit by getting us rid of using thread local storage. We do not care - about extra memory it takes since this data structure is excessively padded anyway. **/ - void *my_owner; - - //! Trailing padding protecting accesses to frequently used members from false sharing - /** \sa _leading_padding **/ - char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)]; - -public: - //! Default & binding constructor. - /** By default a bound context is created. That is this context will be bound - (as child) to the context of the task calling task::allocate_root(this_context) - method. Cancellation requests passed to the parent context are propagated - to all the contexts bound to it. - - If task_group_context::isolated is used as the argument, then the tasks associated - with this context will never be affected by events in any other context. - - Creating isolated contexts involve much less overhead, but they have limited - utility. Normally when an exception occurs in an algorithm that has nested - ones running, it is desirably to have all the nested algorithms canceled - as well. Such a behavior requires nested algorithms to use bound contexts. - - There is one good place where using isolated algorithms is beneficial. It is - a master thread. That is if a particular algorithm is invoked directly from - the master thread (not from a TBB task), supplying it with explicitly - created isolated context will result in a faster algorithm startup. - - VERSIONING NOTE: - Implementation(s) of task_group_context constructor(s) cannot be made - entirely out-of-line because the run-time version must be set by the user - code. This will become critically important for binary compatibility, if - we ever have to change the size of the context object. - - Boosting the runtime version will also be necessary whenever new fields - are introduced in the currently unused padding areas or the meaning of - the existing fields is changed or extended. **/ - task_group_context ( kind_type relation_with_parent = bound, - uintptr_t traits = default_traits ) - : my_kind(relation_with_parent) - , my_version_and_traits(1 | traits) - { - init(); - } - - __TBB_EXPORTED_METHOD ~task_group_context (); - - //! 
Forcefully reinitializes the context after the task tree it was associated with is completed. - /** Because the method assumes that all the tasks that used to be associated with - this context have already finished, calling it while the context is still - in use somewhere in the task hierarchy leads to undefined behavior. - - IMPORTANT: This method is not thread safe! - - The method does not change the context's parent if it is set. **/ - void __TBB_EXPORTED_METHOD reset (); - - //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. - /** \return false if cancellation has already been requested, true otherwise. - - Note that canceling never fails. When false is returned, it just means that - another thread (or this one) has already sent cancellation request to this - context or to one of its ancestors (if this context is bound). It is guaranteed - that when this method is concurrently called on the same not yet cancelled - context, true will be returned by one and only one invocation. **/ - bool __TBB_EXPORTED_METHOD cancel_group_execution (); - - //! Returns true if the context received cancellation request. - bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const; - - //! Records the pending exception, and cancels the task group. - /** May be called only from inside a catch-block. If the context is already - canceled, does nothing. - The method brings the task group associated with this context exactly into - the state it would be in, if one of its tasks threw the currently pending - exception during its execution. In other words, it emulates the actions - of the scheduler's dispatch loop exception handler. **/ - void __TBB_EXPORTED_METHOD register_pending_exception (); - -protected: - //! Out-of-line part of the constructor. - /** Singled out to ensure backward binary compatibility of the future versions. **/ - void __TBB_EXPORTED_METHOD init (); - -private: - friend class task; - friend class internal::allocate_root_with_context_proxy; - - static const kind_type binding_required = bound; - static const kind_type binding_completed = kind_type(bound+1); - static const kind_type detached = kind_type(binding_completed+1); - static const kind_type dying = kind_type(detached+1); - - //! Checks if any of the ancestors has a cancellation request outstanding, - //! and propagates it back to descendants. - void propagate_cancellation_from_ancestors (); - -}; // class task_group_context - -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//! Base class for user-defined tasks. -/** @ingroup task_scheduling */ -class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base { - - //! Set reference count - void __TBB_EXPORTED_METHOD internal_set_ref_count( int count ); - - //! Decrement reference count and return its new value. - internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count(); - -protected: - //! Default constructor. - task() {prefix().extra_state=1;} - -public: - //! Destructor. - virtual ~task() {} - - //! Should be overridden by derived classes. - virtual task* execute() = 0; - - //! Enumeration of task states that the scheduler considers. - enum state_type { - //! task is running, and will be destroyed after method execute() completes. - executing, - //! task to be rescheduled. - reexecute, - //! task is in ready pool, or is going to be put there, or was just taken off. - ready, - //! task object is freshly allocated or recycled. - allocated, - //! 
task object is on free list, or is going to be put there, or was just taken off. - freed, - //! task to be recycled as continuation - recycle - }; - - //------------------------------------------------------------------------ - // Allocating tasks - //------------------------------------------------------------------------ - - //! Returns proxy for overloaded new that allocates a root task. - static internal::allocate_root_proxy allocate_root() { - return internal::allocate_root_proxy(); - } - -#if __TBB_TASK_GROUP_CONTEXT - //! Returns proxy for overloaded new that allocates a root task associated with user supplied context. - static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) { - return internal::allocate_root_with_context_proxy(ctx); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! Returns proxy for overloaded new that allocates a continuation task of *this. - /** The continuation's parent becomes the parent of *this. */ - internal::allocate_continuation_proxy& allocate_continuation() { - return *reinterpret_cast(this); - } - - //! Returns proxy for overloaded new that allocates a child task of *this. - internal::allocate_child_proxy& allocate_child() { - return *reinterpret_cast(this); - } - - //! Define recommended static form via import from base class. - using task_base::allocate_additional_child_of; - -#if __TBB_DEPRECATED_TASK_INTERFACE - //! Destroy a task. - /** Usually, calling this method is unnecessary, because a task is - implicitly deleted after its execute() method runs. However, - sometimes a task needs to be explicitly deallocated, such as - when a root task is used as the parent in spawn_and_wait_for_all. */ - void __TBB_EXPORTED_METHOD destroy( task& t ); -#else /* !__TBB_DEPRECATED_TASK_INTERFACE */ - //! Define recommended static form via import from base class. - using task_base::destroy; -#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */ - - //------------------------------------------------------------------------ - // Recycling of tasks - //------------------------------------------------------------------------ - - //! Change this to be a continuation of its former self. - /** The caller must guarantee that the task's refcount does not become zero until - after the method execute() returns. Typically, this is done by having - method execute() return a pointer to a child of the task. If the guarantee - cannot be made, use method recycle_as_safe_continuation instead. - - Because of the hazard, this method may be deprecated in the future. */ - void recycle_as_continuation() { - __TBB_ASSERT( prefix().state==executing, "execute not running?" ); - prefix().state = allocated; - } - - //! Recommended to use, safe variant of recycle_as_continuation - /** For safety, it requires additional increment of ref_count. - With no decendants and ref_count of 1, it has the semantics of recycle_to_reexecute. */ - void recycle_as_safe_continuation() { - __TBB_ASSERT( prefix().state==executing, "execute not running?" ); - prefix().state = recycle; - } - - //! Change this to be a child of new_parent. 
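// --- Illustrative sketch (editorial addition, not part of the deleted header) of the
// --- blocking-style usage that the allocation proxies above enable.  FibTask and
// --- parallel_fib are hypothetical names following the familiar TBB tutorial pattern;
// --- assumes "tbb/task.h" is included.
class FibTask : public tbb::task {
public:
    const long n;
    long* const sum;
    FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
    /*override*/ tbb::task* execute() {
        if( n < 2 ) {
            *sum = n;
        } else {
            long x, y;
            FibTask& a = *new( allocate_child() ) FibTask( n-1, &x );
            FibTask& b = *new( allocate_child() ) FibTask( n-2, &y );
            set_ref_count( 3 );              // two children plus one for the wait
            spawn( b );
            spawn_and_wait_for_all( a );     // runs a, and works on other tasks while waiting
            *sum = x + y;
        }
        return NULL;
    }
};

long parallel_fib( long n ) {
    long sum;
    FibTask& root = *new( tbb::task::allocate_root() ) FibTask( n, &sum );
    tbb::task::spawn_root_and_wait( root ); // spawns the root, waits, and deallocates it
    return sum;
}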
- void recycle_as_child_of( task& new_parent ) { - internal::task_prefix& p = prefix(); - __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" ); - __TBB_ASSERT( p.parent==NULL, "parent must be null" ); - __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" ); - __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" ); - p.state = allocated; - p.parent = &new_parent; -#if __TBB_TASK_GROUP_CONTEXT - p.context = new_parent.prefix().context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - } - - //! Schedule this for reexecution after current execute() returns. - /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */ - void recycle_to_reexecute() { - __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" ); - __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" ); - prefix().state = reexecute; - } - - // All depth-related methods are obsolete, and are retained for the sake - // of backward source compatibility only - intptr_t depth() const {return 0;} - void set_depth( intptr_t ) {} - void add_to_depth( int ) {} - - - //------------------------------------------------------------------------ - // Spawning and blocking - //------------------------------------------------------------------------ - - //! Set reference count - void set_ref_count( int count ) { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - internal_set_ref_count(count); -#else - prefix().ref_count = count; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Atomically increment reference count. - /** Has acquire semantics */ - void increment_ref_count() { - __TBB_FetchAndIncrementWacquire( &prefix().ref_count ); - } - - //! Atomically decrement reference count. - /** Has release semantics. */ - int decrement_ref_count() { -#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT - return int(internal_decrement_ref_count()); -#else - return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1; -#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */ - } - - //! Define recommended static forms via import from base class. - using task_base::spawn; - - //! Similar to spawn followed by wait_for_all, but more efficient. - void spawn_and_wait_for_all( task& child ) { - prefix().owner->wait_for_all( *this, &child ); - } - - //! Similar to spawn followed by wait_for_all, but more efficient. - void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list ); - - //! Spawn task allocated by allocate_root, wait for it to complete, and deallocate it. - static void spawn_root_and_wait( task& root ) { - root.prefix().owner->spawn_root_and_wait( root, root.prefix().next ); - } - - //! Spawn root tasks on list and wait for all of them to finish. - /** If there are more tasks than worker threads, the tasks are spawned in - order of front to back. */ - static void spawn_root_and_wait( task_list& root_list ); - - //! Wait for reference count to become one, and set reference count to zero. - /** Works on tasks while waiting. */ - void wait_for_all() { - prefix().owner->wait_for_all( *this, NULL ); - } - -#if __TBB_ARENA_PER_MASTER - //! Enqueue task for starvation-resistant execution. - static void enqueue( task& t ) { - t.prefix().owner->enqueue( t, NULL ); - } - -#endif /* __TBB_ARENA_PER_MASTER */ - //! 
The innermost task being executed or destroyed by the current thread at the moment. - static task& __TBB_EXPORTED_FUNC self(); - - //! task on whose behalf this task is working, or NULL if this is a root. - task* parent() const {return prefix().parent;} - -#if __TBB_TASK_GROUP_CONTEXT - //! Shared context that is used to communicate asynchronous state changes - task_group_context* context() {return prefix().context;} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - //! True if task was stolen from the task pool of another thread. - bool is_stolen_task() const { - return (prefix().extra_state & 0x80)!=0; - } - - //------------------------------------------------------------------------ - // Debugging - //------------------------------------------------------------------------ - - //! Current execution state - state_type state() const {return state_type(prefix().state);} - - //! The internal reference count. - int ref_count() const { -#if TBB_USE_ASSERT - internal::reference_count ref_count_ = prefix().ref_count; - __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error"); -#endif - return int(prefix().ref_count); - } - - //! Obsolete, and only retained for the sake of backward compatibility. Always returns true. - bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const; - - //------------------------------------------------------------------------ - // Affinity - //------------------------------------------------------------------------ - - //! An id as used for specifying affinity. - /** Guaranteed to be integral type. Value of 0 means no affinity. */ - typedef internal::affinity_id affinity_id; - - //! Set affinity for this task. - void set_affinity( affinity_id id ) {prefix().affinity = id;} - - //! Current affinity of this task - affinity_id affinity() const {return prefix().affinity;} - - //! Invoked by scheduler to notify task that it ran on unexpected thread. - /** Invoked before method execute() runs, if task is stolen, or task has - affinity but will be executed on another thread. - - The default action does nothing. */ - virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups. - /** \return false if cancellation has already been requested, true otherwise. **/ - bool cancel_group_execution () { return prefix().context->cancel_group_execution(); } - - //! Returns true if the context received cancellation request. - bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -private: - friend class interface5::internal::task_base; - friend class task_list; - friend class internal::scheduler; - friend class internal::allocate_root_proxy; -#if __TBB_TASK_GROUP_CONTEXT - friend class internal::allocate_root_with_context_proxy; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - friend class internal::allocate_continuation_proxy; - friend class internal::allocate_child_proxy; - friend class internal::allocate_additional_child_of_proxy; - - //! Get reference to corresponding task_prefix. - /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. **/ - internal::task_prefix& prefix( internal::version_tag* = NULL ) const { - return reinterpret_cast(const_cast(this))[-1]; - } -}; // class task - -//! task that does nothing. Useful for synchronization. 
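// --- Illustrative sketch (editorial addition) of the cancellation queries documented
// --- above, assuming __TBB_TASK_GROUP_CONTEXT is enabled (the default).  LongTask is a
// --- hypothetical user task that polls is_cancelled() so it can stop early once
// --- cancel_group_execution() has been issued on its group.
class LongTask : public tbb::task {
    /*override*/ tbb::task* execute() {
        for( int i = 0; i < 1000000; ++i ) {
            if( is_cancelled() )             // context received a cancellation request
                return NULL;
            // ... perform one slice of work ...
        }
        return NULL;
    }
};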
-/** @ingroup task_scheduling */ -class empty_task: public task { - /*override*/ task* execute() { - return NULL; - } -}; - -//! A list of children. -/** Used for method task::spawn_children - @ingroup task_scheduling */ -class task_list: internal::no_copy { -private: - task* first; - task** next_ptr; - friend class task; - friend class interface5::internal::task_base; -public: - //! Construct empty list - task_list() : first(NULL), next_ptr(&first) {} - - //! Destroys the list, but does not destroy the task objects. - ~task_list() {} - - //! True if list if empty; false otherwise. - bool empty() const {return !first;} - - //! Push task onto back of list. - void push_back( task& task ) { - task.prefix().next = NULL; - *next_ptr = &task; - next_ptr = &task.prefix().next; - } - - //! Pop the front task from the list. - task& pop_front() { - __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" ); - task* result = first; - first = result->prefix().next; - if( !first ) next_ptr = &first; - return *result; - } - - //! Clear the list - void clear() { - first=NULL; - next_ptr=&first; - } -}; - -inline void interface5::internal::task_base::spawn( task& t ) { - t.prefix().owner->spawn( t, t.prefix().next ); -} - -inline void interface5::internal::task_base::spawn( task_list& list ) { - if( task* t = list.first ) { - t->prefix().owner->spawn( *t, *list.next_ptr ); - list.clear(); - } -} - -inline void task::spawn_root_and_wait( task_list& root_list ) { - if( task* t = root_list.first ) { - t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr ); - root_list.clear(); - } -} - -} // namespace tbb - -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) { - return &tbb::internal::allocate_root_proxy::allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) { - tbb::internal::allocate_root_proxy::free( *static_cast(task) ); -} - -#if __TBB_TASK_GROUP_CONTEXT -inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) { - p.free( *static_cast(task) ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) { - p.free( *static_cast(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) { - p.free( *static_cast(task) ); -} - -inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) { - return &p.allocate(bytes); -} - -inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) { - p.free( *static_cast(task) ); -} - -#endif /* __TBB_task_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/task_group.h b/deal.II/bundled/tbb30_104oss/include/tbb/task_group.h deleted file mode 100644 index a74ccc776f..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/task_group.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
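// --- Illustrative sketch (editorial addition) of task_list feeding spawn_root_and_wait,
// --- as documented above.  WorkTask is a hypothetical tbb::task subclass.
class WorkTask : public tbb::task {             // hypothetical payload task
public:
    explicit WorkTask( int ) {}
    /*override*/ tbb::task* execute() { return NULL; }
};

void run_batch() {
    tbb::task_list list;
    for( int i = 0; i < 4; ++i )
        list.push_back( *new( tbb::task::allocate_root() ) WorkTask( i ) );
    tbb::task::spawn_root_and_wait( list );     // spawns every root task and waits for all of them
}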
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_group_H -#define __TBB_task_group_H - -#include "task.h" -#include "tbb_exception.h" - -namespace tbb { - -namespace internal { - template class task_handle_task; -} - -template -class task_handle : internal::no_assign { - template friend class internal::task_handle_task; - - static const intptr_t scheduled = 0x1; - - F my_func; - intptr_t my_state; - - void mark_scheduled () { - // The check here is intentionally lax to avoid the impact of interlocked operation - if ( my_state & scheduled ) - internal::throw_exception( internal::eid_invalid_multiple_scheduling ); - my_state |= scheduled; - } -public: - task_handle( const F& f ) : my_func(f), my_state(0) {} - - void operator() () const { my_func(); } -}; - -enum task_group_status { - not_complete, - complete, - canceled -}; - -namespace internal { - -// Suppress gratuitous warnings from icc 11.0 when lambda expressions are used in instances of function_task. -//#pragma warning(disable: 588) - -template -class function_task : public task { - F my_func; - /*override*/ task* execute() { - my_func(); - return NULL; - } -public: - function_task( const F& f ) : my_func(f) {} -}; - -template -class task_handle_task : public task { - task_handle& my_handle; - /*override*/ task* execute() { - my_handle(); - return NULL; - } -public: - task_handle_task( task_handle& h ) : my_handle(h) { h.mark_scheduled(); } -}; - -class task_group_base : internal::no_copy { -protected: - empty_task* my_root; - task_group_context my_context; - - task& owner () { return *my_root; } - - template - task_group_status internal_run_and_wait( F& f ) { - __TBB_TRY { - if ( !my_context.is_group_execution_cancelled() ) - f(); - } __TBB_CATCH( ... 
) { - my_context.register_pending_exception(); - } - return wait(); - } - - template - void internal_run( F& f ) { - owner().spawn( *new( owner().allocate_additional_child_of(*my_root) ) Task(f) ); - } - -public: - task_group_base( uintptr_t traits = 0 ) - : my_context(task_group_context::bound, task_group_context::default_traits | traits) - { - my_root = new( task::allocate_root(my_context) ) empty_task; - my_root->set_ref_count(1); - } - - ~task_group_base() { - if( my_root->ref_count() > 1 ) { - bool stack_unwinding_in_progress = std::uncaught_exception(); - // Always attempt to do proper cleanup to avoid inevitable memory corruption - // in case of missing wait (for the sake of better testability & debuggability) - if ( !is_canceling() ) - cancel(); - __TBB_TRY { - my_root->wait_for_all(); - } __TBB_CATCH (...) { - task::destroy(*my_root); - __TBB_RETHROW(); - } - task::destroy(*my_root); - if ( !stack_unwinding_in_progress ) - internal::throw_exception( internal::eid_missing_wait ); - } - else { - task::destroy(*my_root); - } - } - - template - void run( task_handle& h ) { - internal_run< task_handle, internal::task_handle_task >( h ); - } - - task_group_status wait() { - __TBB_TRY { - my_root->wait_for_all(); - } __TBB_CATCH( ... ) { - my_context.reset(); - __TBB_RETHROW(); - } - if ( my_context.is_group_execution_cancelled() ) { - my_context.reset(); - return canceled; - } - return complete; - } - - bool is_canceling() { - return my_context.is_group_execution_cancelled(); - } - - void cancel() { - my_context.cancel_group_execution(); - } -}; // class task_group_base - -} // namespace internal - -class task_group : public internal::task_group_base { -public: - task_group () : task_group_base( task_group_context::concurrent_wait ) {} - -#if TBB_DEPRECATED - ~task_group() __TBB_TRY { - __TBB_ASSERT( my_root->ref_count() != 0, NULL ); - if( my_root->ref_count() > 1 ) - my_root->wait_for_all(); - } -#if TBB_USE_EXCEPTIONS - catch (...) 
{ - // Have to destroy my_root here as the base class destructor won't be called - task::destroy(*my_root); - throw; - } -#endif /* TBB_USE_EXCEPTIONS */ -#endif /* TBB_DEPRECATED */ - -#if __SUNPRO_CC - template - void run( task_handle& h ) { - internal_run< task_handle, internal::task_handle_task >( h ); - } -#else - using task_group_base::run; -#endif - - template - void run( const F& f ) { - internal_run< const F, internal::function_task >( f ); - } - - template - task_group_status run_and_wait( const F& f ) { - return internal_run_and_wait( f ); - } - - template - task_group_status run_and_wait( task_handle& h ) { - return internal_run_and_wait< task_handle >( h ); - } -}; // class task_group - -class structured_task_group : public internal::task_group_base { -public: - template - task_group_status run_and_wait ( task_handle& h ) { - return internal_run_and_wait< task_handle >( h ); - } - - task_group_status wait() { - task_group_status res = task_group_base::wait(); - my_root->set_ref_count(1); - return res; - } -}; // class structured_task_group - -inline -bool is_current_task_group_canceling() { - return task::self().is_cancelled(); -} - -template -task_handle make_task( const F& f ) { - return task_handle( f ); -} - -} // namespace tbb - -#endif /* __TBB_task_group_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_init.h b/deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_init.h deleted file mode 100644 index 458afb26f5..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_init.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_scheduler_init_H -#define __TBB_task_scheduler_init_H - -#include "tbb_stddef.h" - -namespace tbb { - -typedef std::size_t stack_size_type; - -//! @cond INTERNAL -namespace internal { - //! Internal to library. Should not be used by clients. - /** @ingroup task_scheduling */ - class scheduler; -} // namespace internal -//! @endcond - -//! Class representing reference to tbb scheduler. -/** A thread must construct a task_scheduler_init, and keep it alive, - during the time that it uses the services of class task. 
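// --- Illustrative sketch (editorial addition) of the task_group interface defined above;
// --- the Work functor is hypothetical.  Assumes "tbb/task_group.h" is included.
struct Work {
    void operator()() const { /* ... user work ... */ }
};

void run_two_jobs() {
    tbb::task_group g;
    g.run( Work() );                            // wrapped in an internal function_task and spawned
    g.run( Work() );
    tbb::task_group_status s = g.wait();        // complete, or canceled if the group was cancelled
    if( s == tbb::canceled ) { /* react to cancellation */ }
}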
- @ingroup task_scheduling */ -class task_scheduler_init: internal::no_copy { - /** NULL if not currently initialized. */ - internal::scheduler* my_scheduler; -public: - - //! Typedef for number of threads that is automatic. - static const int automatic = -1; - - //! Argument to initialize() or constructor that causes initialization to be deferred. - static const int deferred = -2; - - //! Ensure that scheduler exists for this thread - /** A value of -1 lets tbb decide on the number of threads, which is typically - the number of hardware threads. For production code, the default value of -1 - should be used, particularly if the client code is mixed with third party clients - that might also use tbb. - - The number_of_threads is ignored if any other task_scheduler_inits - currently exist. A thread may construct multiple task_scheduler_inits. - Doing so does no harm because the underlying scheduler is reference counted. */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads=automatic ); - - //! The overloaded method with stack size parameter - /** Overloading is necessary to preserve ABI compatibility */ - void __TBB_EXPORTED_METHOD initialize( int number_of_threads, stack_size_type thread_stack_size ); - - //! Inverse of method initialize. - void __TBB_EXPORTED_METHOD terminate(); - - //! Shorthand for default constructor followed by call to intialize(number_of_threads). - task_scheduler_init( int number_of_threads=automatic, stack_size_type thread_stack_size=0 ) : my_scheduler(NULL) { - initialize( number_of_threads, thread_stack_size ); - } - - //! Destroy scheduler for this thread if thread has no other live task_scheduler_inits. - ~task_scheduler_init() { - if( my_scheduler ) - terminate(); - internal::poison_pointer( my_scheduler ); - } - //! Returns the number of threads tbb scheduler would create if initialized by default. - /** Result returned by this method does not depend on whether the scheduler - has already been initialized. - - Because tbb 2.0 does not support blocking tasks yet, you may use this method - to boost the number of threads in the tbb's internal pool, if your tasks are - doing I/O operations. The optimal number of additional threads depends on how - much time your tasks spend in the blocked state. */ - static int __TBB_EXPORTED_FUNC default_num_threads (); - - //! Returns true if scheduler is active (initialized); false otherwise - bool is_active() const { return my_scheduler != NULL; } -}; - -} // namespace tbb - -#endif /* __TBB_task_scheduler_init_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_observer.h b/deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_observer.h deleted file mode 100644 index 61003e524d..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/task_scheduler_observer.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
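// --- Illustrative sketch (editorial addition) of the lifetime rule described above: keep a
// --- task_scheduler_init object alive for as long as the thread uses TBB tasks/algorithms.
// --- Assumes "tbb/task_scheduler_init.h" is included.
int main() {
    tbb::task_scheduler_init init;              // automatic number of threads
    // tbb::task_scheduler_init init( 4 );      // or request four threads explicitly
    // ... run TBB algorithms while 'init' stays alive ...
    return 0;                                   // destructor terminates the scheduler for this thread
}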
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_task_scheduler_observer_H -#define __TBB_task_scheduler_observer_H - -#include "atomic.h" - -#if __TBB_SCHEDULER_OBSERVER - -namespace tbb { - -namespace internal { - -class observer_proxy; - -class task_scheduler_observer_v3 { - friend class observer_proxy; - observer_proxy* my_proxy; - atomic my_busy_count; -public: - //! Enable or disable observation - void __TBB_EXPORTED_METHOD observe( bool state=true ); - - //! True if observation is enables; false otherwise. - bool is_observing() const {return my_proxy!=NULL;} - - //! Construct observer with observation disabled. - task_scheduler_observer_v3() : my_proxy(NULL) {my_busy_count=0;} - - //! Called by thread before first steal since observation became enabled - virtual void on_scheduler_entry( bool /*is_worker*/ ) {} - - //! Called by thread when it no longer takes part in task stealing. - virtual void on_scheduler_exit( bool /*is_worker*/ ) {} - - //! Destructor - virtual ~task_scheduler_observer_v3() {observe(false);} -}; - -} // namespace internal - -typedef internal::task_scheduler_observer_v3 task_scheduler_observer; - -} // namespace tbb - -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#endif /* __TBB_task_scheduler_observer_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbb.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbb.h deleted file mode 100644 index 9c5ac0ff3e..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbb.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
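// --- Illustrative sketch (editorial addition) of subclassing the observer above (requires
// --- __TBB_SCHEDULER_OBSERVER); ThreadTracker is a hypothetical name.
class ThreadTracker : public tbb::task_scheduler_observer {
public:
    ThreadTracker() { observe( true ); }                        // observation is off by default
    /*override*/ void on_scheduler_entry( bool /*is_worker*/ ) { /* e.g. pin or tag the thread */ }
    /*override*/ void on_scheduler_exit( bool /*is_worker*/ )  { /* per-thread cleanup */ }
};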
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_H -#define __TBB_tbb_H - -/** - This header bulk-includes declarations or definitions of all the functionality - provided by TBB (save for malloc dependent headers). - - If you use only a few TBB constructs, consider including specific headers only. - Any header listed below can be included independently of others. -**/ - -#include "aligned_space.h" -#include "atomic.h" -#include "blocked_range.h" -#include "blocked_range2d.h" -#include "blocked_range3d.h" -#include "cache_aligned_allocator.h" -#include "combinable.h" -#include "concurrent_unordered_map.h" -#include "concurrent_hash_map.h" -#include "concurrent_queue.h" -#include "concurrent_vector.h" -#include "critical_section.h" -#include "enumerable_thread_specific.h" -#include "mutex.h" -#include "null_mutex.h" -#include "null_rw_mutex.h" -#include "parallel_do.h" -#include "parallel_for.h" -#include "parallel_for_each.h" -#include "parallel_invoke.h" -#include "parallel_reduce.h" -#include "parallel_scan.h" -#include "parallel_sort.h" -#include "partitioner.h" -#include "pipeline.h" -#include "queuing_mutex.h" -#include "queuing_rw_mutex.h" -#include "reader_writer_lock.h" -#include "recursive_mutex.h" -#include "spin_mutex.h" -#include "spin_rw_mutex.h" -#include "task.h" -#include "task_group.h" -#include "task_scheduler_init.h" -#include "task_scheduler_observer.h" -#include "tbb_allocator.h" -#include "tbb_exception.h" -#include "tbb_thread.h" -#include "tick_count.h" - -#endif /* __TBB_tbb_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_allocator.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbb_allocator.h deleted file mode 100644 index 008422df30..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_allocator.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_tbb_allocator_H -#define __TBB_tbb_allocator_H - -#include "tbb_stddef.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - //! Deallocates memory using FreeHandler - /** The function uses scalable_free if scalable allocator is available and free if not*/ - void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ); - - //! Allocates memory using MallocHandler - /** The function uses scalable_malloc if scalable allocator is available and malloc if not*/ - void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ); - - //! Returns true if standard malloc/free are used to work with memory. - bool __TBB_EXPORTED_FUNC is_malloc_used_v3(); -} -//! @endcond - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for erroneous "unreferenced parameter" warning in method destroy. - #pragma warning (push) - #pragma warning (disable: 4100) -#endif - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The class selects the best memory allocation mechanism available - from scalable_malloc and standard malloc. - The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template -class tbb_allocator { -public: - typedef typename internal::allocator_type::value_type value_type; - typedef value_type* pointer; - typedef const value_type* const_pointer; - typedef value_type& reference; - typedef const value_type& const_reference; - typedef size_t size_type; - typedef ptrdiff_t difference_type; - template struct rebind { - typedef tbb_allocator other; - }; - - //! Specifies current allocator - enum malloc_type { - scalable, - standard - }; - - tbb_allocator() throw() {} - tbb_allocator( const tbb_allocator& ) throw() {} - template tbb_allocator(const tbb_allocator&) throw() {} - - pointer address(reference x) const {return &x;} - const_pointer address(const_reference x) const {return &x;} - - //! Allocate space for n objects. - pointer allocate( size_type n, const void* /*hint*/ = 0) { - return pointer(internal::allocate_via_handler_v3( n * sizeof(value_type) )); - } - - //! Free previously allocated block of memory. - void deallocate( pointer p, size_type ) { - internal::deallocate_via_handler_v3(p); - } - - //! Largest value for which method allocate might succeed. - size_type max_size() const throw() { - size_type max = static_cast(-1) / sizeof (value_type); - return (max > 0 ? max : 1); - } - - //! Copy-construct value at location pointed to by p. - void construct( pointer p, const value_type& value ) {::new((void*)(p)) value_type(value);} - - //! Destroy value at location pointed to by p. - void destroy( pointer p ) {p->~value_type();} - - //! Returns current allocator - static malloc_type allocator_type() { - return internal::is_malloc_used_v3() ? standard : scalable; - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4100 is back - -//! 
Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template<> -class tbb_allocator { -public: - typedef void* pointer; - typedef const void* const_pointer; - typedef void value_type; - template struct rebind { - typedef tbb_allocator other; - }; -}; - -template -inline bool operator==( const tbb_allocator&, const tbb_allocator& ) {return true;} - -template -inline bool operator!=( const tbb_allocator&, const tbb_allocator& ) {return false;} - -//! Meets "allocator" requirements of ISO C++ Standard, Section 20.1.5 -/** The class is an adapter over an actual allocator that fills the allocation - using memset function with template argument C as the value. - The members are ordered the same way they are in section 20.4.1 - of the ISO C++ standard. - @ingroup memory_allocation */ -template class Allocator = tbb_allocator> -class zero_allocator : public Allocator -{ -public: - typedef Allocator base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - typedef typename base_allocator_type::reference reference; - typedef typename base_allocator_type::const_reference const_reference; - typedef typename base_allocator_type::size_type size_type; - typedef typename base_allocator_type::difference_type difference_type; - template struct rebind { - typedef zero_allocator other; - }; - - zero_allocator() throw() { } - zero_allocator(const zero_allocator &a) throw() : base_allocator_type( a ) { } - template - zero_allocator(const zero_allocator &a) throw() : base_allocator_type( Allocator( a ) ) { } - - pointer allocate(const size_type n, const void *hint = 0 ) { - pointer ptr = base_allocator_type::allocate( n, hint ); - std::memset( ptr, 0, n * sizeof(value_type) ); - return ptr; - } -}; - -//! Analogous to std::allocator, as defined in ISO C++ Standard, Section 20.4.1 -/** @ingroup memory_allocation */ -template class Allocator> -class zero_allocator : public Allocator { -public: - typedef Allocator base_allocator_type; - typedef typename base_allocator_type::value_type value_type; - typedef typename base_allocator_type::pointer pointer; - typedef typename base_allocator_type::const_pointer const_pointer; - template struct rebind { - typedef zero_allocator other; - }; -}; - -template class B1, typename T2, template class B2> -inline bool operator==( const zero_allocator &a, const zero_allocator &b) { - return static_cast< B1 >(a) == static_cast< B2 >(b); -} -template class B1, typename T2, template class B2> -inline bool operator!=( const zero_allocator &a, const zero_allocator &b) { - return static_cast< B1 >(a) != static_cast< B2 >(b); -} - -} // namespace tbb - -#endif /* __TBB_tbb_allocator_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_config.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbb_config.h deleted file mode 100644 index a65fd153ce..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_config.h +++ /dev/null @@ -1,208 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
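// --- Illustrative sketch (editorial addition) of plugging the allocators above into a
// --- standard container; assumes <vector> and "tbb/tbb_allocator.h" are available.
void allocator_demo() {
    std::vector<int, tbb::tbb_allocator<int> > v( 100 );       // scalable_malloc when available
    std::vector<int, tbb::zero_allocator<int> > z( 100 );      // same, but allocations are zero-filled
    bool scalable = tbb::tbb_allocator<int>::allocator_type()
                    == tbb::tbb_allocator<int>::scalable;       // which backend is in use
    (void)v; (void)z; (void)scalable;
}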
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_config_H -#define __TBB_tbb_config_H - -/** This header is supposed to contain macro definitions and C style comments only. - The macros defined here are intended to control such aspects of TBB build as - - compilation modes - - feature sets - - workarounds presence -**/ - -/** Compilation modes **/ - -#ifndef TBB_USE_DEBUG -#ifdef TBB_DO_ASSERT -#define TBB_USE_DEBUG TBB_DO_ASSERT -#else -#define TBB_USE_DEBUG 0 -#endif /* TBB_DO_ASSERT */ -#else -#define TBB_DO_ASSERT TBB_USE_DEBUG -#endif /* TBB_USE_DEBUG */ - -#ifndef TBB_USE_ASSERT -#ifdef TBB_DO_ASSERT -#define TBB_USE_ASSERT TBB_DO_ASSERT -#else -#define TBB_USE_ASSERT TBB_USE_DEBUG -#endif /* TBB_DO_ASSERT */ -#endif /* TBB_USE_ASSERT */ - -#ifndef TBB_USE_THREADING_TOOLS -#ifdef TBB_DO_THREADING_TOOLS -#define TBB_USE_THREADING_TOOLS TBB_DO_THREADING_TOOLS -#else -#define TBB_USE_THREADING_TOOLS TBB_USE_DEBUG -#endif /* TBB_DO_THREADING_TOOLS */ -#endif /* TBB_USE_THREADING_TOOLS */ - -#ifndef TBB_USE_PERFORMANCE_WARNINGS -#ifdef TBB_PERFORMANCE_WARNINGS -#define TBB_USE_PERFORMANCE_WARNINGS TBB_PERFORMANCE_WARNINGS -#else -#define TBB_USE_PERFORMANCE_WARNINGS TBB_USE_DEBUG -#endif /* TBB_PEFORMANCE_WARNINGS */ -#endif /* TBB_USE_PERFORMANCE_WARNINGS */ - -#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX) - #if TBB_USE_EXCEPTIONS - #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. 
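// --- Illustrative, hypothetical build lines (editorial addition) showing how the mode
// --- macros above are normally set on the compiler command line rather than by editing
// --- this header:
//       g++ -DTBB_USE_DEBUG=1 -DTBB_USE_ASSERT=1 app.cpp -ltbb_debug    // enable internal checking
//       g++ -DTBB_USE_EXCEPTIONS=0 app.cpp -ltbb                        // build with EH disabled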
- #elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 0 - #endif -#elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 1 -#endif - -#ifndef TBB_IMPLEMENT_CPP0X - /** By default, use C++0x classes if available **/ - #if __GNUC__==4 && __GNUC_MINOR__>=4 && __GXX_EXPERIMENTAL_CXX0X__ - #define TBB_IMPLEMENT_CPP0X 0 - #else - #define TBB_IMPLEMENT_CPP0X 1 - #endif -#endif /* TBB_IMPLEMENT_CPP0X */ - -/** Feature sets **/ - -#ifndef __TBB_COUNT_TASK_NODES - #define __TBB_COUNT_TASK_NODES TBB_USE_ASSERT -#endif - -#ifndef __TBB_TASK_GROUP_CONTEXT - #define __TBB_TASK_GROUP_CONTEXT 1 -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#ifndef __TBB_SCHEDULER_OBSERVER - #define __TBB_SCHEDULER_OBSERVER 1 -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#ifndef __TBB_ARENA_PER_MASTER - #define __TBB_ARENA_PER_MASTER 1 -#endif /* __TBB_ARENA_PER_MASTER */ - -#if !defined(__TBB_SURVIVE_THREAD_SWITCH) && (_WIN32 || _WIN64 || __linux__) - #define __TBB_SURVIVE_THREAD_SWITCH 1 -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - - -/* TODO: The following condition should be extended as soon as new compilers/runtimes - with std::exception_ptr support appear. */ -#define __TBB_EXCEPTION_PTR_PRESENT (_MSC_VER >= 1600 || __GXX_EXPERIMENTAL_CXX0X__ && (__GNUC__==4 && __GNUC_MINOR__>=4)) - - -#ifndef TBB_USE_CAPTURED_EXCEPTION - #if __TBB_EXCEPTION_PTR_PRESENT - #define TBB_USE_CAPTURED_EXCEPTION 0 - #else - #define TBB_USE_CAPTURED_EXCEPTION 1 - #endif -#else /* defined TBB_USE_CAPTURED_EXCEPTION */ - #if !TBB_USE_CAPTURED_EXCEPTION && !__TBB_EXCEPTION_PTR_PRESENT - #error Current runtime does not support std::exception_ptr. Set TBB_USE_CAPTURED_EXCEPTION and make sure that your code is ready to catch tbb::captured_exception. - #endif -#endif /* defined TBB_USE_CAPTURED_EXCEPTION */ - - -#ifndef __TBB_DEFAULT_PARTITIONER -#if TBB_DEPRECATED -/** Default partitioner for parallel loop templates in TBB 1.0-2.1 */ -#define __TBB_DEFAULT_PARTITIONER tbb::simple_partitioner -#else -/** Default partitioner for parallel loop templates in TBB 2.2 */ -#define __TBB_DEFAULT_PARTITIONER tbb::auto_partitioner -#endif /* TBB_DEFAULT_PARTITIONER */ -#endif /* !defined(__TBB_DEFAULT_PARTITIONER */ - -/** Workarounds presence **/ - -#if __GNUC__==4 && __GNUC_MINOR__>=4 && !defined(__INTEL_COMPILER) - #define __TBB_GCC_WARNING_SUPPRESSION_ENABLED 1 -#endif - -/** Macros of the form __TBB_XXX_BROKEN denote known issues that are caused by - the bugs in compilers, standard or OS specific libraries. They should be - removed as soon as the corresponding bugs are fixed or the buggy OS/compiler - versions go out of the support list. -**/ - -#if _MSC_VER && __INTEL_COMPILER && (__INTEL_COMPILER<1110 || __INTEL_COMPILER==1110 && __INTEL_COMPILER_BUILD_DATE < 20091012) - /** Necessary to avoid ICL error (or warning in non-strict mode): - "exception specification for implicitly declared virtual destructor is - incompatible with that of overridden one". **/ - #define __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN 1 -#endif - -#if defined(_MSC_VER) && _MSC_VER < 1500 && !defined(__INTEL_COMPILER) - /** VS2005 and earlier do not allow declaring template class as a friend - of classes defined in other namespaces. **/ - #define __TBB_TEMPLATE_FRIENDS_BROKEN 1 -#endif - -#if __GLIBC__==2 && __GLIBC_MINOR__==3 || __MINGW32__ - //! Macro controlling EH usages in TBB tests - /** Some older versions of glibc crash when exception handling happens concurrently. 
**/ - #define __TBB_THROW_ACROSS_MODULE_BOUNDARY_BROKEN 1 -#endif - -#if (_WIN32||_WIN64) && __INTEL_COMPILER == 1110 - /** That's a bug in Intel compiler 11.1.044/IA-32/Windows, that leads to a worker thread crash on the thread's startup. **/ - #define __TBB_ICL_11_1_CODE_GEN_BROKEN 1 -#endif - -#if __GNUC__==3 && __GNUC_MINOR__==3 && !defined(__INTEL_COMPILER) - /** A bug in GCC 3.3 with access to nested classes declared in protected area */ - #define __TBB_GCC_3_3_PROTECTED_BROKEN 1 -#endif - -#if __MINGW32__ && (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__<2) - /** MinGW has a bug with stack alignment for routines invoked from MS RTLs. - Since GCC 4.2, the bug can be worked around via a special attribute. **/ - #define __TBB_SSE_STACK_ALIGNMENT_BROKEN 1 -#endif - -#if __FreeBSD__ - /** A bug in FreeBSD 8.0 results in kernel panic when there is contention - on a mutex created with this attribute. **/ - #define __TBB_PRIO_INHERIT_BROKEN 1 - - /** A bug in FreeBSD 8.0 results in test hanging when an exception occurs - during (concurrent?) object construction by means of placement new operator. **/ - #define __TBB_PLACEMENT_NEW_EXCEPTION_SAFETY_BROKEN 1 -#endif /* __FreeBSD__ */ - -#if (__linux__ || __APPLE__) && __i386__ && defined(__INTEL_COMPILER) - /** The Intel compiler for IA-32 (Linux|Mac OS X) crashes or generates - incorrect code when __asm__ arguments have a cast to volatile. **/ - #define __TBB_ICC_ASM_VOLATILE_BROKEN 1 -#endif - -#endif /* __TBB_tbb_config_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_exception.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbb_exception.h deleted file mode 100644 index d8ae898bb9..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_exception.h +++ /dev/null @@ -1,362 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
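// --- Illustrative sketch (editorial addition): user code can key off the feature macros
// --- defined above, e.g. guarding functionality that needs task-group-context support.
#include "tbb/tbb_config.h"
#if __TBB_TASK_GROUP_CONTEXT
    // code that passes an explicit tbb::task_group_context to algorithms or tasks
#endif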
-*/ - -#ifndef __TBB_exception_H -#define __TBB_exception_H - -#include "tbb_stddef.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include // required to construct std exception classes - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -//! Exception for concurrent containers -class bad_last_alloc : public std::bad_alloc { -public: - /*override*/ const char* what() const throw(); -#if __TBB_DEFAULT_DTOR_THROW_SPEC_BROKEN - /*override*/ ~bad_last_alloc() throw() {} -#endif -}; - -//! Exception for PPL locks -class improper_lock : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -//! Exception for missing wait on structured_task_group -class missing_wait : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -//! Exception for repeated scheduling of the same task_handle -class invalid_multiple_scheduling : public std::exception { -public: - /*override*/ const char* what() const throw(); -}; - -namespace internal { -//! Obsolete -void __TBB_EXPORTED_FUNC throw_bad_last_alloc_exception_v4(); - -enum exception_id { - eid_bad_alloc = 1, - eid_bad_last_alloc, - eid_nonpositive_step, - eid_out_of_range, - eid_segment_range_error, - eid_index_range_error, - eid_missing_wait, - eid_invalid_multiple_scheduling, - eid_improper_lock, - eid_possible_deadlock, - eid_operation_not_permitted, - eid_condvar_wait_failed, - eid_invalid_load_factor, - eid_reserved, // free slot for backward compatibility, can be reused. - eid_invalid_swap, - eid_reservation_length_error, - eid_invalid_key, - //! The last enumerator tracks the number of defined IDs. It must remain the last one. - /** When adding new IDs, place them immediately _before_ this comment (that is - _after_ all the existing IDs. NEVER insert new IDs between the existing ones. **/ - eid_max -}; - -//! Gathers all throw operators in one place. -/** Its purpose is to minimize code bloat that can be caused by throw operators - scattered in multiple places, especially in templates. **/ -void __TBB_EXPORTED_FUNC throw_exception_v4 ( exception_id ); - -//! Versionless convenience wrapper for throw_exception_v4() -inline void throw_exception ( exception_id eid ) { throw_exception_v4(eid); } - -} // namespace internal -} // namespace tbb - -#if __TBB_TASK_GROUP_CONTEXT -#include "tbb_allocator.h" -#include -#include -#include - -namespace tbb { - -//! Interface to be implemented by all exceptions TBB recognizes and propagates across the threads. -/** If an unhandled exception of the type derived from tbb::tbb_exception is intercepted - by the TBB scheduler in one of the worker threads, it is delivered to and re-thrown in - the root thread. The root thread is the thread that has started the outermost algorithm - or root task sharing the same task_group_context with the guilty algorithm/task (the one - that threw the exception first). - - Note: when documentation mentions workers with respect to exception handling, - masters are implied as well, because they are completely equivalent in this context. - Consequently a root thread can be master or worker thread. 
- - NOTE: In case of nested algorithms or complex task hierarchies when the nested - levels share (explicitly or by means of implicit inheritance) the task group - context of the outermost level, the exception may be (re-)thrown multiple times - (ultimately - in each worker on each nesting level) before reaching the root - thread at the outermost level. IMPORTANT: if you intercept an exception derived - from this class on a nested level, you must re-throw it in the catch block by means - of the "throw;" operator. - - TBB provides two implementations of this interface: tbb::captured_exception and - template class tbb::movable_exception. See their declarations for more info. **/ -class tbb_exception : public std::exception -{ - /** No operator new is provided because the TBB usage model assumes dynamic - creation of the TBB exception objects only by means of applying move() - operation on an exception thrown out of TBB scheduler. **/ - void* operator new ( size_t ); - -public: - //! Creates and returns pointer to the deep copy of this exception object. - /** Move semantics is allowed. **/ - virtual tbb_exception* move () throw() = 0; - - //! Destroys objects created by the move() method. - /** Frees memory and calls destructor for this exception object. - Can and must be used only on objects created by the move method. **/ - virtual void destroy () throw() = 0; - - //! Throws this exception object. - /** Make sure that if you have several levels of derivation from this interface - you implement or override this method on the most derived level. The implementation - is as simple as "throw *this;". Failure to do this will result in exception - of a base class type being thrown. **/ - virtual void throw_self () = 0; - - //! Returns RTTI name of the originally intercepted exception - virtual const char* name() const throw() = 0; - - //! Returns the result of originally intercepted exception's what() method. - virtual const char* what() const throw() = 0; - - /** Operator delete is provided only to allow using existing smart pointers - with TBB exception objects obtained as the result of applying move() - operation on an exception thrown out of TBB scheduler. - - When overriding method move() make sure to override operator delete as well - if memory is allocated not by TBB's scalable allocator. **/ - void operator delete ( void* p ) { - internal::deallocate_via_handler_v3(p); - } -}; - -//! This class is used by TBB to propagate information about unhandled exceptions into the root thread. -/** Exception of this type is thrown by TBB in the root thread (thread that started a parallel - algorithm ) if an unhandled exception was intercepted during the algorithm execution in one - of the workers. 
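// Aside, not part of the patch: an illustrative sketch of how client code is meant to use the
// movable_exception template declared further below in this header to carry arbitrary data from
// a worker back to the root thread. The payload type and helper names here are hypothetical.
#include "tbb/tbb_exception.h"

struct index_error_data { long bad_index; };

inline void worker_side( long i ) {
    if( i < 0 ) {
        index_error_data d = { i };
        throw tbb::movable_exception<index_error_data>( d );   // intercepted by the TBB scheduler
    }
}

inline void root_side( long i ) {
    try {
        worker_side( i );   // in real code: a TBB algorithm sharing the same task_group_context
    } catch( tbb::movable_exception<index_error_data>& e ) {
        long bad = e.data().bad_index;   // the payload arrives intact in the root thread
        (void) bad;
    } catch( tbb::tbb_exception& ) {
        throw;   // on a nested level the documentation above requires a plain "throw;"
    }
}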
- \sa tbb::tbb_exception **/ -class captured_exception : public tbb_exception -{ -public: - captured_exception ( const captured_exception& src ) - : tbb_exception(src), my_dynamic(false) - { - set(src.my_exception_name, src.my_exception_info); - } - - captured_exception ( const char* name_, const char* info ) - : my_dynamic(false) - { - set(name_, info); - } - - __TBB_EXPORTED_METHOD ~captured_exception () throw() { - clear(); - } - - captured_exception& operator= ( const captured_exception& src ) { - if ( this != &src ) { - clear(); - set(src.my_exception_name, src.my_exception_info); - } - return *this; - } - - /*override*/ - captured_exception* __TBB_EXPORTED_METHOD move () throw(); - - /*override*/ - void __TBB_EXPORTED_METHOD destroy () throw(); - - /*override*/ - void throw_self () { __TBB_THROW(*this); } - - /*override*/ - const char* __TBB_EXPORTED_METHOD name() const throw(); - - /*override*/ - const char* __TBB_EXPORTED_METHOD what() const throw(); - - void __TBB_EXPORTED_METHOD set ( const char* name, const char* info ) throw(); - void __TBB_EXPORTED_METHOD clear () throw(); - -private: - //! Used only by method clone(). - captured_exception() {} - - //! Functionally equivalent to {captured_exception e(name,info); return e.clone();} - static captured_exception* allocate ( const char* name, const char* info ); - - bool my_dynamic; - const char* my_exception_name; - const char* my_exception_info; -}; - -//! Template that can be used to implement exception that transfers arbitrary ExceptionData to the root thread -/** Code using TBB can instantiate this template with an arbitrary ExceptionData type - and throw this exception object. Such exceptions are intercepted by the TBB scheduler - and delivered to the root thread (). - \sa tbb::tbb_exception **/ -template -class movable_exception : public tbb_exception -{ - typedef movable_exception self_type; - -public: - movable_exception ( const ExceptionData& data_ ) - : my_exception_data(data_) - , my_dynamic(false) - , my_exception_name( -#if TBB_USE_EXCEPTIONS - typeid(self_type).name() -#else /* !TBB_USE_EXCEPTIONS */ - "movable_exception" -#endif /* !TBB_USE_EXCEPTIONS */ - ) - {} - - movable_exception ( const movable_exception& src ) throw () - : tbb_exception(src) - , my_exception_data(src.my_exception_data) - , my_dynamic(false) - , my_exception_name(src.my_exception_name) - {} - - ~movable_exception () throw() {} - - const movable_exception& operator= ( const movable_exception& src ) { - if ( this != &src ) { - my_exception_data = src.my_exception_data; - my_exception_name = src.my_exception_name; - } - return *this; - } - - ExceptionData& data () throw() { return my_exception_data; } - - const ExceptionData& data () const throw() { return my_exception_data; } - - /*override*/ const char* name () const throw() { return my_exception_name; } - - /*override*/ const char* what () const throw() { return "tbb::movable_exception"; } - - /*override*/ - movable_exception* move () throw() { - void* e = internal::allocate_via_handler_v3(sizeof(movable_exception)); - if ( e ) { - ::new (e) movable_exception(*this); - ((movable_exception*)e)->my_dynamic = true; - } - return (movable_exception*)e; - } - /*override*/ - void destroy () throw() { - __TBB_ASSERT ( my_dynamic, "Method destroy can be called only on dynamically allocated movable_exceptions" ); - if ( my_dynamic ) { - this->~movable_exception(); - internal::deallocate_via_handler_v3(this); - } - } - /*override*/ - void throw_self () { __TBB_THROW( *this ); } - -protected: - //! 
User data - ExceptionData my_exception_data; - -private: - //! Flag specifying whether this object has been dynamically allocated (by the move method) - bool my_dynamic; - - //! RTTI name of this class - /** We rely on the fact that RTTI names are static string constants. **/ - const char* my_exception_name; -}; - -#if !TBB_USE_CAPTURED_EXCEPTION -namespace internal { - -//! Exception container that preserves the exact copy of the original exception -/** This class can be used only when the appropriate runtime support (mandated - by C++0x) is present **/ -class tbb_exception_ptr { - std::exception_ptr my_ptr; - -public: - static tbb_exception_ptr* allocate (); - static tbb_exception_ptr* allocate ( const tbb_exception& tag ); - //! This overload uses move semantics (i.e. it empties src) - static tbb_exception_ptr* allocate ( captured_exception& src ); - - //! Destroys this objects - /** Note that objects of this type can be created only by the allocate() method. **/ - void destroy () throw(); - - //! Throws the contained exception . - void throw_self () { std::rethrow_exception(my_ptr); } - -private: - tbb_exception_ptr ( const std::exception_ptr& src ) : my_ptr(src) {} - tbb_exception_ptr ( const captured_exception& src ) : my_ptr(std::copy_exception(src)) {} -}; // class tbb::internal::tbb_exception_ptr - -} // namespace internal -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -} // namespace tbb - -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#endif /* __TBB_exception_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_machine.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbb_machine.h deleted file mode 100644 index 5efa08e2ce..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_machine.h +++ /dev/null @@ -1,691 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_machine_H -#define __TBB_machine_H - -#include "tbb_stddef.h" - -#if _WIN32||_WIN64 - -#ifdef _MANAGED -#pragma managed(push, off) -#endif - -#if __MINGW64__ -#include "machine/linux_intel64.h" -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#elif __MINGW32__ -#include "machine/linux_ia32.h" -extern "C" __declspec(dllimport) int __stdcall SwitchToThread( void ); -#define __TBB_Yield() SwitchToThread() -#elif defined(_M_IX86) -#include "machine/windows_ia32.h" -#elif defined(_M_AMD64) -#include "machine/windows_intel64.h" -#elif _XBOX -#include "machine/xbox360_ppc.h" -#endif - -#ifdef _MANAGED -#pragma managed(pop) -#endif - -#elif __linux__ || __NetBSD__ || __FreeBSD__ - -#if __i386__ -#include "machine/linux_ia32.h" -#elif __x86_64__ -#include "machine/linux_intel64.h" -#elif __ia64__ -#include "machine/linux_ia64.h" -#elif __powerpc__ -#include "machine/mac_ppc.h" -#endif -#include "machine/linux_common.h" - -#elif __APPLE__ - -#if __i386__ -#include "machine/linux_ia32.h" -#elif __x86_64__ -#include "machine/linux_intel64.h" -#elif __POWERPC__ -#include "machine/mac_ppc.h" -#endif -#include "machine/macos_common.h" - -#elif _AIX - -#include "machine/ibm_aix51.h" - -#elif __sun || __SUNPRO_CC - -#define __asm__ asm -#define __volatile__ volatile -#if __i386 || __i386__ -#include "machine/linux_ia32.h" -#elif __x86_64__ -#include "machine/linux_intel64.h" -#elif __sparc -#include "machine/sunos_sparc.h" -#endif -#include -#define __TBB_Yield() sched_yield() - -#endif - -//! Prerequisites for each architecture port -/** There are no generic implementation for these macros so they have to be implemented - in each machine architecture specific header. - - __TBB_full_memory_fence must prevent all memory operations from being reordered - across the fence. And all such fences must be totally ordered (or sequentially - consistent). These fence must affect both compiler and hardware. - - __TBB_release_consistency_helper is used to enforce guarantees of acquire or - release semantics in generic implementations of __TBB_load_with_acquire and - __TBB_store_with_release below. Depending on the particular combination of - architecture+compiler it can be a hardware fence, a compiler fence, both or - nothing. **/ -#if !defined(__TBB_CompareAndSwap4) \ - || !defined(__TBB_CompareAndSwap8) \ - || !defined(__TBB_Yield) \ - || !defined(__TBB_full_memory_fence) \ - || !defined(__TBB_release_consistency_helper) -#error Minimal requirements for tbb_machine.h not satisfied; platform is not supported. -#endif - -#ifndef __TBB_Pause - inline void __TBB_Pause(int32_t) { - __TBB_Yield(); - } -#endif - -namespace tbb { -namespace internal { - -//! Class that implements exponential backoff. -/** See implementation of spin_wait_while_eq for an example. */ -class atomic_backoff : no_copy { - //! Time delay, in units of "pause" instructions. - /** Should be equal to approximately the number of "pause" instructions - that take the same time as an context switch. */ - static const int32_t LOOPS_BEFORE_YIELD = 16; - int32_t count; -public: - atomic_backoff() : count(1) {} - - //! Pause for a while. - void pause() { - if( count<=LOOPS_BEFORE_YIELD ) { - __TBB_Pause(count); - // Pause twice as long the next time. - count*=2; - } else { - // Pause is so long that we might as well yield CPU to scheduler. - __TBB_Yield(); - } - } - - // pause for a few times and then return false immediately. 
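// Aside, not part of the patch: a sketch of how a caller typically consumes bounded_pause()
// defined just below -- spin with exponential backoff while a condition does not hold, and fall
// back to plain yielding once the backoff budget is exhausted. wait_until_nonzero is a
// hypothetical helper, not a TBB entity (assumes tbb/tbb_machine.h has been included).
inline void wait_until_nonzero( const volatile int& flag ) {
    tbb::internal::atomic_backoff backoff;
    while( flag == 0 ) {
        if( !backoff.bounded_pause() )
            __TBB_Yield();   // pause budget used up; keep yielding to the OS scheduler instead
    }
}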
- bool bounded_pause() { - if( count<=LOOPS_BEFORE_YIELD ) { - __TBB_Pause(count); - // Pause twice as long the next time. - count*=2; - return true; - } else { - return false; - } - } - - void reset() { - count = 1; - } -}; - -//! Spin WHILE the value of the variable is equal to a given value -/** T and U should be comparable types. */ -template -void spin_wait_while_eq( const volatile T& location, U value ) { - atomic_backoff backoff; - while( location==value ) backoff.pause(); -} - -//! Spin UNTIL the value of the variable is equal to a given value -/** T and U should be comparable types. */ -template -void spin_wait_until_eq( const volatile T& location, const U value ) { - atomic_backoff backoff; - while( location!=value ) backoff.pause(); -} - -// T should be unsigned, otherwise sign propagation will break correctness of bit manipulations. -// S should be either 1 or 2, for the mask calculation to work correctly. -// Together, these rules limit applicability of Masked CAS to unsigned char and unsigned short. -template -inline T __TBB_MaskedCompareAndSwap (volatile T *ptr, T value, T comparand ) { - volatile uint32_t * base = (uint32_t*)( (uintptr_t)ptr & ~(uintptr_t)0x3 ); -#if __TBB_BIG_ENDIAN - const uint8_t bitoffset = uint8_t( 8*( 4-S - (uintptr_t(ptr) & 0x3) ) ); -#else - const uint8_t bitoffset = uint8_t( 8*((uintptr_t)ptr & 0x3) ); -#endif - const uint32_t mask = ( (1<<(S*8)) - 1 )<> bitoffset); -} - -template -inline T __TBB_CompareAndSwapGeneric (volatile void *ptr, T value, T comparand ) { - return __TBB_CompareAndSwapW((T *)ptr,value,comparand); -} - -template<> -inline uint8_t __TBB_CompareAndSwapGeneric <1,uint8_t> (volatile void *ptr, uint8_t value, uint8_t comparand ) { -#ifdef __TBB_CompareAndSwap1 - return __TBB_CompareAndSwap1(ptr,value,comparand); -#else - return __TBB_MaskedCompareAndSwap<1,uint8_t>((volatile uint8_t *)ptr,value,comparand); -#endif -} - -template<> -inline uint16_t __TBB_CompareAndSwapGeneric <2,uint16_t> (volatile void *ptr, uint16_t value, uint16_t comparand ) { -#ifdef __TBB_CompareAndSwap2 - return __TBB_CompareAndSwap2(ptr,value,comparand); -#else - return __TBB_MaskedCompareAndSwap<2,uint16_t>((volatile uint16_t *)ptr,value,comparand); -#endif -} - -template<> -inline uint32_t __TBB_CompareAndSwapGeneric <4,uint32_t> (volatile void *ptr, uint32_t value, uint32_t comparand ) { - return __TBB_CompareAndSwap4(ptr,value,comparand); -} - -template<> -inline uint64_t __TBB_CompareAndSwapGeneric <8,uint64_t> (volatile void *ptr, uint64_t value, uint64_t comparand ) { - return __TBB_CompareAndSwap8(ptr,value,comparand); -} - -template -inline T __TBB_FetchAndAddGeneric (volatile void *ptr, T addend) { - atomic_backoff b; - T result; - for(;;) { - result = *reinterpret_cast(ptr); - // __TBB_CompareAndSwapGeneric presumed to have full fence. - if( __TBB_CompareAndSwapGeneric ( ptr, result+addend, result )==result ) - break; - b.pause(); - } - return result; -} - -template -inline T __TBB_FetchAndStoreGeneric (volatile void *ptr, T value) { - atomic_backoff b; - T result; - for(;;) { - result = *reinterpret_cast(ptr); - // __TBB_CompareAndSwapGeneric presumed to have full fence. - if( __TBB_CompareAndSwapGeneric ( ptr, value, result )==result ) - break; - b.pause(); - } - return result; -} - -// Macro __TBB_TypeWithAlignmentAtLeastAsStrict(T) should be a type with alignment at least as -// strict as type T. Type type should have a trivial default constructor and destructor, so that -// arrays of that type can be declared without initializers. 
-// It is correct (but perhaps a waste of space) if __TBB_TypeWithAlignmentAtLeastAsStrict(T) expands -// to a type bigger than T. -// The default definition here works on machines where integers are naturally aligned and the -// strictest alignment is 16. -#ifndef __TBB_TypeWithAlignmentAtLeastAsStrict - -#if __GNUC__ || __SUNPRO_CC -struct __TBB_machine_type_with_strictest_alignment { - int member[4]; -} __attribute__((aligned(16))); -#elif _MSC_VER -__declspec(align(16)) struct __TBB_machine_type_with_strictest_alignment { - int member[4]; -}; -#else -#error Must define __TBB_TypeWithAlignmentAtLeastAsStrict(T) or __TBB_machine_type_with_strictest_alignment -#endif - -template struct type_with_alignment {__TBB_machine_type_with_strictest_alignment member;}; -template<> struct type_with_alignment<1> { char member; }; -template<> struct type_with_alignment<2> { uint16_t member; }; -template<> struct type_with_alignment<4> { uint32_t member; }; -template<> struct type_with_alignment<8> { uint64_t member; }; - -#if _MSC_VER||defined(__GNUC__)&&__GNUC__==3 && __GNUC_MINOR__<=2 -//! Work around for bug in GNU 3.2 and MSVC compilers. -/** Bug is that compiler sometimes returns 0 for __alignof(T) when T has not yet been instantiated. - The work-around forces instantiation by forcing computation of sizeof(T) before __alignof(T). */ -template -struct work_around_alignment_bug { -#if _MSC_VER - static const size_t alignment = __alignof(T); -#else - static const size_t alignment = __alignof__(T); -#endif -}; -#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment::alignment> -#elif __GNUC__ || __SUNPRO_CC -#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) tbb::internal::type_with_alignment<__alignof__(T)> -#else -#define __TBB_TypeWithAlignmentAtLeastAsStrict(T) __TBB_machine_type_with_strictest_alignment -#endif -#endif /* ____TBB_TypeWithAlignmentAtLeastAsStrict */ - -// Template class here is to avoid instantiation of the static data for modules that don't use it -template -struct reverse { - static const T byte_table[256]; -}; -// An efficient implementation of the reverse function utilizes a 2^8 lookup table holding the bit-reversed -// values of [0..2^8 - 1]. Those values can also be computed on the fly at a slightly higher cost. 
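// Aside, not part of the patch: the comment above notes that the reversed byte values could also
// be computed on the fly at a slightly higher cost. A minimal sketch of that alternative, using
// the classic shift-and-or loop; reverse_byte_on_the_fly is a hypothetical name.
inline unsigned char reverse_byte_on_the_fly( unsigned char b ) {
    unsigned char r = 0;
    for( int i = 0; i < 8; ++i ) {
        r = (unsigned char)( (r << 1) | (b & 1) );   // shift result up, pull in next low bit of b
        b = (unsigned char)( b >> 1 );
    }
    return r;   // e.g. 0x01 -> 0x80 and 0x0F -> 0xF0, matching byte_table below
}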
-template -const T reverse::byte_table[256] = { - 0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0, - 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, - 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, - 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, - 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, - 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA, - 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, - 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, - 0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1, - 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9, - 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, - 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, - 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3, - 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, - 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, - 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF -}; - -} // namespace internal -} // namespace tbb - -#ifndef __TBB_CompareAndSwap1 -#define __TBB_CompareAndSwap1 tbb::internal::__TBB_CompareAndSwapGeneric<1,uint8_t> -#endif - -#ifndef __TBB_CompareAndSwap2 -#define __TBB_CompareAndSwap2 tbb::internal::__TBB_CompareAndSwapGeneric<2,uint16_t> -#endif - -#ifndef __TBB_CompareAndSwapW -#define __TBB_CompareAndSwapW tbb::internal::__TBB_CompareAndSwapGeneric -#endif - -#ifndef __TBB_FetchAndAdd1 -#define __TBB_FetchAndAdd1 tbb::internal::__TBB_FetchAndAddGeneric<1,uint8_t> -#endif - -#ifndef __TBB_FetchAndAdd2 -#define __TBB_FetchAndAdd2 tbb::internal::__TBB_FetchAndAddGeneric<2,uint16_t> -#endif - -#ifndef __TBB_FetchAndAdd4 -#define __TBB_FetchAndAdd4 tbb::internal::__TBB_FetchAndAddGeneric<4,uint32_t> -#endif - -#ifndef __TBB_FetchAndAdd8 -#define __TBB_FetchAndAdd8 tbb::internal::__TBB_FetchAndAddGeneric<8,uint64_t> -#endif - -#ifndef __TBB_FetchAndAddW -#define __TBB_FetchAndAddW tbb::internal::__TBB_FetchAndAddGeneric -#endif - -#ifndef __TBB_FetchAndStore1 -#define __TBB_FetchAndStore1 tbb::internal::__TBB_FetchAndStoreGeneric<1,uint8_t> -#endif - -#ifndef __TBB_FetchAndStore2 -#define __TBB_FetchAndStore2 tbb::internal::__TBB_FetchAndStoreGeneric<2,uint16_t> -#endif - -#ifndef __TBB_FetchAndStore4 -#define __TBB_FetchAndStore4 tbb::internal::__TBB_FetchAndStoreGeneric<4,uint32_t> -#endif - -#ifndef __TBB_FetchAndStore8 -#define __TBB_FetchAndStore8 tbb::internal::__TBB_FetchAndStoreGeneric<8,uint64_t> -#endif - -#ifndef __TBB_FetchAndStoreW -#define __TBB_FetchAndStoreW tbb::internal::__TBB_FetchAndStoreGeneric -#endif - -#if __TBB_DECL_FENCED_ATOMICS - -#ifndef __TBB_CompareAndSwap1__TBB_full_fence -#define __TBB_CompareAndSwap1__TBB_full_fence __TBB_CompareAndSwap1 -#endif -#ifndef __TBB_CompareAndSwap1acquire -#define __TBB_CompareAndSwap1acquire __TBB_CompareAndSwap1__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap1release -#define 
__TBB_CompareAndSwap1release __TBB_CompareAndSwap1__TBB_full_fence -#endif - -#ifndef __TBB_CompareAndSwap2__TBB_full_fence -#define __TBB_CompareAndSwap2__TBB_full_fence __TBB_CompareAndSwap2 -#endif -#ifndef __TBB_CompareAndSwap2acquire -#define __TBB_CompareAndSwap2acquire __TBB_CompareAndSwap2__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap2release -#define __TBB_CompareAndSwap2release __TBB_CompareAndSwap2__TBB_full_fence -#endif - -#ifndef __TBB_CompareAndSwap4__TBB_full_fence -#define __TBB_CompareAndSwap4__TBB_full_fence __TBB_CompareAndSwap4 -#endif -#ifndef __TBB_CompareAndSwap4acquire -#define __TBB_CompareAndSwap4acquire __TBB_CompareAndSwap4__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap4release -#define __TBB_CompareAndSwap4release __TBB_CompareAndSwap4__TBB_full_fence -#endif - -#ifndef __TBB_CompareAndSwap8__TBB_full_fence -#define __TBB_CompareAndSwap8__TBB_full_fence __TBB_CompareAndSwap8 -#endif -#ifndef __TBB_CompareAndSwap8acquire -#define __TBB_CompareAndSwap8acquire __TBB_CompareAndSwap8__TBB_full_fence -#endif -#ifndef __TBB_CompareAndSwap8release -#define __TBB_CompareAndSwap8release __TBB_CompareAndSwap8__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd1__TBB_full_fence -#define __TBB_FetchAndAdd1__TBB_full_fence __TBB_FetchAndAdd1 -#endif -#ifndef __TBB_FetchAndAdd1acquire -#define __TBB_FetchAndAdd1acquire __TBB_FetchAndAdd1__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd1release -#define __TBB_FetchAndAdd1release __TBB_FetchAndAdd1__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd2__TBB_full_fence -#define __TBB_FetchAndAdd2__TBB_full_fence __TBB_FetchAndAdd2 -#endif -#ifndef __TBB_FetchAndAdd2acquire -#define __TBB_FetchAndAdd2acquire __TBB_FetchAndAdd2__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd2release -#define __TBB_FetchAndAdd2release __TBB_FetchAndAdd2__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd4__TBB_full_fence -#define __TBB_FetchAndAdd4__TBB_full_fence __TBB_FetchAndAdd4 -#endif -#ifndef __TBB_FetchAndAdd4acquire -#define __TBB_FetchAndAdd4acquire __TBB_FetchAndAdd4__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd4release -#define __TBB_FetchAndAdd4release __TBB_FetchAndAdd4__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndAdd8__TBB_full_fence -#define __TBB_FetchAndAdd8__TBB_full_fence __TBB_FetchAndAdd8 -#endif -#ifndef __TBB_FetchAndAdd8acquire -#define __TBB_FetchAndAdd8acquire __TBB_FetchAndAdd8__TBB_full_fence -#endif -#ifndef __TBB_FetchAndAdd8release -#define __TBB_FetchAndAdd8release __TBB_FetchAndAdd8__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore1__TBB_full_fence -#define __TBB_FetchAndStore1__TBB_full_fence __TBB_FetchAndStore1 -#endif -#ifndef __TBB_FetchAndStore1acquire -#define __TBB_FetchAndStore1acquire __TBB_FetchAndStore1__TBB_full_fence -#endif -#ifndef __TBB_FetchAndStore1release -#define __TBB_FetchAndStore1release __TBB_FetchAndStore1__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore2__TBB_full_fence -#define __TBB_FetchAndStore2__TBB_full_fence __TBB_FetchAndStore2 -#endif -#ifndef __TBB_FetchAndStore2acquire -#define __TBB_FetchAndStore2acquire __TBB_FetchAndStore2__TBB_full_fence -#endif -#ifndef __TBB_FetchAndStore2release -#define __TBB_FetchAndStore2release __TBB_FetchAndStore2__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore4__TBB_full_fence -#define __TBB_FetchAndStore4__TBB_full_fence __TBB_FetchAndStore4 -#endif -#ifndef __TBB_FetchAndStore4acquire -#define __TBB_FetchAndStore4acquire __TBB_FetchAndStore4__TBB_full_fence -#endif -#ifndef 
__TBB_FetchAndStore4release -#define __TBB_FetchAndStore4release __TBB_FetchAndStore4__TBB_full_fence -#endif - -#ifndef __TBB_FetchAndStore8__TBB_full_fence -#define __TBB_FetchAndStore8__TBB_full_fence __TBB_FetchAndStore8 -#endif -#ifndef __TBB_FetchAndStore8acquire -#define __TBB_FetchAndStore8acquire __TBB_FetchAndStore8__TBB_full_fence -#endif -#ifndef __TBB_FetchAndStore8release -#define __TBB_FetchAndStore8release __TBB_FetchAndStore8__TBB_full_fence -#endif - -#endif // __TBB_DECL_FENCED_ATOMICS - -// Special atomic functions -#ifndef __TBB_FetchAndAddWrelease -#define __TBB_FetchAndAddWrelease __TBB_FetchAndAddW -#endif - -#ifndef __TBB_FetchAndIncrementWacquire -#define __TBB_FetchAndIncrementWacquire(P) __TBB_FetchAndAddW(P,1) -#endif - -#ifndef __TBB_FetchAndDecrementWrelease -#define __TBB_FetchAndDecrementWrelease(P) __TBB_FetchAndAddW(P,(-1)) -#endif - -template -struct __TBB_machine_load_store { - static inline T load_with_acquire(const volatile T& location) { - T to_return = location; - __TBB_release_consistency_helper(); - return to_return; - } - - static inline void store_with_release(volatile T &location, T value) { - __TBB_release_consistency_helper(); - location = value; - } -}; - -#if __TBB_WORDSIZE==4 -#if _MSC_VER -using tbb::internal::int64_t; -#endif -// On 32-bit platforms, there should be definition of __TBB_Store8 and __TBB_Load8 -#ifndef __TBB_Store8 -inline void __TBB_Store8 (volatile void *ptr, int64_t value) { - for(;;) { - int64_t result = *(int64_t *)ptr; - if( __TBB_CompareAndSwap8(ptr,value,result)==result ) break; - } -} -#endif - -#ifndef __TBB_Load8 -inline int64_t __TBB_Load8 (const volatile void *ptr) { - const int64_t anyvalue = 3264; // Could be anything, just the same for comparand and new value - return __TBB_CompareAndSwap8(const_cast(ptr),anyvalue,anyvalue); -} -#endif - -template -struct __TBB_machine_load_store { - static inline T load_with_acquire(const volatile T& location) { - T to_return = (T)__TBB_Load8((const volatile void*)&location); - __TBB_release_consistency_helper(); - return to_return; - } - - static inline void store_with_release(volatile T& location, T value) { - __TBB_release_consistency_helper(); - __TBB_Store8((volatile void *)&location,(int64_t)value); - } -}; -#endif /* __TBB_WORDSIZE==4 */ - -#ifndef __TBB_load_with_acquire -template -inline T __TBB_load_with_acquire(const volatile T &location) { - return __TBB_machine_load_store::load_with_acquire(location); -} -#endif - -#ifndef __TBB_store_with_release -template -inline void __TBB_store_with_release(volatile T& location, V value) { - __TBB_machine_load_store::store_with_release(location,T(value)); -} -//! Overload that exists solely to avoid /Wp64 warnings. -inline void __TBB_store_with_release(volatile size_t& location, size_t value) { - __TBB_machine_load_store::store_with_release(location,value); -} -#endif - -#ifndef __TBB_Log2 -inline intptr_t __TBB_Log2( uintptr_t x ) { - if( x==0 ) return -1; - intptr_t result = 0; - uintptr_t tmp; -#if __TBB_WORDSIZE>=8 - if( (tmp = x>>32) ) { x=tmp; result += 32; } -#endif - if( (tmp = x>>16) ) { x=tmp; result += 16; } - if( (tmp = x>>8) ) { x=tmp; result += 8; } - if( (tmp = x>>4) ) { x=tmp; result += 4; } - if( (tmp = x>>2) ) { x=tmp; result += 2; } - return (x&2)? 
result+1: result; -} -#endif - -#ifndef __TBB_AtomicOR -inline void __TBB_AtomicOR( volatile void *operand, uintptr_t addend ) { - tbb::internal::atomic_backoff b; - for(;;) { - uintptr_t tmp = *(volatile uintptr_t *)operand; - uintptr_t result = __TBB_CompareAndSwapW(operand, tmp|addend, tmp); - if( result==tmp ) break; - b.pause(); - } -} -#endif - -#ifndef __TBB_AtomicAND -inline void __TBB_AtomicAND( volatile void *operand, uintptr_t addend ) { - tbb::internal::atomic_backoff b; - for(;;) { - uintptr_t tmp = *(volatile uintptr_t *)operand; - uintptr_t result = __TBB_CompareAndSwapW(operand, tmp&addend, tmp); - if( result==tmp ) break; - b.pause(); - } -} -#endif - -#ifndef __TBB_TryLockByte -inline bool __TBB_TryLockByte( unsigned char &flag ) { - return __TBB_CompareAndSwap1(&flag,1,0)==0; -} -#endif - -#ifndef __TBB_LockByte -inline uintptr_t __TBB_LockByte( unsigned char& flag ) { - if ( !__TBB_TryLockByte(flag) ) { - tbb::internal::atomic_backoff b; - do { - b.pause(); - } while ( !__TBB_TryLockByte(flag) ); - } - return 0; -} -#endif - -#ifndef __TBB_ReverseByte -inline unsigned char __TBB_ReverseByte(unsigned char src) { - return tbb::internal::reverse::byte_table[src]; -} -#endif - -template -T __TBB_ReverseBits(T src) -{ - T dst; - unsigned char *original = (unsigned char *) &src; - unsigned char *reversed = (unsigned char *) &dst; - - for( int i = sizeof(T)-1; i >= 0; i-- ) - reversed[i] = __TBB_ReverseByte( original[sizeof(T)-i-1] ); - - return dst; -} - -#endif /* __TBB_machine_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_profiling.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbb_profiling.h deleted file mode 100644 index c3bbb51259..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_profiling.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef __TBB_profiling_H -#define __TBB_profiling_H - -// Check if the tools support is enabled -#if (_WIN32||_WIN64||__linux__) && !__MINGW32__ && TBB_USE_THREADING_TOOLS - -#if _WIN32||_WIN64 -#include /* mbstowcs_s */ -#endif -#include "tbb_stddef.h" - -namespace tbb { - namespace internal { -#if _WIN32||_WIN64 - void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void *obj, const wchar_t* name ); - inline size_t multibyte_to_widechar( wchar_t* wcs, const char* mbs, size_t bufsize) { -#if _MSC_VER>=1400 - size_t len; - mbstowcs_s( &len, wcs, bufsize, mbs, _TRUNCATE ); - return len; // mbstowcs_s counts null terminator -#else - size_t len = mbstowcs( wcs, mbs, bufsize ); - if(wcs && len!=size_t(-1) ) - wcs[lenModules (groups of functionality) implemented by the library - * - Classes provided by the library - * - Files constituting the library. - * . - * Please note that significant part of TBB functionality is implemented in the form of - * template functions, descriptions of which are not accessible on the Classes - * tab. Use Modules or Namespace/Namespace Members - * tabs to find them. - * - * Additional pieces of information can be found here - * - \subpage concepts - * . - */ - -/** \page concepts TBB concepts - - A concept is a set of requirements to a type, which are necessary and sufficient - for the type to model a particular behavior or a set of behaviors. Some concepts - are specific to a particular algorithm (e.g. algorithm body), while other ones - are common to several algorithms (e.g. range concept). - - All TBB algorithms make use of different classes implementing various concepts. - Implementation classes are supplied by the user as type arguments of template - parameters and/or as objects passed as function call arguments. The library - provides predefined implementations of some concepts (e.g. several kinds of - \ref range_req "ranges"), while other ones must always be implemented by the user. - - TBB defines a set of minimal requirements each concept must conform to. 
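// Aside, not part of the patch: a minimal user-defined type modelling the Range concept referred
// to above (copy constructor, empty(), is_divisible(), and a splitting constructor taking
// tbb::split, which is declared later in this header). The class and member names are
// illustrative only.
class half_open_range {
    long my_begin, my_end;
public:
    half_open_range( long b, long e ) : my_begin(b), my_end(e) {}
    bool empty() const { return my_begin >= my_end; }
    bool is_divisible() const { return my_end - my_begin > 1; }
    // Splitting constructor: leave the left half in r, take the right half into *this.
    half_open_range( half_open_range& r, tbb::split )
        : my_begin( r.my_begin + (r.my_end - r.my_begin)/2 ), my_end( r.my_end )
    { r.my_end = my_begin; }
    long begin() const { return my_begin; }
    long end() const { return my_end; }
};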
Here is - the list of different concepts hyperlinked to the corresponding requirements specifications: - - \subpage range_req - - \subpage parallel_do_body_req - - \subpage parallel_for_body_req - - \subpage parallel_reduce_body_req - - \subpage parallel_scan_body_req - - \subpage parallel_sort_iter_req -**/ - -// Define preprocessor symbols used to determine architecture -#if _WIN32||_WIN64 -# if defined(_M_AMD64) -# define __TBB_x86_64 1 -# elif defined(_M_IA64) -# define __TBB_ipf 1 -# elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support -# define __TBB_x86_32 1 -# endif -#else /* Assume generic Unix */ -# if !__linux__ && !__APPLE__ -# define __TBB_generic_os 1 -# endif -# if __x86_64__ -# define __TBB_x86_64 1 -# elif __ia64__ -# define __TBB_ipf 1 -# elif __i386__||__i386 // __i386 is for Sun OS -# define __TBB_x86_32 1 -# else -# define __TBB_generic_arch 1 -# endif -#endif - -#if _MSC_VER -// define the parts of stdint.h that are needed, but put them inside tbb::internal -namespace tbb { -namespace internal { - typedef __int8 int8_t; - typedef __int16 int16_t; - typedef __int32 int32_t; - typedef __int64 int64_t; - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - typedef unsigned __int64 uint64_t; -} // namespace internal -} // namespace tbb -#else -#include -#endif /* _MSC_VER */ - -#if _MSC_VER >=1400 -#define __TBB_EXPORTED_FUNC __cdecl -#define __TBB_EXPORTED_METHOD __thiscall -#else -#define __TBB_EXPORTED_FUNC -#define __TBB_EXPORTED_METHOD -#endif - -#include /* Need size_t and ptrdiff_t */ - -#if _MSC_VER -#define __TBB_tbb_windef_H -#include "_tbb_windef.h" -#undef __TBB_tbb_windef_H -#endif - -#include "tbb_config.h" - -//! The namespace tbb contains all components of the library. -namespace tbb { - -using std::size_t; using std::ptrdiff_t; - - //! Type for an assertion handler - typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment ); - -#if TBB_USE_ASSERT - -//! Assert that x is true. -/** If x is false, print assertion failure message. - If the comment argument is not NULL, it is printed as part of the failure message. - The comment argument has no other effect. */ -#define __TBB_ASSERT(predicate,message) ((predicate)?((void)0):tbb::assertion_failure(__FILE__,__LINE__,#predicate,message)) -#define __TBB_ASSERT_EX __TBB_ASSERT - - //! Set assertion handler and return previous value of it. - assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ); - - //! Process an assertion failure. - /** Normally called from __TBB_ASSERT macro. - If assertion handler is null, print message for assertion failure and abort. - Otherwise call the assertion handler. */ - void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ); - -#else - -//! No-op version of __TBB_ASSERT. -#define __TBB_ASSERT(predicate,comment) ((void)0) -//! "Extended" version is useful to suppress warnings if a variable is only used with an assert -#define __TBB_ASSERT_EX(predicate,comment) ((void)(1 && (predicate))) - -#endif /* TBB_USE_ASSERT */ - -//! The function returns the interface version of the TBB shared library being used. -/** - * The version it returns is determined at runtime, not at compile/link time. - * So it can be different than the value of TBB_INTERFACE_VERSION obtained at compile time. 
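// Aside, not part of the patch: a common way to use the declaration documented above -- verify
// at start-up that the TBB library loaded at run time is not older than the headers the
// application was compiled against. check_tbb_version is a hypothetical helper.
#include <cstdio>    // std::fprintf
#include <cstdlib>   // std::abort

inline void check_tbb_version() {
    int runtime_version = TBB_runtime_interface_version();   // reported by the loaded shared library
    if( runtime_version < TBB_INTERFACE_VERSION ) {           // macro value seen at compile time
        std::fprintf( stderr, "TBB runtime interface %d is older than the headers (%d)\n",
                      runtime_version, TBB_INTERFACE_VERSION );
        std::abort();
    }
}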
- */ -extern "C" int __TBB_EXPORTED_FUNC TBB_runtime_interface_version(); - -//! Dummy type that distinguishes splitting constructor from copy constructor. -/** - * See description of parallel_for and parallel_reduce for example usages. - * @ingroup algorithms - */ -class split { -}; - -/** - * @cond INTERNAL - * @brief Identifiers declared inside namespace internal should never be used directly by client code. - */ -namespace internal { - -//! Compile-time constant that is upper bound on cache line/sector size. -/** It should be used only in situations where having a compile-time upper - bound is more useful than a run-time exact answer. - @ingroup memory_allocation */ -const size_t NFS_MaxLineSize = 128; - -template -struct padded_base : T { - char pad[NFS_MaxLineSize - sizeof(T) % NFS_MaxLineSize]; -}; -template struct padded_base : T {}; - -//! Pads type T to fill out to a multiple of cache line size. -template -struct padded : padded_base {}; - -//! Extended variant of the standard offsetof macro -/** The standard offsetof macro is not sufficient for TBB as it can be used for - POD-types only. The constant 0x1000 (not NULL) is necessary to appease GCC. **/ -#define __TBB_offsetof(class_name, member_name) \ - ((ptrdiff_t)&(reinterpret_cast(0x1000)->member_name) - 0x1000) - -//! Returns address of the object containing a member with the given name and address -#define __TBB_get_object_ref(class_name, member_name, member_addr) \ - (*reinterpret_cast((char*)member_addr - __TBB_offsetof(class_name, member_name))) - -//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info -void __TBB_EXPORTED_FUNC handle_perror( int error_code, const char* aux_info ); - -#if TBB_USE_EXCEPTIONS - #define __TBB_TRY try - #define __TBB_CATCH(e) catch(e) - #define __TBB_THROW(e) throw e - #define __TBB_RETHROW() throw -#else /* !TBB_USE_EXCEPTIONS */ - inline bool __TBB_false() { return false; } - #define __TBB_TRY - #define __TBB_CATCH(e) if ( tbb::internal::__TBB_false() ) - #define __TBB_THROW(e) ((void)0) - #define __TBB_RETHROW() ((void)0) -#endif /* !TBB_USE_EXCEPTIONS */ - -//! Report a runtime warning. -void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... ); - -#if TBB_USE_ASSERT -static void* const poisoned_ptr = reinterpret_cast(-1); - -//! Set p to invalid pointer value. -template -inline void poison_pointer( T*& p ) { p = reinterpret_cast(poisoned_ptr); } - -/** Expected to be used in assertions only, thus no empty form is defined. **/ -template -inline bool is_poisoned( T* p ) { return p == reinterpret_cast(poisoned_ptr); } -#else -template -inline void poison_pointer( T* ) {/*do nothing*/} -#endif /* !TBB_USE_ASSERT */ - -//! Cast pointer from U* to T. -/** This method should be used sparingly as a last resort for dealing with - situations that inherently break strict ISO C++ aliasing rules. */ -template -inline T punned_cast( U* ptr ) { - uintptr_t x = reinterpret_cast(ptr); - return reinterpret_cast(x); -} - -//! Base class for types that should not be assigned. -class no_assign { - // Deny assignment - void operator=( const no_assign& ); -public: -#if __GNUC__ - //! Explicitly define default construction, because otherwise gcc issues gratuitous warning. - no_assign() {} -#endif /* __GNUC__ */ -}; - -//! Base class for types that should not be copied or assigned. -class no_copy: no_assign { - //! Deny copy construction - no_copy( const no_copy& ); -public: - //! Allow default construction - no_copy() {} -}; - -//! 
Class for determining type of std::allocator::value_type. -template -struct allocator_type { - typedef T value_type; -}; - -#if _MSC_VER -//! Microsoft std::allocator has non-standard extension that strips const from a type. -template -struct allocator_type { - typedef T value_type; -}; -#endif - -// Struct to be used as a version tag for inline functions. -/** Version tag can be necessary to prevent loader on Linux from using the wrong - symbol in debug builds (when inline functions are compiled as out-of-line). **/ -struct version_tag_v3 {}; - -typedef version_tag_v3 version_tag; - -} // internal -//! @endcond - -} // tbb - -#endif /* RC_INVOKED */ -#endif /* __TBB_tbb_stddef_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_thread.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbb_thread.h deleted file mode 100644 index 14ee61e9d5..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbb_thread.h +++ /dev/null @@ -1,293 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_thread_H -#define __TBB_tbb_thread_H - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#define __TBB_NATIVE_THREAD_ROUTINE unsigned WINAPI -#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) unsigned (WINAPI* r)( void* ) -#else -#define __TBB_NATIVE_THREAD_ROUTINE void* -#define __TBB_NATIVE_THREAD_ROUTINE_PTR(r) void* (*r)( void* ) -#include -#endif // _WIN32||_WIN64 - -#include "tbb_stddef.h" -#include "tick_count.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -//! @cond INTERNAL -namespace internal { - - class tbb_thread_v3; - -} // namespace internal - -void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ); - -namespace internal { - - //! Allocate a closure - void* __TBB_EXPORTED_FUNC allocate_closure_v3( size_t size ); - //! 
Free a closure allocated by allocate_closure_v3 - void __TBB_EXPORTED_FUNC free_closure_v3( void* ); - - struct thread_closure_base { - void* operator new( size_t size ) {return allocate_closure_v3(size);} - void operator delete( void* ptr ) {free_closure_v3(ptr);} - }; - - template struct thread_closure_0: thread_closure_base { - F function; - - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_0 *self = static_cast(c); - self->function(); - delete self; - return 0; - } - thread_closure_0( const F& f ) : function(f) {} - }; - //! Structure used to pass user function with 1 argument to thread. - template struct thread_closure_1: thread_closure_base { - F function; - X arg1; - //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_1 *self = static_cast(c); - self->function(self->arg1); - delete self; - return 0; - } - thread_closure_1( const F& f, const X& x ) : function(f), arg1(x) {} - }; - template struct thread_closure_2: thread_closure_base { - F function; - X arg1; - Y arg2; - //! Routine passed to Windows's _beginthreadex by thread::internal_start() inside tbb.dll - static __TBB_NATIVE_THREAD_ROUTINE start_routine( void* c ) { - thread_closure_2 *self = static_cast(c); - self->function(self->arg1, self->arg2); - delete self; - return 0; - } - thread_closure_2( const F& f, const X& x, const Y& y ) : function(f), arg1(x), arg2(y) {} - }; - - //! Versioned thread class. - class tbb_thread_v3 { - tbb_thread_v3(const tbb_thread_v3&); // = delete; // Deny access - public: -#if _WIN32||_WIN64 - typedef HANDLE native_handle_type; -#else - typedef pthread_t native_handle_type; -#endif // _WIN32||_WIN64 - - class id; - //! Constructs a thread object that does not represent a thread of execution. - tbb_thread_v3() : my_handle(0) -#if _WIN32||_WIN64 - , my_thread_id(0) -#endif // _WIN32||_WIN64 - {} - - //! Constructs an object and executes f() in a new thread - template explicit tbb_thread_v3(F f) { - typedef internal::thread_closure_0 closure_type; - internal_start(closure_type::start_routine, new closure_type(f)); - } - //! Constructs an object and executes f(x) in a new thread - template tbb_thread_v3(F f, X x) { - typedef internal::thread_closure_1 closure_type; - internal_start(closure_type::start_routine, new closure_type(f,x)); - } - //! Constructs an object and executes f(x,y) in a new thread - template tbb_thread_v3(F f, X x, Y y) { - typedef internal::thread_closure_2 closure_type; - internal_start(closure_type::start_routine, new closure_type(f,x,y)); - } - - tbb_thread_v3& operator=(tbb_thread_v3& x) { - if (joinable()) detach(); - my_handle = x.my_handle; - x.my_handle = 0; -#if _WIN32||_WIN64 - my_thread_id = x.my_thread_id; - x.my_thread_id = 0; -#endif // _WIN32||_WIN64 - return *this; - } - void swap( tbb_thread_v3& t ) {tbb::swap( *this, t );} - bool joinable() const {return my_handle!=0; } - //! The completion of the thread represented by *this happens before join() returns. - void __TBB_EXPORTED_METHOD join(); - //! When detach() returns, *this no longer represents the possibly continuing thread of execution. - void __TBB_EXPORTED_METHOD detach(); - ~tbb_thread_v3() {if( joinable() ) detach();} - inline id get_id() const; - native_handle_type native_handle() { return my_handle; } - - //! The number of hardware thread contexts. 
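// Aside, not part of the patch: typical client-side use of this thread wrapper (via the public
// tbb::tbb_thread typedef) -- construct with a callable and up to two by-value arguments, query
// hardware_concurrency() declared just below, then join(). say_hello is a hypothetical function.
#include <cstdio>

static void say_hello( const char* who, int id ) {
    std::printf( "hello from %s #%d\n", who, id );
}

inline void run_one_thread() {
    tbb::tbb_thread t( say_hello, "worker", 1 );   // executes f(x,y) in a new thread
    std::printf( "hardware thread contexts: %u\n", tbb::tbb_thread::hardware_concurrency() );
    t.join();                                      // thread completion happens-before join() returns
}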
- static unsigned __TBB_EXPORTED_FUNC hardware_concurrency(); - private: - native_handle_type my_handle; -#if _WIN32||_WIN64 - DWORD my_thread_id; -#endif // _WIN32||_WIN64 - - /** Runs start_routine(closure) on another thread and sets my_handle to the handle of the created thread. */ - void __TBB_EXPORTED_METHOD internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), - void* closure ); - friend void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - friend void tbb::swap( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - }; - - class tbb_thread_v3::id { -#if _WIN32||_WIN64 - DWORD my_id; - id( DWORD id_ ) : my_id(id_) {} -#else - pthread_t my_id; - id( pthread_t id_ ) : my_id(id_) {} -#endif // _WIN32||_WIN64 - friend class tbb_thread_v3; - public: - id() : my_id(0) {} - - friend bool operator==( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator!=( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator<( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator<=( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator>( tbb_thread_v3::id x, tbb_thread_v3::id y ); - friend bool operator>=( tbb_thread_v3::id x, tbb_thread_v3::id y ); - - template - friend std::basic_ostream& - operator<< (std::basic_ostream &out, - tbb_thread_v3::id id) - { - out << id.my_id; - return out; - } - friend tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); - }; // tbb_thread_v3::id - - tbb_thread_v3::id tbb_thread_v3::get_id() const { -#if _WIN32||_WIN64 - return id(my_thread_id); -#else - return id(my_handle); -#endif // _WIN32||_WIN64 - } - void __TBB_EXPORTED_FUNC move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ); - tbb_thread_v3::id __TBB_EXPORTED_FUNC thread_get_id_v3(); - void __TBB_EXPORTED_FUNC thread_yield_v3(); - void __TBB_EXPORTED_FUNC thread_sleep_v3(const tick_count::interval_t &i); - - inline bool operator==(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id == y.my_id; - } - inline bool operator!=(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id != y.my_id; - } - inline bool operator<(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id < y.my_id; - } - inline bool operator<=(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id <= y.my_id; - } - inline bool operator>(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id > y.my_id; - } - inline bool operator>=(tbb_thread_v3::id x, tbb_thread_v3::id y) - { - return x.my_id >= y.my_id; - } - -} // namespace internal; - -//! Users reference thread class by name tbb_thread -typedef internal::tbb_thread_v3 tbb_thread; - -using internal::operator==; -using internal::operator!=; -using internal::operator<; -using internal::operator>; -using internal::operator<=; -using internal::operator>=; - -inline void move( tbb_thread& t1, tbb_thread& t2 ) { - internal::move_v3(t1, t2); -} - -inline void swap( internal::tbb_thread_v3& t1, internal::tbb_thread_v3& t2 ) { - tbb::tbb_thread::native_handle_type h = t1.my_handle; - t1.my_handle = t2.my_handle; - t2.my_handle = h; -#if _WIN32||_WIN64 - DWORD i = t1.my_thread_id; - t1.my_thread_id = t2.my_thread_id; - t2.my_thread_id = i; -#endif /* _WIN32||_WIN64 */ -} - -namespace this_tbb_thread { - inline tbb_thread::id get_id() { return internal::thread_get_id_v3(); } - //! Offers the operating system the opportunity to schedule another thread. - inline void yield() { internal::thread_yield_v3(); } - //! The current thread blocks at least until the time specified. 
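// Aside, not part of the patch: sleep() defined just below takes a tbb::tick_count::interval_t,
// so a pause of, say, ten milliseconds is expressed through the seconds-based interval
// constructor. pause_10ms is a hypothetical helper.
inline void pause_10ms() {
    tbb::this_tbb_thread::sleep( tbb::tick_count::interval_t( 0.010 ) );
}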
- inline void sleep(const tick_count::interval_t &i) { - internal::thread_sleep_v3(i); - } -} // namespace this_tbb_thread - -} // namespace tbb - -#endif /* __TBB_tbb_thread_H */ diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tbbmalloc_proxy.h b/deal.II/bundled/tbb30_104oss/include/tbb/tbbmalloc_proxy.h deleted file mode 100644 index f15ca12e18..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tbbmalloc_proxy.h +++ /dev/null @@ -1,74 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* -Replacing the standard memory allocation routines in Microsoft* C/C++ RTL -(malloc/free, global new/delete, etc.) with the TBB memory allocator. - -Include the following header to a source of any binary which is loaded during -application startup - -#include "tbb/tbbmalloc_proxy.h" - -or add following parameters to the linker options for the binary which is -loaded during application startup. It can be either exe-file or dll. - -For win32 -tbbmalloc_proxy.lib /INCLUDE:"___TBB_malloc_proxy" -win64 -tbbmalloc_proxy.lib /INCLUDE:"__TBB_malloc_proxy" -*/ - -#ifndef __TBB_tbbmalloc_proxy_H -#define __TBB_tbbmalloc_proxy_H - -#if _MSC_VER - -#ifdef _DEBUG - #pragma comment(lib, "tbbmalloc_proxy_debug.lib") -#else - #pragma comment(lib, "tbbmalloc_proxy.lib") -#endif - -#if defined(_WIN64) - #pragma comment(linker, "/include:__TBB_malloc_proxy") -#else - #pragma comment(linker, "/include:___TBB_malloc_proxy") -#endif - -#else -/* Primarily to support MinGW */ - -extern "C" void __TBB_malloc_proxy(); -struct __TBB_malloc_proxy_caller { - __TBB_malloc_proxy_caller() { __TBB_malloc_proxy(); } -} volatile __TBB_malloc_proxy_helper_object; - -#endif // _MSC_VER - -#endif //__TBB_tbbmalloc_proxy_H diff --git a/deal.II/bundled/tbb30_104oss/include/tbb/tick_count.h b/deal.II/bundled/tbb30_104oss/include/tbb/tick_count.h deleted file mode 100644 index de6b3c25d2..0000000000 --- a/deal.II/bundled/tbb30_104oss/include/tbb/tick_count.h +++ /dev/null @@ -1,155 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tick_count_H -#define __TBB_tick_count_H - -#include "tbb_stddef.h" - -#if _WIN32||_WIN64 -#include "machine/windows_api.h" -#elif __linux__ -#include -#else /* generic Unix */ -#include -#endif /* (choice of OS) */ - -namespace tbb { - -//! Absolute timestamp -/** @ingroup timing */ -class tick_count { -public: - //! Relative time interval. - class interval_t { - long long value; - explicit interval_t( long long value_ ) : value(value_) {} - public: - //! Construct a time interval representing zero time duration - interval_t() : value(0) {}; - - //! Construct a time interval representing sec seconds time duration - explicit interval_t( double sec ); - - //! Return the length of a time interval in seconds - double seconds() const; - - friend class tbb::tick_count; - - //! Extract the intervals from the tick_counts and subtract them. - friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); - - //! Add two intervals. - friend interval_t operator+( const interval_t& i, const interval_t& j ) { - return interval_t(i.value+j.value); - } - - //! Subtract two intervals. - friend interval_t operator-( const interval_t& i, const interval_t& j ) { - return interval_t(i.value-j.value); - } - - //! Accumulation operator - interval_t& operator+=( const interval_t& i ) {value += i.value; return *this;} - - //! Subtraction operator - interval_t& operator-=( const interval_t& i ) {value -= i.value; return *this;} - }; - - //! Construct an absolute timestamp initialized to zero. - tick_count() : my_count(0) {}; - - //! Return current time. - static tick_count now(); - - //! 
Subtract two timestamps to get the time interval between - friend interval_t operator-( const tick_count& t1, const tick_count& t0 ); - -private: - long long my_count; -}; - -inline tick_count tick_count::now() { - tick_count result; -#if _WIN32||_WIN64 - LARGE_INTEGER qpcnt; - QueryPerformanceCounter(&qpcnt); - result.my_count = qpcnt.QuadPart; -#elif __linux__ - struct timespec ts; -#if TBB_USE_ASSERT - int status = -#endif /* TBB_USE_ASSERT */ - clock_gettime( CLOCK_REALTIME, &ts ); - __TBB_ASSERT( status==0, "CLOCK_REALTIME not supported" ); - result.my_count = static_cast(1000000000UL)*static_cast(ts.tv_sec) + static_cast(ts.tv_nsec); -#else /* generic Unix */ - struct timeval tv; -#if TBB_USE_ASSERT - int status = -#endif /* TBB_USE_ASSERT */ - gettimeofday(&tv, NULL); - __TBB_ASSERT( status==0, "gettimeofday failed" ); - result.my_count = static_cast(1000000)*static_cast(tv.tv_sec) + static_cast(tv.tv_usec); -#endif /*(choice of OS) */ - return result; -} - -inline tick_count::interval_t::interval_t( double sec ) -{ -#if _WIN32||_WIN64 - LARGE_INTEGER qpfreq; - QueryPerformanceFrequency(&qpfreq); - value = static_cast(sec*qpfreq.QuadPart); -#elif __linux__ - value = static_cast(sec*1E9); -#else /* generic Unix */ - value = static_cast(sec*1E6); -#endif /* (choice of OS) */ -} - -inline tick_count::interval_t operator-( const tick_count& t1, const tick_count& t0 ) { - return tick_count::interval_t( t1.my_count-t0.my_count ); -} - -inline double tick_count::interval_t::seconds() const { -#if _WIN32||_WIN64 - LARGE_INTEGER qpfreq; - QueryPerformanceFrequency(&qpfreq); - return value/(double)qpfreq.QuadPart; -#elif __linux__ - return value*1E-9; -#else /* generic Unix */ - return value*1E-6; -#endif /* (choice of OS) */ -} - -} // namespace tbb - -#endif /* __TBB_tick_count_H */ - diff --git a/deal.II/bundled/tbb30_104oss/index.html b/deal.II/bundled/tbb30_104oss/index.html deleted file mode 100644 index a12689107c..0000000000 --- a/deal.II/bundled/tbb30_104oss/index.html +++ /dev/null @@ -1,44 +0,0 @@ - - - -
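To illustrate the tick_count interface above: a minimal sketch, not part of this patch. now() returns an absolute timestamp, subtracting two timestamps yields an interval_t, and interval_t::seconds() converts it to elapsed wall-clock seconds.

    // Sketch: timing a code region with tbb::tick_count as declared above.
    #include "tbb/tick_count.h"
    #include <cstdio>

    int main() {
        tbb::tick_count t0 = tbb::tick_count::now();

        volatile double sum = 0;               // some work to measure
        for (int i = 0; i < 10000000; ++i)
            sum += i * 0.5;

        tbb::tick_count::interval_t elapsed = tbb::tick_count::now() - t0;
        std::printf("elapsed: %g s\n", elapsed.seconds());
        return 0;
    }
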

Overview

-Top level directory for Threading Building Blocks (TBB). -

-To build TBB, use the top-level Makefile; see also the build directions. -To port TBB to a new platform, operating system or architecture, see the porting directions. -

- -

Files

-
-
Makefile -
Top-level Makefile for TBB. See also the build directions. -
- -

Directories

-
-
doc -
Documentation for the library. -
include -
Include files required for compiling code that uses the library. -
examples -
Examples of how to use the library. -
src -
Source code for the library. -
build -
Internal Makefile infrastructure for TBB. Do not use directly; see the build directions. -
ia32, intel64, ia64 -
Platform-specific binary files for the library. -
- -
-

-Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -

-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -

-* Other names and brands may be claimed as the property of others. - - - diff --git a/deal.II/bundled/tbb30_104oss/src/CMakeLists.txt b/deal.II/bundled/tbb30_104oss/src/CMakeLists.txt deleted file mode 100644 index cb9c5027c1..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/CMakeLists.txt +++ /dev/null @@ -1,78 +0,0 @@ -##### -## -## Copyright (C) 2012 by the deal.II authors -## -## This file is part of the deal.II library. -## -## -## This file is dual licensed under QPL 1.0 and LGPL 2.1 or any later -## version of the LGPL license. -## -## Author: Matthias Maier -## -##### - -SET(CMAKE_INCLUDE_CURRENT_DIR TRUE) -INCLUDE_DIRECTORIES( - ${CMAKE_CURRENT_SOURCE_DIR}/rml/include - ) - -IF(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/version_string.tmp") - FILE(WRITE "${CMAKE_CURRENT_BINARY_DIR}/version_string.tmp" - "#define __TBB_VERSION_STRINGS \"Empty\"\n" - ) -ENDIF() - -SET(src_tbb - rml/client/omp_dynamic_link.cpp - rml/client/rml_omp.cpp - rml/client/rml_tbb.cpp - rml/server/rml_server.cpp - tbb/arena.cpp - tbb/cache_aligned_allocator.cpp - tbb/concurrent_hash_map.cpp - tbb/concurrent_monitor.cpp - tbb/concurrent_queue.cpp - tbb/concurrent_vector.cpp - tbb/condition_variable.cpp - tbb/critical_section.cpp - tbb/dynamic_link.cpp - tbb/governor.cpp - tbb/itt_notify.cpp - tbb/market.cpp - tbb/mutex.cpp - tbb/observer_proxy.cpp - tbb/pipeline.cpp - tbb/private_server.cpp - tbb/queuing_mutex.cpp - tbb/queuing_rw_mutex.cpp - tbb/reader_writer_lock.cpp - tbb/recursive_mutex.cpp - tbb/scheduler.cpp - tbb/spin_mutex.cpp - tbb/spin_rw_mutex.cpp - tbb/task.cpp - tbb/task_group_context.cpp - tbb/tbb_main.cpp - tbb/tbb_misc.cpp - tbb/tbb_statistics.cpp - tbb/tbb_thread.cpp - ) -DEAL_II_ADD_LIBRARY(obj_tbb OBJECT ${src_tbb}) - -# -# Add necessary definitions: -# - -IF(CMAKE_SYSTEM_NAME MATCHES "Windows") - DEAL_II_ADD_DEFINITIONS(obj_tbb "USE_WINTHREAD") - -ELSE() - - DEAL_II_ADD_DEFINITIONS(obj_tbb "USE_PTHREAD") - - IF(NOT CMAKE_SYSTEM_NAME MATCHES "Darwin") - DEAL_II_ADD_DEFINITIONS(obj_tbb "DO_ITT_NOTIFY") - ENDIF() -ENDIF() - diff --git a/deal.II/bundled/tbb30_104oss/src/index.html b/deal.II/bundled/tbb30_104oss/src/index.html deleted file mode 100644 index 8caaeac52d..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/index.html +++ /dev/null @@ -1,77 +0,0 @@ - - - -

Overview

-This directory contains the source code and unit tests for Threading Building Blocks. - -

Directories

-
-
tbb -
Source code of the TBB library core. -
tbbmalloc -
Source code of the TBB scalable memory allocator. -
test -
Source code of the TBB unit tests. -
rml -
Source code of the Resource Management Layer (RML). -
perf -
Source code of microbenchmarks. -
old -
Source code of deprecated TBB entities that are still shipped as part of the TBB library for the sake of backward compatibility. -
- -

Files

-
-
Makefile -
Advanced Makefile for developing and debugging TBB. See the basic build directions. Additional targets and options: -
-
make test_{name} time_{name} -
Make and run an individual test or benchmark.
-
make stress_{name} -
Equivalent to 'make test_{name}' but runs until a failure is detected or the user terminates it.
-
make run_cmd="{command}" [(above options or targets)] -
Command prefix for test execution. Also, "run_cmd=-" will ignore test execution failures. See also the -k and -i options of GNU make for more ways to keep building and testing despite failures.
-
make debug_{name} -
Equivalent to 'make test_{name}' but compiles in debug mode and runs under a debugger ("run_cmd=$(debugger)").
-
make args="{command-line arguments}" [(above options or targets)] -
Additional arguments for the run.
-
make repeat="{N}" [(above options or targets)] -
Repeats execution N times.
-
make clean_{filename} -
Removes executable, object, and other intermediate files with the specified filename ('*' also works).
-
make cfg={debug|release} [(above options or targets)] -
Specifies a build mode or corresponding directory to work in.
-
make tbb_strict=1 [(above options or targets)] -
Enables warnings as errors.
-
make examples/{target} -
Invokes examples/Makefile with the specified target.
-
make clean_release clean_debug clean_examples -
Removes release or debug build directories, or cleans all examples.
-
make test_no_depends -
Equivalent to 'make test' but does not check for library updates.
-
make info -
Outputs information about the build configuration and directories.
-
make cpp0x=1 [(above options or targets)] -
Enables C++0x extensions like lambdas for compilers that implement them as experimental features.
-
make CXXFLAGS={Flags} [(above options or targets)] -
Specifies additional options for the compiler.
-
make target={name} [(above options or targets)] -
Includes an additional build/{name}.inc file after the OS-specific one.
-
make extra_inc={filename} [(above options or targets)] -
Includes an additional makefile.
- -
- - -
-Up to parent directory -

-Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -

-Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -

-* Other names and brands may be claimed as the property of others. - - diff --git a/deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.cpp b/deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.cpp deleted file mode 100644 index 0359a95b33..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.cpp +++ /dev/null @@ -1,382 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "concurrent_queue_v2.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/spin_mutex.h" -#include "tbb/atomic.h" -#include -#include - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -#define RECORD_EVENTS 0 - -using namespace std; - -namespace tbb { - -namespace internal { - -class concurrent_queue_rep; - -//! A queue using simple locking. -/** For efficient, this class has no constructor. - The caller is expected to zero-initialize it. */ -struct micro_queue { - typedef concurrent_queue_base::page page; - typedef size_t ticket; - - atomic head_page; - atomic head_counter; - - atomic tail_page; - atomic tail_counter; - - spin_mutex page_mutex; - - class push_finalizer: no_copy { - ticket my_ticket; - micro_queue& my_queue; - public: - push_finalizer( micro_queue& queue, ticket k ) : - my_ticket(k), my_queue(queue) - {} - ~push_finalizer() { - my_queue.tail_counter = my_ticket; - } - }; - - void push( const void* item, ticket k, concurrent_queue_base& base ); - - class pop_finalizer: no_copy { - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - public: - pop_finalizer( micro_queue& queue, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p) - {} - ~pop_finalizer() { - page* p = my_page; - if( p ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !q ) { - my_queue.tail_page = NULL; - } - } - my_queue.head_counter = my_ticket; - if( p ) - operator delete(p); - } - }; - - bool pop( void* dst, ticket k, concurrent_queue_base& base ); -}; - -//! Internal representation of a ConcurrentQueue. -/** For efficient, this class has no constructor. - The caller is expected to zero-initialize it. 
*/ -class concurrent_queue_rep { -public: - typedef size_t ticket; - -private: - friend struct micro_queue; - - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - //! Must be power of 2 - static const size_t n_queue = 8; - - //! Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - atomic head_counter; - char pad1[NFS_MaxLineSize-sizeof(atomic)]; - - atomic tail_counter; - char pad2[NFS_MaxLineSize-sizeof(atomic)]; - micro_queue array[n_queue]; - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } - - //! Value for effective_capacity that denotes unbounded queue. - static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2); -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning( push ) - #pragma warning( disable: 4146 ) -#endif - -//------------------------------------------------------------------------ -// micro_queue -//------------------------------------------------------------------------ -void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base ) { - k &= -concurrent_queue_rep::n_queue; - page* p = NULL; - size_t index = (k/concurrent_queue_rep::n_queue & base.items_per_page-1); - if( !index ) { - size_t n = sizeof(page) + base.items_per_page*base.item_size; - p = static_cast(operator new( n )); - p->mask = 0; - p->next = NULL; - } - { - push_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue ); - spin_wait_until_eq( tail_counter, k ); - if( p ) { - spin_mutex::scoped_lock lock( page_mutex ); - if( page* q = tail_page ) - q->next = p; - else - head_page = p; - tail_page = p; - } else { - p = tail_page; - } - base.copy_item( *p, index, item ); - // If no exception was thrown, mark item as present. - p->mask |= uintptr_t(1)<1 ? item_size : 2); - my_rep = cache_aligned_allocator().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - this->item_size = item_size; -} - -concurrent_queue_base::~concurrent_queue_base() { - size_t nq = my_rep->n_queue; - for( size_t i=0; iarray[i].tail_page; - __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" ); - if( tp!=NULL ) - delete tp; - } - cache_aligned_allocator().deallocate(my_rep,1); -} - -void concurrent_queue_base::internal_push( const void* src ) { - concurrent_queue_rep& r = *my_rep; - concurrent_queue_rep::ticket k = r.tail_counter++; - ptrdiff_t e = my_capacity; - if( e(my_capacity); - } - } - r.choose(k).push(src,k,*this); -} - -void concurrent_queue_base::internal_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - concurrent_queue_rep::ticket k; - do { - k = r.head_counter++; - } while( !r.choose(k).pop(dst,k,*this) ); -} - -bool concurrent_queue_base::internal_pop_if_present( void* dst ) { - concurrent_queue_rep& r = *my_rep; - concurrent_queue_rep::ticket k; - do { - atomic_backoff backoff; - for(;;) { - k = r.head_counter; - if( r.tail_counter<=k ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. Attempt to get that item. 
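To illustrate the lane selection used by concurrent_queue_rep above: a small sketch, not part of this patch. With n_queue = 8 micro-queues and phi = 3, index(k) = k*phi % n_queue spreads consecutive tickets across different lanes, so pushes and pops with nearby tickets do not all contend on one head/tail pair.

    // Sketch of concurrent_queue_rep::index(): ticket k maps to lane k*phi % n_queue.
    #include <cstddef>
    #include <cstdio>

    int main() {
        const std::size_t phi = 3;      // approximately n_queue / golden ratio
        const std::size_t n_queue = 8;  // must be a power of two

        for (std::size_t k = 0; k < 16; ++k)
            std::printf("ticket %2zu -> micro_queue %zu\n", k, k * phi % n_queue);
        return 0;
    }
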
- if( r.head_counter.compare_and_swap(k+1,k)==k ) { - break; - } - // Another thread snatched the item, so pause and retry. - backoff.pause(); - } - } while( !r.choose(k).pop(dst,k,*this) ); - return true; -} - -bool concurrent_queue_base::internal_push_if_not_full( const void* src ) { - concurrent_queue_rep& r = *my_rep; - atomic_backoff backoff; - concurrent_queue_rep::ticket k; - for(;;) { - k = r.tail_counter; - if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) { - // Queue is full - return false; - } - // Queue had empty slot with ticket k when we looked. Attempt to claim that slot. - if( r.tail_counter.compare_and_swap(k+1,k)==k ) - break; - // Another thread claimed the slot, so pause and retry. - backoff.pause(); - } - r.choose(k).push(src,k,*this); - return true; -} - -ptrdiff_t concurrent_queue_base::internal_size() const { - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter); -} - -void concurrent_queue_base::internal_set_capacity( ptrdiff_t capacity, size_t /*item_size*/ ) { - my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity; -} - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_rep -//------------------------------------------------------------------------ -class concurrent_queue_iterator_rep: no_assign { -public: - typedef concurrent_queue_rep::ticket ticket; - ticket head_counter; - const concurrent_queue_base& my_queue; - concurrent_queue_base::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base& queue ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue) - { - const concurrent_queue_rep& rep = *queue.my_rep; - for( size_t k=0; ktail_counter ) - return NULL; - else { - concurrent_queue_base::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = k/concurrent_queue_rep::n_queue & my_queue.items_per_page-1; - return static_cast(static_cast(p+1)) + my_queue.item_size*i; - } - } -}; - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_base -//------------------------------------------------------------------------ -concurrent_queue_iterator_base::concurrent_queue_iterator_base( const concurrent_queue_base& queue ) { - my_rep = new concurrent_queue_iterator_rep(queue); - my_item = my_rep->choose(my_rep->head_counter); -} - -void concurrent_queue_iterator_base::assign( const concurrent_queue_iterator_base& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - delete my_rep; - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = new concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -void concurrent_queue_iterator_base::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base& queue = my_rep->my_queue; - __TBB_ASSERT( my_item==my_rep->choose(k), NULL ); - size_t i = k/concurrent_queue_rep::n_queue & queue.items_per_page-1; - if( i==queue.items_per_page-1 ) { - concurrent_queue_base::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - my_rep->head_counter = k+1; - my_item = my_rep->choose(k+1); -} - -concurrent_queue_iterator_base::~concurrent_queue_iterator_base() { - delete my_rep; - my_rep = NULL; -} - -} // namespace internal - -} // namespace tbb diff --git 
a/deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.h b/deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.h deleted file mode 100644 index d55b5a39fb..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/concurrent_queue_v2.h +++ /dev/null @@ -1,328 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_queue_H -#define __TBB_concurrent_queue_H - -#include "tbb/tbb_stddef.h" -#include - -namespace tbb { - -template class concurrent_queue; - -//! @cond INTERNAL -namespace internal { - -class concurrent_queue_rep; -class concurrent_queue_iterator_rep; -class concurrent_queue_iterator_base; -template class concurrent_queue_iterator; - -//! For internal use only. -/** Type-independent portion of concurrent_queue. - @ingroup containers */ -class concurrent_queue_base: no_copy { - //! Internal representation - concurrent_queue_rep* my_rep; - - friend class concurrent_queue_rep; - friend struct micro_queue; - friend class concurrent_queue_iterator_rep; - friend class concurrent_queue_iterator_base; -protected: - //! Prefix on a page - struct page { - page* next; - uintptr_t mask; - }; - - //! Capacity of the queue - ptrdiff_t my_capacity; - - //! Always a power of 2 - size_t items_per_page; - - //! Size of an item - size_t item_size; -private: - virtual void copy_item( page& dst, size_t index, const void* src ) = 0; - virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) = 0; -protected: - __TBB_EXPORTED_METHOD concurrent_queue_base( size_t item_size ); - virtual __TBB_EXPORTED_METHOD ~concurrent_queue_base(); - - //! Enqueue item at tail of queue - void __TBB_EXPORTED_METHOD internal_push( const void* src ); - - //! Dequeue item from head of queue - void __TBB_EXPORTED_METHOD internal_pop( void* dst ); - - //! Attempt to enqueue item onto queue. - bool __TBB_EXPORTED_METHOD internal_push_if_not_full( const void* src ); - - //! Attempt to dequeue item from queue. - /** NULL if there was no item to dequeue. */ - bool __TBB_EXPORTED_METHOD internal_pop_if_present( void* dst ); - - //! Get size of queue - ptrdiff_t __TBB_EXPORTED_METHOD internal_size() const; - - void __TBB_EXPORTED_METHOD internal_set_capacity( ptrdiff_t capacity, size_t element_size ); -}; - -//! 
Type-independent portion of concurrent_queue_iterator. -/** @ingroup containers */ -class concurrent_queue_iterator_base { - //! Concurrentconcurrent_queue over which we are iterating. - /** NULL if one past last element in queue. */ - concurrent_queue_iterator_rep* my_rep; - - template - friend bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); - - template - friend bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ); -protected: - //! Pointer to current item - mutable void* my_item; - - //! Default constructor - __TBB_EXPORTED_METHOD concurrent_queue_iterator_base() : my_rep(NULL), my_item(NULL) {} - - //! Copy constructor - concurrent_queue_iterator_base( const concurrent_queue_iterator_base& i ) : my_rep(NULL), my_item(NULL) { - assign(i); - } - - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator_base( const concurrent_queue_base& queue ); - - //! Assignment - void __TBB_EXPORTED_METHOD assign( const concurrent_queue_iterator_base& i ); - - //! Advance iterator one step towards tail of queue. - void __TBB_EXPORTED_METHOD advance(); - - //! Destructor - __TBB_EXPORTED_METHOD ~concurrent_queue_iterator_base(); -}; - -//! Meets requirements of a forward iterator for STL. -/** Value is either the T or const T type of the container. - @ingroup containers */ -template -class concurrent_queue_iterator: public concurrent_queue_iterator_base { -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class ::tbb::concurrent_queue; -#else -public: // workaround for MSVC -#endif - //! Construct iterator pointing to head of queue. - concurrent_queue_iterator( const concurrent_queue_base& queue ) : - concurrent_queue_iterator_base(queue) - { - } -public: - concurrent_queue_iterator() {} - - /** If Value==Container::value_type, then this routine is the copy constructor. - If Value==const Container::value_type, then this routine is a conversion constructor. */ - concurrent_queue_iterator( const concurrent_queue_iterator& other ) : - concurrent_queue_iterator_base(other) - {} - - //! Iterator assignment - concurrent_queue_iterator& operator=( const concurrent_queue_iterator& other ) { - assign(other); - return *this; - } - - //! Reference to current item - Value& operator*() const { - return *static_cast(my_item); - } - - Value* operator->() const {return &operator*();} - - //! Advance to next item in queue - concurrent_queue_iterator& operator++() { - advance(); - return *this; - } - - //! Post increment - Value* operator++(int) { - Value* result = &operator*(); - operator++(); - return result; - } -}; // concurrent_queue_iterator - -template -bool operator==( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item==j.my_item; -} - -template -bool operator!=( const concurrent_queue_iterator& i, const concurrent_queue_iterator& j ) { - return i.my_item!=j.my_item; -} - -} // namespace internal; -//! @endcond - -//! A high-performance thread-safe queue. -/** Multiple threads may each push and pop concurrently. - Assignment and copy construction are not allowed. - @ingroup containers */ -template -class concurrent_queue: public internal::concurrent_queue_base { - template friend class internal::concurrent_queue_iterator; - - //! 
Class used to ensure exception-safety of method "pop" - class destroyer { - T& my_value; - public: - destroyer( T& value ) : my_value(value) {} - ~destroyer() {my_value.~T();} - }; - - T& get_ref( page& page, size_t index ) { - __TBB_ASSERT( index(static_cast(&page+1))[index]; - } - - /*override*/ virtual void copy_item( page& dst, size_t index, const void* src ) { - new( &get_ref(dst,index) ) T(*static_cast(src)); - } - - /*override*/ virtual void assign_and_destroy_item( void* dst, page& src, size_t index ) { - T& from = get_ref(src,index); - destroyer d(from); - *static_cast(dst) = from; - } - -public: - //! Element type in the queue. - typedef T value_type; - - //! Reference type - typedef T& reference; - - //! Const reference type - typedef const T& const_reference; - - //! Integral type for representing size of the queue. - /** Notice that the size_type is a signed integral type. - This is because the size can be negative if there are pending pops without corresponding pushes. */ - typedef std::ptrdiff_t size_type; - - //! Difference type for iterator - typedef std::ptrdiff_t difference_type; - - //! Construct empty queue - concurrent_queue() : - concurrent_queue_base( sizeof(T) ) - { - } - - //! Destroy queue - ~concurrent_queue(); - - //! Enqueue an item at tail of queue. - void push( const T& source ) { - internal_push( &source ); - } - - //! Dequeue item from head of queue. - /** Block until an item becomes available, and then dequeue it. */ - void pop( T& destination ) { - internal_pop( &destination ); - } - - //! Enqueue an item at tail of queue if queue is not already full. - /** Does not wait for queue to become not full. - Returns true if item is pushed; false if queue was already full. */ - bool push_if_not_full( const T& source ) { - return internal_push_if_not_full( &source ); - } - - //! Attempt to dequeue an item from head of queue. - /** Does not wait for item to become available. - Returns true if successful; false otherwise. */ - bool pop_if_present( T& destination ) { - return internal_pop_if_present( &destination ); - } - - //! Return number of pushes minus number of pops. - /** Note that the result can be negative if there are pops waiting for the - corresponding pushes. The result can also exceed capacity() if there - are push operations in flight. */ - size_type size() const {return internal_size();} - - //! Equivalent to size()<=0. - bool empty() const {return size()<=0;} - - //! Maximum number of allowed elements - size_type capacity() const { - return my_capacity; - } - - //! Set the capacity - /** Setting the capacity to 0 causes subsequent push_if_not_full operations to always fail, - and subsequent push operations to block forever. */ - void set_capacity( size_type capacity ) { - internal_set_capacity( capacity, sizeof(T) ); - } - - typedef internal::concurrent_queue_iterator iterator; - typedef internal::concurrent_queue_iterator const_iterator; - - //------------------------------------------------------------------------ - // The iterators are intended only for debugging. They are slow and not thread safe. 
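To illustrate the deprecated queue interface declared above: a usage sketch, not part of this patch. The include path is illustrative; the calls shown (set_capacity, push, push_if_not_full, pop_if_present, size) are the ones visible in concurrent_queue_v2.h.

    // Sketch: exercising the old concurrent_queue interface declared above.
    #include "concurrent_queue_v2.h"    // illustrative include path
    #include <cstdio>

    int main() {
        tbb::concurrent_queue<int> q;
        q.set_capacity(4);                  // bounded: a plain push would block when full

        for (int i = 0; i < 4; ++i)
            q.push(i);

        if (!q.push_if_not_full(99))        // non-blocking push attempt
            std::printf("queue full, size = %ld\n", static_cast<long>(q.size()));

        int value;
        while (q.pop_if_present(value))     // non-blocking drain
            std::printf("popped %d\n", value);
        return 0;
    }
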
- //------------------------------------------------------------------------ - iterator begin() {return iterator(*this);} - iterator end() {return iterator();} - const_iterator begin() const {return const_iterator(*this);} - const_iterator end() const {return const_iterator();} - -}; - -template -concurrent_queue::~concurrent_queue() { - while( !empty() ) { - T value; - internal_pop(&value); - } -} - -} // namespace tbb - -#endif /* __TBB_concurrent_queue_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.cpp b/deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.cpp deleted file mode 100644 index b0d161d1d2..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.cpp +++ /dev/null @@ -1,277 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "concurrent_vector_v2.h" -#include "tbb/tbb_machine.h" -#include "../tbb/itt_notify.h" -#include "tbb/task.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -namespace tbb { - -namespace internal { - -void concurrent_vector_base::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op1 init ) { - size_type e = my_early_size; - while( e=pointers_per_short_segment && v.my_segment==v.my_storage ) { - extend_segment(v); - } - } -}; - -void concurrent_vector_base::helper::extend_segment( concurrent_vector_base& v ) { - const size_t pointers_per_long_segment = sizeof(void*)==4 ? 32 : 64; - segment_t* s = (segment_t*)NFS_Allocate( pointers_per_long_segment, sizeof(segment_t), NULL ); - std::memset( s, 0, pointers_per_long_segment*sizeof(segment_t) ); - // If other threads are trying to set pointers in the short segment, wait for them to finish their - // assigments before we copy the short segment to the long segment. 
- atomic_backoff backoff; - while( !v.my_storage[0].array || !v.my_storage[1].array ) { - backoff.pause(); - } - s[0] = v.my_storage[0]; - s[1] = v.my_storage[1]; - if( v.my_segment.compare_and_swap( s, v.my_storage )!=v.my_storage ) - NFS_Free(s); -} - -concurrent_vector_base::size_type concurrent_vector_base::internal_capacity() const { - return segment_base( helper::find_segment_end(*this) ); -} - -void concurrent_vector_base::internal_reserve( size_type n, size_type element_size, size_type max_size ) { - if( n>max_size ) { - __TBB_THROW( std::length_error("argument to ConcurrentVector::reserve exceeds ConcurrentVector::max_size()") ); - } - for( segment_index_t k = helper::find_segment_end(*this); segment_base(k)n-b ) m = n-b; - copy( my_segment[k].array, src.my_segment[k].array, m ); - } - } -} - -void concurrent_vector_base::internal_assign( const concurrent_vector_base& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) { - size_type n = src.my_early_size; - while( my_early_size>n ) { - segment_index_t k = segment_index_of( my_early_size-1 ); - size_type b=segment_base(k); - size_type new_end = b>=n ? b : n; - __TBB_ASSERT( my_early_size>new_end, NULL ); - destroy( (char*)my_segment[k].array+element_size*(new_end-b), my_early_size-new_end ); - my_early_size = new_end; - } - size_type dst_initialized_size = my_early_size; - my_early_size = n; - size_type b; - for( segment_index_t k=0; (b=segment_base(k))n-b ) m = n-b; - size_type a = 0; - if( dst_initialized_size>b ) { - a = dst_initialized_size-b; - if( a>m ) a = m; - assign( my_segment[k].array, src.my_segment[k].array, a ); - m -= a; - a *= element_size; - } - if( m>0 ) - copy( (char*)my_segment[k].array+a, (char*)src.my_segment[k].array+a, m ); - } - __TBB_ASSERT( src.my_early_size==n, "detected use of ConcurrentVector::operator= with right side that was concurrently modified" ); -} - -void* concurrent_vector_base::internal_push_back( size_type element_size, size_type& index ) { - __TBB_ASSERT( sizeof(my_early_size)==sizeof(reference_count), NULL ); - //size_t tmp = __TBB_FetchAndIncrementWacquire(*(tbb::internal::reference_count*)&my_early_size); - size_t tmp = __TBB_FetchAndIncrementWacquire((tbb::internal::reference_count*)&my_early_size); - index = tmp; - segment_index_t k_old = segment_index_of( tmp ); - size_type base = segment_base(k_old); - helper::extend_segment_if_necessary(*this,k_old); - segment_t& s = my_segment[k_old]; - void* array = s.array; - if( !array ) { - // FIXME - consider factoring this out and share with internal_grow_by - if( base==tmp ) { - __TBB_ASSERT( !s.array, NULL ); - size_t n = segment_size(k_old); - array = NFS_Allocate( n, element_size, NULL ); - ITT_NOTIFY( sync_releasing, &s.array ); - s.array = array; - } else { - ITT_NOTIFY(sync_prepare, &s.array); - spin_wait_while_eq( s.array, (void*)0 ); - ITT_NOTIFY(sync_acquired, &s.array); - array = s.array; - } - } - size_type j_begin = tmp-base; - return (void*)((char*)array+element_size*j_begin); -} - -concurrent_vector_base::size_type concurrent_vector_base::internal_grow_by( size_type delta, size_type element_size, internal_array_op1 init ) { - size_type result = my_early_size.fetch_and_add(delta); - internal_grow( result, result+delta, element_size, init ); - return result; -} - -void concurrent_vector_base::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op1 init ) { - __TBB_ASSERT( start finish-base ? 
finish-base : n; - (*init)( (void*)((char*)array+element_size*j_begin), j_end-j_begin ); - tmp = base+j_end; - } while( tmp0 ) { - segment_index_t k_old = segment_index_of(finish-1); - segment_t& s = my_segment[k_old]; - __TBB_ASSERT( s.array, NULL ); - size_type base = segment_base(k_old); - size_type j_end = finish-base; - __TBB_ASSERT( j_end, NULL ); - (*destroy)( s.array, j_end ); - finish = base; - } - - // Free the arrays - if( reclaim_storage ) { - size_t k = helper::find_segment_end(*this); - while( k>0 ) { - --k; - segment_t& s = my_segment[k]; - void* array = s.array; - s.array = NULL; - NFS_Free( array ); - } - // Clear short segment. - my_storage[0].array = NULL; - my_storage[1].array = NULL; - segment_t* s = my_segment; - if( s!=my_storage ) { - my_segment = my_storage; - NFS_Free( s ); - } - } -} - -} // namespace internal - -} // tbb diff --git a/deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.h b/deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.h deleted file mode 100644 index 738d29a218..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/concurrent_vector_v2.h +++ /dev/null @@ -1,522 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_vector_H -#define __TBB_concurrent_vector_H - -#include "tbb/tbb_stddef.h" -#include "tbb/atomic.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/blocked_range.h" -#include "tbb/tbb_machine.h" -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -namespace tbb { - -template -class concurrent_vector; - -//! @cond INTERNAL -namespace internal { - - //! Base class of concurrent vector implementation. - /** @ingroup containers */ - class concurrent_vector_base { - protected: - typedef unsigned long segment_index_t; - - //! Log2 of "min_segment_size". - static const int lg_min_segment_size = 4; - - //! Minimum size (in physical items) of a segment. - static const int min_segment_size = segment_index_t(1)<>1< my_early_size; - - /** Can be zero-initialized. 
*/ - struct segment_t { - /** Declared volatile because in weak memory model, must have ld.acq/st.rel */ - void* volatile array; -#if TBB_DO_ASSERT - ~segment_t() { - __TBB_ASSERT( !array, "should have been set to NULL by clear" ); - } -#endif /* TBB_DO_ASSERT */ - }; - - atomic my_segment; - - segment_t my_storage[2]; - - concurrent_vector_base() { - my_early_size = 0; - my_storage[0].array = NULL; - my_storage[1].array = NULL; - my_segment = my_storage; - } - - //! An operation on an n-lement array starting at begin. - typedef void(__TBB_EXPORTED_FUNC *internal_array_op1)(void* begin, size_type n ); - - //! An operation on n-element destination array and n-element source array. - typedef void(__TBB_EXPORTED_FUNC *internal_array_op2)(void* dst, const void* src, size_type n ); - - void __TBB_EXPORTED_METHOD internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op1 init ); - void internal_grow( size_type start, size_type finish, size_type element_size, internal_array_op1 init ); - size_type __TBB_EXPORTED_METHOD internal_grow_by( size_type delta, size_type element_size, internal_array_op1 init ); - void* __TBB_EXPORTED_METHOD internal_push_back( size_type element_size, size_type& index ); - void __TBB_EXPORTED_METHOD internal_clear( internal_array_op1 destroy, bool reclaim_storage ); - void __TBB_EXPORTED_METHOD internal_copy( const concurrent_vector_base& src, size_type element_size, internal_array_op2 copy ); - void __TBB_EXPORTED_METHOD internal_assign( const concurrent_vector_base& src, size_type element_size, - internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ); -private: - //! Private functionality that does not cross DLL boundary. - class helper; - - friend class helper; - }; - - //! Meets requirements of a forward iterator for STL and a Value for a blocked_range.*/ - /** Value is either the T or const T type of the container. - @ingroup containers */ - template - class vector_iterator -#if defined(_WIN64) && defined(_MSC_VER) - // Ensure that Microsoft's internal template function _Val_type works correctly. - : public std::iterator -#endif /* defined(_WIN64) && defined(_MSC_VER) */ - { - //! concurrent_vector over which we are iterating. - Container* my_vector; - - //! Index into the vector - size_t my_index; - - //! Caches my_vector->internal_subscript(my_index) - /** NULL if cached value is not available */ - mutable Value* my_item; - - template - friend bool operator==( const vector_iterator& i, const vector_iterator& j ); - - template - friend bool operator<( const vector_iterator& i, const vector_iterator& j ); - - template - friend ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ); - - template - friend class internal::vector_iterator; - -#if !defined(_MSC_VER) || defined(__INTEL_COMPILER) - template - friend class tbb::concurrent_vector; -#else -public: // workaround for MSVC -#endif - - vector_iterator( const Container& vector, size_t index ) : - my_vector(const_cast(&vector)), - my_index(index), - my_item(NULL) - {} - - public: - //! 
Default constructor - vector_iterator() : my_vector(NULL), my_index(~size_t(0)), my_item(NULL) {} - - vector_iterator( const vector_iterator& other ) : - my_vector(other.my_vector), - my_index(other.my_index), - my_item(other.my_item) - {} - - vector_iterator operator+( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index+offset ); - } - friend vector_iterator operator+( ptrdiff_t offset, const vector_iterator& v ) { - return vector_iterator( *v.my_vector, v.my_index+offset ); - } - vector_iterator operator+=( ptrdiff_t offset ) { - my_index+=offset; - my_item = NULL; - return *this; - } - vector_iterator operator-( ptrdiff_t offset ) const { - return vector_iterator( *my_vector, my_index-offset ); - } - vector_iterator operator-=( ptrdiff_t offset ) { - my_index-=offset; - my_item = NULL; - return *this; - } - Value& operator*() const { - Value* item = my_item; - if( !item ) { - item = my_item = &my_vector->internal_subscript(my_index); - } - __TBB_ASSERT( item==&my_vector->internal_subscript(my_index), "corrupt cache" ); - return *item; - } - Value& operator[]( ptrdiff_t k ) const { - return my_vector->internal_subscript(my_index+k); - } - Value* operator->() const {return &operator*();} - - //! Pre increment - vector_iterator& operator++() { - size_t k = ++my_index; - if( my_item ) { - // Following test uses 2's-complement wizardry and fact that - // min_segment_size is a power of 2. - if( (k& k-concurrent_vector::min_segment_size)==0 ) { - // k is a power of two that is at least k-min_segment_size - my_item= NULL; - } else { - ++my_item; - } - } - return *this; - } - - //! Pre decrement - vector_iterator& operator--() { - __TBB_ASSERT( my_index>0, "operator--() applied to iterator already at beginning of concurrent_vector" ); - size_t k = my_index--; - if( my_item ) { - // Following test uses 2's-complement wizardry and fact that - // min_segment_size is a power of 2. - if( (k& k-concurrent_vector::min_segment_size)==0 ) { - // k is a power of two that is at least k-min_segment_size - my_item= NULL; - } else { - --my_item; - } - } - return *this; - } - - //! Post increment - vector_iterator operator++(int) { - vector_iterator result = *this; - operator++(); - return result; - } - - //! Post decrement - vector_iterator operator--(int) { - vector_iterator result = *this; - operator--(); - return result; - } - - // STL support - - typedef ptrdiff_t difference_type; - typedef Value value_type; - typedef Value* pointer; - typedef Value& reference; - typedef std::random_access_iterator_tag iterator_category; - }; - - template - bool operator==( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index==j.my_index; - } - - template - bool operator!=( const vector_iterator& i, const vector_iterator& j ) { - return !(i==j); - } - - template - bool operator<( const vector_iterator& i, const vector_iterator& j ) { - return i.my_index - bool operator>( const vector_iterator& i, const vector_iterator& j ) { - return j - bool operator>=( const vector_iterator& i, const vector_iterator& j ) { - return !(i - bool operator<=( const vector_iterator& i, const vector_iterator& j ) { - return !(j - ptrdiff_t operator-( const vector_iterator& i, const vector_iterator& j ) { - return ptrdiff_t(i.my_index)-ptrdiff_t(j.my_index); - } - -} // namespace internal -//! @endcond - -//! 
Concurrent vector -/** @ingroup containers */ -template -class concurrent_vector: private internal::concurrent_vector_base { -public: - using internal::concurrent_vector_base::size_type; -private: - template - class generic_range_type: public blocked_range { - public: - typedef T value_type; - typedef T& reference; - typedef const T& const_reference; - typedef I iterator; - typedef ptrdiff_t difference_type; - generic_range_type( I begin_, I end_, size_t grainsize ) : blocked_range(begin_,end_,grainsize) {} - generic_range_type( generic_range_type& r, split ) : blocked_range(r,split()) {} - }; - - template - friend class internal::vector_iterator; -public: - typedef T& reference; - typedef const T& const_reference; - - //! Construct empty vector. - concurrent_vector() {} - - //! Copy a vector. - concurrent_vector( const concurrent_vector& vector ) {internal_copy(vector,sizeof(T),©_array);} - - //! Assignment - concurrent_vector& operator=( const concurrent_vector& vector ) { - if( this!=&vector ) - internal_assign(vector,sizeof(T),&destroy_array,&assign_array,©_array); - return *this; - } - - //! Clear and destroy vector. - ~concurrent_vector() {internal_clear(&destroy_array,/*reclaim_storage=*/true);} - - //------------------------------------------------------------------------ - // Concurrent operations - //------------------------------------------------------------------------ - //! Grow by "delta" elements. - /** Returns old size. */ - size_type grow_by( size_type delta ) { - return delta ? internal_grow_by( delta, sizeof(T), &initialize_array ) : my_early_size; - } - - //! Grow array until it has at least n elements. - void grow_to_at_least( size_type n ) { - if( my_early_size iterator; - typedef internal::vector_iterator const_iterator; - -#if !defined(_MSC_VER) || _CPPLIB_VER>=300 - // Assume ISO standard definition of std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#else - // Use non-standard std::reverse_iterator - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; -#endif /* defined(_MSC_VER) && (_MSC_VER<1300) */ - - typedef generic_range_type range_type; - typedef generic_range_type const_range_type; - - range_type range( size_t grainsize = 1 ) { - return range_type( begin(), end(), grainsize ); - } - - const_range_type range( size_t grainsize = 1 ) const { - return const_range_type( begin(), end(), grainsize ); - } - - //------------------------------------------------------------------------ - // Capacity - //------------------------------------------------------------------------ - //! Return size of vector. - size_type size() const {return my_early_size;} - - //! Return size of vector. - bool empty() const {return !my_early_size;} - - //! Maximum size to which array can grow without allocating more memory. - size_type capacity() const {return internal_capacity();} - - //! Allocate enough space to grow to size n without having to allocate more memory later. - /** Like most of the methods provided for STL compatibility, this method is *not* thread safe. - The capacity afterwards may be bigger than the requested reservation. */ - void reserve( size_type n ) { - if( n ) - internal_reserve(n, sizeof(T), max_size()); - } - - //! Upper bound on argument to reserve. 
- size_type max_size() const {return (~size_t(0))/sizeof(T);} - - //------------------------------------------------------------------------ - // STL support - //------------------------------------------------------------------------ - - typedef T value_type; - typedef ptrdiff_t difference_type; - - iterator begin() {return iterator(*this,0);} - iterator end() {return iterator(*this,size());} - const_iterator begin() const {return const_iterator(*this,0);} - const_iterator end() const {return const_iterator(*this,size());} - - reverse_iterator rbegin() {return reverse_iterator(end());} - reverse_iterator rend() {return reverse_iterator(begin());} - const_reverse_iterator rbegin() const {return const_reverse_iterator(end());} - const_reverse_iterator rend() const {return const_reverse_iterator(begin());} - - //! Not thread safe - /** Does not change capacity. */ - void clear() {internal_clear(&destroy_array,/*reclaim_storage=*/false);} -private: - //! Get reference to element at given index. - T& internal_subscript( size_type index ) const; - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC initialize_array( void* begin, size_type n ); - - //! Construct n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC copy_array( void* dst, const void* src, size_type n ); - - //! Assign n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC assign_array( void* dst, const void* src, size_type n ); - - //! Destroy n instances of T, starting at "begin". - static void __TBB_EXPORTED_FUNC destroy_array( void* begin, size_type n ); -}; - -template -T& concurrent_vector::internal_subscript( size_type index ) const { - __TBB_ASSERT( index(my_segment[k].array)[j]; -} - -template -void concurrent_vector::initialize_array( void* begin, size_type n ) { - T* array = static_cast(begin); - for( size_type j=0; j -void concurrent_vector::copy_array( void* dst, const void* src, size_type n ) { - T* d = static_cast(dst); - const T* s = static_cast(src); - for( size_type j=0; j -void concurrent_vector::assign_array( void* dst, const void* src, size_type n ) { - T* d = static_cast(dst); - const T* s = static_cast(src); - for( size_type j=0; j -void concurrent_vector::destroy_array( void* begin, size_type n ) { - T* array = static_cast(begin); - for( size_type j=n; j>0; --j ) - array[j-1].~T(); -} - -} // namespace tbb - -#endif /* __TBB_concurrent_vector_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.cpp b/deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.cpp deleted file mode 100644 index a19ec93f2b..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.cpp +++ /dev/null @@ -1,166 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
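To illustrate the growth interface of the old concurrent_vector above: a usage sketch, not part of this patch. grow_by(delta) atomically claims a block of delta elements and returns the old size; grow_to_at_least(n) ensures at least n elements exist. Element access through operator[] is assumed here (served by the internal_subscript helper declared above), and the include path is illustrative.

    // Sketch: claiming and filling a block of elements in the old concurrent_vector.
    #include "concurrent_vector_v2.h"   // illustrative include path
    #include <cstddef>
    #include <cstdio>

    int main() {
        tbb::concurrent_vector<int> v;

        // Claim ten slots; `base` is the index of the first claimed element.
        std::size_t base = v.grow_by(10);
        for (std::size_t i = 0; i < 10; ++i)
            v[base + i] = static_cast<int>(i);    // operator[] assumed, see note above

        v.grow_to_at_least(32);                   // make sure at least 32 elements exist
        std::printf("size = %lu, capacity = %lu\n",
                    static_cast<unsigned long>(v.size()),
                    static_cast<unsigned long>(v.capacity()));
        return 0;
    }
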
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "spin_rw_mutex_v2.h" -#include "tbb/tbb_machine.h" -#include "../tbb/itt_notify.h" - -namespace tbb { - -using namespace internal; - -static inline bool CAS(volatile uintptr_t &addr, uintptr_t newv, uintptr_t oldv) { - return __TBB_CompareAndSwapW((volatile void *)&addr, (intptr_t)newv, (intptr_t)oldv) == (intptr_t)oldv; -} - -//! Signal that write lock is released -void spin_rw_mutex::internal_itt_releasing(spin_rw_mutex *mutex) { - ITT_NOTIFY(sync_releasing, mutex); -#if !DO_ITT_NOTIFY - (void)mutex; -#endif -} - -bool spin_rw_mutex::internal_acquire_writer(spin_rw_mutex *mutex) -{ - ITT_NOTIFY(sync_prepare, mutex); - atomic_backoff backoff; - for(;;) { - state_t s = mutex->state; - if( !(s & BUSY) ) { // no readers, no writers - if( CAS(mutex->state, WRITER, s) ) - break; // successfully stored writer flag - backoff.reset(); // we could be very close to complete op. - } else if( !(s & WRITER_PENDING) ) { // no pending writers - __TBB_AtomicOR(&mutex->state, WRITER_PENDING); - } - backoff.pause(); - } - ITT_NOTIFY(sync_acquired, mutex); - __TBB_ASSERT( (mutex->state & BUSY)==WRITER, "invalid state of a write lock" ); - return false; -} - -//! Signal that write lock is released -void spin_rw_mutex::internal_release_writer(spin_rw_mutex *mutex) { - __TBB_ASSERT( (mutex->state & BUSY)==WRITER, "invalid state of a write lock" ); - ITT_NOTIFY(sync_releasing, mutex); - mutex->state = 0; -} - -//! Acquire lock on given mutex. -void spin_rw_mutex::internal_acquire_reader(spin_rw_mutex *mutex) { - ITT_NOTIFY(sync_prepare, mutex); - atomic_backoff backoff; - for(;;) { - state_t s = mutex->state; - if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests - if( CAS(mutex->state, s+ONE_READER, s) ) - break; // successfully stored increased number of readers - backoff.reset(); // we could be very close to complete op. - } - backoff.pause(); - } - ITT_NOTIFY(sync_acquired, mutex); - __TBB_ASSERT( mutex->state & READERS, "invalid state of a read lock: no readers" ); - __TBB_ASSERT( !(mutex->state & WRITER), "invalid state of a read lock: active writer" ); -} - -//! Upgrade reader to become a writer. 
-/** Returns true if the upgrade happened without re-acquiring the lock and false if opposite */ -bool spin_rw_mutex::internal_upgrade(spin_rw_mutex *mutex) { - state_t s = mutex->state; - __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " ); - __TBB_ASSERT( !(s & WRITER), "invalid state before upgrade: active writer " ); - // check and set writer-pending flag - // required conditions: either no pending writers, or we are the only reader - // (with multiple readers and pending writer, another upgrade could have been requested) - while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) { - if( CAS(mutex->state, s | WRITER_PENDING, s) ) - { - atomic_backoff backoff; - ITT_NOTIFY(sync_prepare, mutex); - while( (mutex->state & READERS) != ONE_READER ) // more than 1 reader - backoff.pause(); - // the state should be 0...0110, i.e. 1 reader and waiting writer; - // both new readers and writers are blocked - __TBB_ASSERT(mutex->state == (ONE_READER | WRITER_PENDING),"invalid state when upgrading to writer"); - mutex->state = WRITER; - ITT_NOTIFY(sync_acquired, mutex); - __TBB_ASSERT( (mutex->state & BUSY) == WRITER, "invalid state after upgrade" ); - return true; // successfully upgraded - } else { - s = mutex->state; // re-read - } - } - // slow reacquire - internal_release_reader(mutex); - return internal_acquire_writer(mutex); // always returns false -} - -void spin_rw_mutex::internal_downgrade(spin_rw_mutex *mutex) { - __TBB_ASSERT( (mutex->state & BUSY) == WRITER, "invalid state before downgrade" ); - ITT_NOTIFY(sync_releasing, mutex); - mutex->state = ONE_READER; - __TBB_ASSERT( mutex->state & READERS, "invalid state after downgrade: no readers" ); - __TBB_ASSERT( !(mutex->state & WRITER), "invalid state after downgrade: active writer" ); -} - -void spin_rw_mutex::internal_release_reader(spin_rw_mutex *mutex) -{ - __TBB_ASSERT( mutex->state & READERS, "invalid state of a read lock: no readers" ); - __TBB_ASSERT( !(mutex->state & WRITER), "invalid state of a read lock: active writer" ); - ITT_NOTIFY(sync_releasing, mutex); // release reader - __TBB_FetchAndAddWrelease((volatile void *)&(mutex->state),-(intptr_t)ONE_READER); -} - -bool spin_rw_mutex::internal_try_acquire_writer( spin_rw_mutex * mutex ) -{ -// for a writer: only possible to acquire if no active readers or writers - state_t s = mutex->state; // on Itanium, this volatile load has acquire semantic - if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101 - if( CAS(mutex->state, WRITER, s) ) { - ITT_NOTIFY(sync_acquired, mutex); - return true; // successfully stored writer flag - } - return false; -} - -bool spin_rw_mutex::internal_try_acquire_reader( spin_rw_mutex * mutex ) -{ -// for a reader: acquire if no active or waiting writers - state_t s = mutex->state; // on Itanium, a load of volatile variable has acquire semantic - while( !(s & (WRITER|WRITER_PENDING)) ) // no writers - if( CAS(mutex->state, s+ONE_READER, s) ) { - ITT_NOTIFY(sync_acquired, mutex); - return true; // successfully stored increased number of readers - } - return false; -} - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.h b/deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.h deleted file mode 100644 index bc45277e69..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/spin_rw_mutex_v2.h +++ /dev/null @@ -1,185 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
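To illustrate the reader-writer mutex implemented above (its scoped_lock interface follows in spin_rw_mutex_v2.h below): a usage sketch, not part of this patch. The point worth showing is that upgrade_to_writer() may release and re-acquire the lock, in which case it returns false and the protected state has to be re-checked.

    // Sketch: read-mostly access with an occasional upgrade to a writer lock.
    #include "spin_rw_mutex_v2.h"   // illustrative include path

    static tbb::spin_rw_mutex mtx;
    static int shared_value = -1;

    void fix_if_negative() {
        tbb::spin_rw_mutex::scoped_lock lock(mtx, /*write=*/false);   // reader lock
        if (shared_value < 0) {
            if (!lock.upgrade_to_writer()) {
                // Lock was re-acquired from scratch: someone else may have fixed it.
                if (shared_value >= 0)
                    return;
            }
            shared_value = 0;       // writer lock is held here
        }
    }   // scoped_lock destructor releases the lock

    int main() {
        fix_if_negative();
        return shared_value;
    }
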
-
- Threading Building Blocks is free software; you can redistribute it
- and/or modify it under the terms of the GNU General Public License
- version 2 as published by the Free Software Foundation.
-
- Threading Building Blocks is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with Threading Building Blocks; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
- As a special exception, you may use this file as part of a free software
- library without restriction. Specifically, if other files instantiate
- templates or use macros or inline functions from this file, or you compile
- this file and link it with other files to produce an executable, this
- file does not by itself cause the resulting executable to be covered by
- the GNU General Public License. This exception does not however
- invalidate any other reasons why the executable file might be covered by
- the GNU General Public License.
-*/
-
-#ifndef __TBB_spin_rw_mutex_H
-#define __TBB_spin_rw_mutex_H
-
-#include "tbb/tbb_stddef.h"
-
-namespace tbb {
-
-//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference
-/** @ingroup synchronization */
-class spin_rw_mutex {
- //! @cond INTERNAL
-
- //! Present so that 1.0 headers work with 1.1 dynamic library.
- static void __TBB_EXPORTED_FUNC internal_itt_releasing(spin_rw_mutex *);
-
- //! Internal acquire write lock.
- static bool __TBB_EXPORTED_FUNC internal_acquire_writer(spin_rw_mutex *);
-
- //! Out of line code for releasing a write lock.
- /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */
- static void __TBB_EXPORTED_FUNC internal_release_writer(spin_rw_mutex *);
-
- //! Internal acquire read lock.
- static void __TBB_EXPORTED_FUNC internal_acquire_reader(spin_rw_mutex *);
-
- //! Internal upgrade reader to become a writer.
- static bool __TBB_EXPORTED_FUNC internal_upgrade(spin_rw_mutex *);
-
- //! Out of line code for downgrading a writer to a reader.
- /** This code has debug checking and instrumentation for Intel(R) Thread Checker and Intel(R) Thread Profiler. */
- static void __TBB_EXPORTED_FUNC internal_downgrade(spin_rw_mutex *);
-
- //! Internal release read lock.
- static void __TBB_EXPORTED_FUNC internal_release_reader(spin_rw_mutex *);
-
- //! Internal try_acquire write lock.
- static bool __TBB_EXPORTED_FUNC internal_try_acquire_writer(spin_rw_mutex *);
-
- //! Internal try_acquire read lock.
- static bool __TBB_EXPORTED_FUNC internal_try_acquire_reader(spin_rw_mutex *);
-
- //! @endcond
-public:
- //! Construct unacquired mutex.
- spin_rw_mutex() : state(0) {}
-
-#if TBB_DO_ASSERT
- //! Destructor asserts if the mutex is still acquired, i.e. its state is not zero.
- ~spin_rw_mutex() {
- __TBB_ASSERT( !state, "destruction of an acquired mutex");
- };
-#endif /* TBB_DO_ASSERT */
-
- //! The scoped locking pattern
- /** It helps to avoid the common problem of forgetting to release the lock.
- It also nicely provides the "node" for queuing locks. */
- class scoped_lock : private internal::no_copy {
- public:
- //! Construct lock that has not acquired a mutex.
- /** Equivalent to zero-initialization of *this. */
- scoped_lock() : mutex(NULL) {}
-
- //! Acquire lock on given mutex.
- /** Upon entry, *this should not be in the "have acquired a mutex" state. */ - scoped_lock( spin_rw_mutex& m, bool write = true ) : mutex(NULL) { - acquire(m, write); - } - - //! Release lock (if lock is held). - ~scoped_lock() { - if( mutex ) release(); - } - - //! Acquire lock on given mutex. - void acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - is_writer = write; - mutex = &m; - if( write ) internal_acquire_writer(mutex); - else internal_acquire_reader(mutex); - } - - //! Upgrade reader to become a writer. - /** Returns true if the upgrade happened without re-acquiring the lock and false if opposite */ - bool upgrade_to_writer() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( !is_writer, "not a reader" ); - is_writer = true; - return internal_upgrade(mutex); - } - - //! Release lock. - void release() { - __TBB_ASSERT( mutex, "lock is not acquired" ); - spin_rw_mutex *m = mutex; - mutex = NULL; - if( is_writer ) { -#if TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT - internal_release_writer(m); -#else - m->state = 0; -#endif /* TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT */ - } else { - internal_release_reader(m); - } - }; - - //! Downgrade writer to become a reader. - bool downgrade_to_reader() { -#if TBB_DO_THREADING_TOOLS||TBB_DO_ASSERT - __TBB_ASSERT( mutex, "lock is not acquired" ); - __TBB_ASSERT( is_writer, "not a writer" ); - internal_downgrade(mutex); -#else - mutex->state = 4; // Bit 2 - reader, 00..00100 -#endif - is_writer = false; - - return true; - } - - //! Try acquire lock on given mutex. - bool try_acquire( spin_rw_mutex& m, bool write = true ) { - __TBB_ASSERT( !mutex, "holding mutex already" ); - bool result; - is_writer = write; - result = write? internal_try_acquire_writer(&m) - : internal_try_acquire_reader(&m); - if( result ) mutex = &m; - return result; - } - - private: - //! The pointer to the current mutex that is held, or NULL if no mutex is held. - spin_rw_mutex* mutex; - - //! True if holding a writer lock, false if holding a reader lock. - /** Not defined if not holding a lock. */ - bool is_writer; - }; - -private: - typedef uintptr_t state_t; - static const state_t WRITER = 1; - static const state_t WRITER_PENDING = 2; - static const state_t READERS = ~(WRITER | WRITER_PENDING); - static const state_t ONE_READER = 4; - static const state_t BUSY = WRITER | READERS; - /** Bit 0 = writer is holding lock - Bit 1 = request by a writer to acquire lock (hint to readers to wait) - Bit 2..N = number of readers holding lock */ - volatile state_t state; -}; - -} // namespace ThreadingBuildingBlocks - -#endif /* __TBB_spin_rw_mutex_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/old/task_v2.cpp b/deal.II/bundled/tbb30_104oss/src/old/task_v2.cpp deleted file mode 100644 index 7deccfc2fb..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/task_v2.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* This compilation unit provides definition of task::destroy( task& ) - that is binary compatible with TBB 2.x. In TBB 3.0, the method became - static, and its name decoration changed, though the definition remained. - - The macro switch should be set prior to including task.h - or any TBB file that might bring task.h up. -*/ -#define __TBB_DEPRECATED_TASK_INTERFACE 1 -#include "tbb/task.h" - -namespace tbb { - -void task::destroy( task& victim ) { - // Forward to static version - task_base::destroy( victim ); -} - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/old/test_concurrent_queue_v2.cpp b/deal.II/bundled/tbb30_104oss/src/old/test_concurrent_queue_v2.cpp deleted file mode 100644 index c5ed6eca5d..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/test_concurrent_queue_v2.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "tbb/concurrent_queue.h" -#include "tbb/atomic.h" -#include "tbb/tick_count.h" - -#include "../test/harness_assert.h" -#include "../test/harness.h" - -static tbb::atomic FooConstructed; -static tbb::atomic FooDestroyed; - -class Foo { - enum state_t{ - LIVE=0x1234, - DEAD=0xDEAD - }; - state_t state; -public: - int thread_id; - int serial; - Foo() : state(LIVE) { - ++FooConstructed; - } - Foo( const Foo& item ) : state(LIVE) { - ASSERT( item.state==LIVE, NULL ); - ++FooConstructed; - thread_id = item.thread_id; - serial = item.serial; - } - ~Foo() { - ASSERT( state==LIVE, NULL ); - ++FooDestroyed; - state=DEAD; - thread_id=0xDEAD; - serial=0xDEAD; - } - void operator=( Foo& item ) { - ASSERT( item.state==LIVE, NULL ); - ASSERT( state==LIVE, NULL ); - thread_id = item.thread_id; - serial = item.serial; - } - bool is_const() {return false;} - bool is_const() const {return true;} -}; - -const size_t MAXTHREAD = 256; - -static int Sum[MAXTHREAD]; - -//! Count of various pop operations -/** [0] = pop_if_present that failed - [1] = pop_if_present that succeeded - [2] = pop */ -static tbb::atomic PopKind[3]; - -const int M = 10000; - -struct Body { - tbb::concurrent_queue* queue; - const int nthread; - Body( int nthread_ ) : nthread(nthread_) {} - void operator()( long thread_id ) const { - long pop_kind[3] = {0,0,0}; - int serial[MAXTHREAD+1]; - memset( serial, 0, nthread*sizeof(unsigned) ); - ASSERT( thread_idpop_if_present(f); - ++pop_kind[prepopped]; - } - Foo g; - g.thread_id = thread_id; - g.serial = j+1; - queue->push( g ); - if( !prepopped ) { - queue->pop(f); - ++pop_kind[2]; - } - ASSERT( f.thread_id<=nthread, NULL ); - ASSERT( f.thread_id==nthread || serial[f.thread_id]0, "nthread must be positive" ); - if( prefill+1>=capacity ) - return; - bool success = false; - for( int k=0; k<3; ++k ) - PopKind[k] = 0; - for( int trial=0; !success; ++trial ) { - FooConstructed = 0; - FooDestroyed = 0; - Body body(nthread); - tbb::concurrent_queue queue; - queue.set_capacity( capacity ); - body.queue = &queue; - for( int i=0; i=0; ) { - ASSERT( !queue.empty(), NULL ); - Foo f; - queue.pop(f); - ASSERT( queue.size()==i, NULL ); - sum += f.serial-1; - } - ASSERT( queue.empty(), NULL ); - ASSERT( queue.size()==0, NULL ); - if( sum!=expected ) - printf("sum=%d expected=%d\n",sum,expected); - ASSERT( FooConstructed==FooDestroyed, NULL ); - - success = true; - if( nthread>1 && prefill==0 ) { - // Check that pop_if_present got sufficient exercise - for( int k=0; k<2; ++k ) { -#if (_WIN32||_WIN64) - // The TBB library on Windows seems to have a tough time generating - // the desired interleavings for pop_if_present, so the code tries longer, and settles - // for fewer desired interleavings. - const int max_trial = 100; - const int min_requirement = 20; -#else - const int min_requirement = 100; - const int max_trial = 20; -#endif /* _WIN32||_WIN64 */ - if( PopKind[k]=max_trial ) { - if( Verbose ) - printf("Warning: %d threads had only %ld pop_if_present operations %s after %d trials (expected at least %d). " - "This problem may merely be unlucky scheduling. " - "Investigate only if it happens repeatedly.\n", - nthread, long(PopKind[k]), k==0?"failed":"succeeded", max_trial, min_requirement); - else - printf("Warning: the number of %s pop_if_present operations is less than expected for %d threads. 
Investigate if it happens repeatedly.\n", - k==0?"failed":"succeeded", nthread ); - } else { - success = false; - } - } - } - } - } -} - -template -void TestIteratorAux( Iterator1 i, Iterator2 j, int size ) { - // Now test iteration - Iterator1 old_i; - for( int k=0; k" - ASSERT( k+2==i->serial, NULL ); - } - // Test assignment - old_i = i; - } - ASSERT( k+1==f.serial, NULL ); - } - ASSERT( !(i!=j), NULL ); - ASSERT( i==j, NULL ); -} - -template -void TestIteratorAssignment( Iterator2 j ) { - Iterator1 i(j); - ASSERT( i==j, NULL ); - ASSERT( !(i!=j), NULL ); - Iterator1 k; - k = j; - ASSERT( k==j, NULL ); - ASSERT( !(k!=j), NULL ); -} - -//! Test the iterators for concurrent_queue -void TestIterator() { - tbb::concurrent_queue queue; - tbb::concurrent_queue& const_queue = queue; - for( int j=0; j<500; ++j ) { - TestIteratorAux( queue.begin(), queue.end(), j ); - TestIteratorAux( const_queue.begin(), const_queue.end(), j ); - TestIteratorAux( const_queue.begin(), queue.end(), j ); - TestIteratorAux( queue.begin(), const_queue.end(), j ); - Foo f; - f.serial = j+1; - queue.push(f); - } - TestIteratorAssignment::const_iterator>( const_queue.begin() ); - TestIteratorAssignment::const_iterator>( queue.begin() ); - TestIteratorAssignment::iterator>( queue.begin() ); -} - -void TestConcurrenetQueueType() { - AssertSameType( tbb::concurrent_queue::value_type(), Foo() ); - Foo f; - const Foo g; - tbb::concurrent_queue::reference r = f; - ASSERT( &r==&f, NULL ); - ASSERT( !r.is_const(), NULL ); - tbb::concurrent_queue::const_reference cr = g; - ASSERT( &cr==&g, NULL ); - ASSERT( cr.is_const(), NULL ); -} - -template -void TestEmptyQueue() { - const tbb::concurrent_queue queue; - ASSERT( queue.size()==0, NULL ); - ASSERT( queue.capacity()>0, NULL ); - ASSERT( size_t(queue.capacity())>=size_t(-1)/(sizeof(void*)+sizeof(T)), NULL ); -} - -void TestFullQueue() { - for( int n=0; n<10; ++n ) { - FooConstructed = 0; - FooDestroyed = 0; - tbb::concurrent_queue queue; - queue.set_capacity(n); - for( int i=0; i<=n; ++i ) { - Foo f; - f.serial = i; - bool result = queue.push_if_not_full( f ); - ASSERT( result==(i -struct TestNegativeQueueBody { - tbb::concurrent_queue& queue; - const int nthread; - TestNegativeQueueBody( tbb::concurrent_queue& q, int n ) : queue(q), nthread(n) {} - void operator()( int k ) const { - if( k==0 ) { - int number_of_pops = nthread-1; - // Wait for all pops to pend. - while( queue.size()>-number_of_pops ) { - __TBB_Yield(); - } - for( int i=0; ; ++i ) { - ASSERT( queue.size()==i-number_of_pops, NULL ); - ASSERT( queue.empty()==(queue.size()<=0), NULL ); - if( i==number_of_pops ) break; - // Satisfy another pop - queue.push( T() ); - } - } else { - // Pop item from queue - T item; - queue.pop(item); - } - } -}; - -//! Test a queue with a negative size. 
-template -void TestNegativeQueue( int nthread ) { - tbb::concurrent_queue queue; - NativeParallelFor( nthread, TestNegativeQueueBody(queue,nthread) ); -} - -int TestMain () { - TestEmptyQueue(); - TestEmptyQueue(); - TestFullQueue(); - TestConcurrenetQueueType(); - TestIterator(); - - // Test concurrent operations - for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - TestNegativeQueue(nthread); - for( int prefill=0; prefill<64; prefill+=(1+prefill/3) ) { - TestPushPop(prefill,ptrdiff_t(-1),nthread); - TestPushPop(prefill,ptrdiff_t(1),nthread); - TestPushPop(prefill,ptrdiff_t(2),nthread); - TestPushPop(prefill,ptrdiff_t(10),nthread); - TestPushPop(prefill,ptrdiff_t(100),nthread); - } - } - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/old/test_concurrent_vector_v2.cpp b/deal.II/bundled/tbb30_104oss/src/old/test_concurrent_vector_v2.cpp deleted file mode 100644 index 62fa5f1ec8..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/test_concurrent_vector_v2.cpp +++ /dev/null @@ -1,565 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "concurrent_vector_v2.h" -#include -#include -#include "../test/harness_assert.h" - -tbb::atomic FooCount; - -//! 
Problem size -const size_t N = 500000; - -struct Foo { - int my_bar; -public: - enum State { - DefaultInitialized=0x1234, - CopyInitialized=0x89ab, - Destroyed=0x5678 - } state; - int& bar() { - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - return my_bar; - } - int bar() const { - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - return my_bar; - } - static const int initial_value_of_bar = 42; - Foo() { - state = DefaultInitialized; - ++FooCount; - my_bar = initial_value_of_bar; - } - Foo( const Foo& foo ) { - state = CopyInitialized; - ++FooCount; - my_bar = foo.my_bar; - } - ~Foo() { - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - state = Destroyed; - my_bar = ~initial_value_of_bar; - --FooCount; - } - bool is_const() const {return true;} - bool is_const() {return false;} -}; - -class FooWithAssign: public Foo { -public: - void operator=( const FooWithAssign& x ) { - ASSERT( x.state==DefaultInitialized||x.state==CopyInitialized, NULL ); - ASSERT( state==DefaultInitialized||state==CopyInitialized, NULL ); - my_bar = x.my_bar; - } -}; - -inline void NextSize( int& s ) { - if( s<=32 ) ++s; - else s += s/10; -} - -static void CheckVector( const tbb::concurrent_vector& cv, size_t expected_size, size_t old_size ) { - ASSERT( cv.size()==expected_size, NULL ); - ASSERT( cv.empty()==(expected_size==0), NULL ); - for( int j=0; j vector_t; - for( int old_size=0; old_size<=128; NextSize( old_size ) ) { - for( int new_size=old_size; new_size<=128; NextSize( new_size ) ) { - long count = FooCount; - vector_t v; - ASSERT( count==FooCount, NULL ); - v.grow_by(old_size); - ASSERT( count+old_size==FooCount, NULL ); - for( int j=0; j vector_t; - vector_t v; - v.reserve( old_size ); - ASSERT( v.capacity()>=old_size, NULL ); - v.reserve( new_size ); - ASSERT( v.capacity()>=old_size, NULL ); - ASSERT( v.capacity()>=new_size, NULL ); - for( size_t i=0; i<2*new_size; ++i ) { - ASSERT( size_t(FooCount)==count+i, NULL ); - size_t j = v.grow_by(1); - ASSERT( j==i, NULL ); - } - } - ASSERT( FooCount==count, NULL ); - } - } -} - -struct AssignElement { - typedef tbb::concurrent_vector::range_type::iterator iterator; - iterator base; - void operator()( const tbb::concurrent_vector::range_type& range ) const { - for( iterator i=range.begin(); i!=range.end(); ++i ) { - if( *i!=0 ) - std::printf("ERROR for v[%ld]\n", long(i-base)); - *i = int(i-base); - } - } - AssignElement( iterator base_ ) : base(base_) {} -}; - -struct CheckElement { - typedef tbb::concurrent_vector::const_range_type::iterator iterator; - iterator base; - void operator()( const tbb::concurrent_vector::const_range_type& range ) const { - for( iterator i=range.begin(); i!=range.end(); ++i ) - if( *i != int(i-base) ) - std::printf("ERROR for v[%ld]\n", long(i-base)); - } - CheckElement( iterator base_ ) : base(base_) {} -}; - -#include "tbb/tick_count.h" -#include "tbb/parallel_for.h" -#include "../test/harness.h" - -void TestParallelFor( int nthread ) { - typedef tbb::concurrent_vector vector_t; - vector_t v; - v.grow_to_at_least(N); - tbb::tick_count t0 = tbb::tick_count::now(); - if( Verbose ) - std::printf("Calling parallel_for.h with %ld threads\n",long(nthread)); - tbb::parallel_for( v.range(10000), AssignElement(v.begin()) ); - tbb::tick_count t1 = tbb::tick_count::now(); - const vector_t& u = v; - tbb::parallel_for( u.range(10000), CheckElement(u.begin()) ); - tbb::tick_count t2 = tbb::tick_count::now(); - if( Verbose ) - std::printf("Time for parallel_for.h: assign time = %8.5f, 
check time = %8.5f\n", - (t1-t0).seconds(),(t2-t1).seconds()); - for( long i=0; size_t(i) -void TestIteratorAssignment( Iterator2 j ) { - Iterator1 i(j); - ASSERT( i==j, NULL ); - ASSERT( !(i!=j), NULL ); - Iterator1 k; - k = j; - ASSERT( k==j, NULL ); - ASSERT( !(k!=j), NULL ); -} - -template -void TestIteratorTraits() { - AssertSameType( static_cast(0), static_cast(0) ); - AssertSameType( static_cast(0), static_cast(0) ); - AssertSameType( static_cast(0), static_cast(0) ); - AssertSameType( static_cast(0), static_cast(0) ); - T x; - typename Iterator::reference xr = x; - typename Iterator::pointer xp = &x; - ASSERT( &xr==xp, NULL ); -} - -template -void CheckConstIterator( const Vector& u, int i, const Iterator& cp ) { - typename Vector::const_reference pref = *cp; - if( pref.bar()!=i ) - std::printf("ERROR for u[%ld] using const_iterator\n", long(i)); - typename Vector::difference_type delta = cp-u.begin(); - ASSERT( delta==i, NULL ); - if( u[i].bar()!=i ) - std::printf("ERROR for u[%ld] using subscripting\n", long(i)); - ASSERT( u.begin()[i].bar()==i, NULL ); -} - -template -void CheckIteratorComparison( V& u ) { - Iterator1 i = u.begin(); - for( int i_count=0; i_count<100; ++i_count ) { - Iterator2 j = u.begin(); - for( int j_count=0; j_count<100; ++j_count ) { - ASSERT( (i==j)==(i_count==j_count), NULL ); - ASSERT( (i!=j)==(i_count!=j_count), NULL ); - ASSERT( (i-j)==(i_count-j_count), NULL ); - ASSERT( (ij)==(i_count>j_count), NULL ); - ASSERT( (i<=j)==(i_count<=j_count), NULL ); - ASSERT( (i>=j)==(i_count>=j_count), NULL ); - ++j; - } - ++i; - } -} - -//! Test sequential iterators for vector type V. -/** Also does timing. */ -template -void TestSequentialFor() { - V v; - v.grow_by(N); - - // Check iterator - tbb::tick_count t0 = tbb::tick_count::now(); - typename V::iterator p = v.begin(); - ASSERT( !(*p).is_const(), NULL ); - ASSERT( !p->is_const(), NULL ); - for( int i=0; size_t(i)is_const(), NULL ); - for( int i=0; size_t(i)0; ) { - --i; - --cp; - if( i>0 ) { - typename V::const_iterator cp_old = cp--; - int here = (*cp_old).bar(); - ASSERT( here==u[i].bar(), NULL ); - typename V::const_iterator cp_new = cp++; - int prev = (*cp_new).bar(); - ASSERT( prev==u[i-1].bar(), NULL ); - } - CheckConstIterator(u,i,cp); - } - - // Now go forwards and backwards - cp = u.begin(); - ptrdiff_t j = 0; - for( size_t i=0; i(v); - CheckIteratorComparison(v); - CheckIteratorComparison(v); - CheckIteratorComparison(v); - - TestIteratorAssignment( u.begin() ); - TestIteratorAssignment( v.begin() ); - TestIteratorAssignment( v.begin() ); - - // Check reverse_iterator - typename V::reverse_iterator rp = v.rbegin(); - for( size_t i=v.size(); i>0; --i, ++rp ) { - typename V::reference pref = *rp; - ASSERT( size_t(pref.bar())==i-1, NULL ); - ASSERT( rp!=v.rend(), NULL ); - } - ASSERT( rp==v.rend(), NULL ); - - // Check const_reverse_iterator - typename V::const_reverse_iterator crp = u.rbegin(); - for( size_t i=v.size(); i>0; --i, ++crp ) { - typename V::const_reference cpref = *crp; - ASSERT( size_t(cpref.bar())==i-1, NULL ); - ASSERT( crp!=u.rend(), NULL ); - } - ASSERT( crp==u.rend(), NULL ); - - TestIteratorAssignment( u.rbegin() ); - TestIteratorAssignment( v.rbegin() ); -} - -static const size_t Modulus = 7; - -typedef tbb::concurrent_vector MyVector; - -class GrowToAtLeast { - MyVector& my_vector; -public: - void operator()( const tbb::blocked_range& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - size_t n = my_vector.size(); - size_t k = n==0 ? 
0 : i % (2*n+1); - my_vector.grow_to_at_least(k+1); - ASSERT( my_vector.size()>=k+1, NULL ); - } - } - GrowToAtLeast( MyVector& vector ) : my_vector(vector) {} -}; - -void TestConcurrentGrowToAtLeast() { - MyVector v; - for( size_t s=1; s<1000; s*=10 ) { - tbb::parallel_for( tbb::blocked_range(0,1000000,100), GrowToAtLeast(v) ); - } -} - -//! Test concurrent invocations of method concurrent_vector::grow_by -class GrowBy { - MyVector& my_vector; -public: - void operator()( const tbb::blocked_range& range ) const { - for( int i=range.begin(); i!=range.end(); ++i ) { - if( i%3 ) { - Foo& element = my_vector[my_vector.grow_by(1)]; - element.bar() = i; - } else { - Foo f; - f.bar() = i; - size_t k = my_vector.push_back( f ); - ASSERT( my_vector[k].bar()==i, NULL ); - } - } - } - GrowBy( MyVector& vector ) : my_vector(vector) {} -}; - -//! Test concurrent invocations of method concurrent_vector::grow_by -void TestConcurrentGrowBy( int nthread ) { - int m = 100000; - MyVector v; - tbb::parallel_for( tbb::blocked_range(0,m,1000), GrowBy(v) ); - ASSERT( v.size()==size_t(m), NULL ); - - // Verify that v is a permutation of 0..m - int inversions = 0; - bool* found = new bool[m]; - memset( found, 0, m ); - for( int i=0; i0 ) - inversions += v[i].bar()1 || v[i].bar()==i, "sequential execution is wrong" ); - } - delete[] found; - if( nthread>1 && inversions vector_t; - for( int dst_size=1; dst_size<=128; NextSize( dst_size ) ) { - for( int src_size=2; src_size<=128; NextSize( src_size ) ) { - vector_t u; - u.grow_to_at_least(src_size); - for( int i=0; i - -typedef unsigned long Number; - -static tbb::concurrent_vector Primes; - -class FindPrimes { - bool is_prime( Number val ) const { - int limit, factor = 3; - if( val<5u ) - return val==2; - else { - limit = long(sqrtf(float(val))+0.5f); - while( factor<=limit && val % factor ) - ++factor; - return factor>limit; - } - } -public: - void operator()( const tbb::blocked_range& r ) const { - for( Number i=r.begin(); i!=r.end(); ++i ) { - if( i%2 && is_prime(i) ) { - Primes[Primes.grow_by(1)] = i; - } - } - } -}; - -static double TimeFindPrimes( int nthread ) { - Primes.clear(); - tbb::task_scheduler_init init(nthread); - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for( tbb::blocked_range(0,1000000,500), FindPrimes() ); - tbb::tick_count t1 = tbb::tick_count::now(); - return (t1-t0).seconds(); -} - -static void TestFindPrimes() { - // Time fully subscribed run. - double t2 = TimeFindPrimes( tbb::task_scheduler_init::automatic ); - - // Time parallel run that is very likely oversubscribed. - double t128 = TimeFindPrimes(128); - - if( Verbose ) - std::printf("TestFindPrimes: t2==%g t128=%g\n", t2, t128 ); - - // We allow the 128-thread run a little extra time to allow for thread overhead. - // Theoretically, following test will fail on machine with >128 processors. - // But that situation is not going to come up in the near future, - // and the generalization to fix the issue is not worth the trouble. - if( t128>1.10*t2 ) { - std::printf("Warning: grow_by is pathetically slow: t2==%g t128=%g\n", t2, t128); - } -} - -//------------------------------------------------------------------------ -// Test compatibility with STL sort. 
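A brief note on why the STL-sort compatibility test below works at all: tbb::concurrent_vector grows without relocating existing elements yet still provides random-access iterators, so std::sort can be applied to it directly in serial code. A minimal sketch under that assumption, with the element type chosen purely for illustration:

#include <algorithm>
#include "tbb/concurrent_vector.h"

void sort_concurrent_vector_example() {
    tbb::concurrent_vector<int> v;
    for (int i = 100; i > 0; --i)
        v.push_back(i);                  // fill in descending order
    std::sort(v.begin(), v.end());       // valid: iterators are random access
    // v is now sorted ascending; sorting must not run concurrently
    // with other operations on the same vector.
}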
-//------------------------------------------------------------------------ - -#include - -void TestSort() { - for( int n=1; n<100; n*=3 ) { - tbb::concurrent_vector array; - array.grow_by( n ); - for( int i=0; i::iterator,Foo>(); - TestIteratorTraits::const_iterator,const Foo>(); - TestSequentialFor > (); - TestResizeAndCopy(); - TestAssign(); - TestCapacity(); - for( int nthread=MinThread; nthread<=MaxThread; ++nthread ) { - tbb::task_scheduler_init init( nthread ); - TestParallelFor( nthread ); - TestConcurrentGrowToAtLeast(); - TestConcurrentGrowBy( nthread ); - } - TestFindPrimes(); - TestSort(); - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/old/test_mutex_v2.cpp b/deal.II/bundled/tbb30_104oss/src/old/test_mutex_v2.cpp deleted file mode 100644 index 0c3f699c30..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/old/test_mutex_v2.cpp +++ /dev/null @@ -1,268 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -//------------------------------------------------------------------------ -// Test TBB mutexes when used with parallel_for.h -// -// Usage: test_Mutex.exe [-v] nthread -// -// The -v option causes timing information to be printed. -// -// Compile with _OPENMP and -openmp -//------------------------------------------------------------------------ -#include "tbb/atomic.h" -#include "tbb/blocked_range.h" -#include "tbb/parallel_for.h" -#include "tbb/tick_count.h" -#include "../test/harness.h" -#include "spin_rw_mutex_v2.h" -#include -#include - -#if __linux__ -#define STD std -#else -#define STD /* Cater to broken Windows compilers that are missing "std". */ -#endif /* __linux__ */ - -// This test deliberately avoids a "using tbb" statement, -// so that the error of putting types in the wrong namespace will be caught. - -template -struct Counter { - typedef M mutex_type; - M mutex; - volatile long value; -}; - -//! Function object for use with parallel_for.h. -template -struct AddOne { - C& counter; - /** Increments counter once for each iteration in the iteration space. 
*/ - void operator()( tbb::blocked_range& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - if( i&1 ) { - // Try implicit acquire and explicit release - typename C::mutex_type::scoped_lock lock(counter.mutex); - counter.value = counter.value+1; - lock.release(); - } else { - // Try explicit acquire and implicit release - typename C::mutex_type::scoped_lock lock; - lock.acquire(counter.mutex); - counter.value = counter.value+1; - } - } - } - AddOne( C& counter_ ) : counter(counter_) {} -}; - -//! Generic test of a TBB mutex type M. -/** Does not test features specific to reader-writer locks. */ -template -void Test( const char * name ) { - if( Verbose ) { - printf("%s time = ",name); - fflush(stdout); - } - Counter counter; - counter.value = 0; - const int n = 100000; - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range(0,n,10000),AddOne >(counter)); - tbb::tick_count t1 = tbb::tick_count::now(); - if( Verbose ) - printf("%g usec\n",(t1-t0).seconds()); - if( counter.value!=n ) - STD::printf("ERROR for %s: counter.value=%ld\n",name,counter.value); -} - -template -struct Invariant { - typedef M mutex_type; - M mutex; - const char* mutex_name; - volatile long value[N]; - volatile long single_value; - Invariant( const char* mutex_name_ ) : - mutex_name(mutex_name_) - { - single_value = 0; - for( size_t k=0; k -struct TwiddleInvariant { - I& invariant; - /** Increments counter once for each iteration in the iteration space. */ - void operator()( tbb::blocked_range& range ) const { - for( size_t i=range.begin(); i!=range.end(); ++i ) { - //! Every 8th access is a write access - bool write = (i%8)==7; - bool okay = true; - bool lock_kept = true; - if( (i/8)&1 ) { - // Try implicit acquire and explicit release - typename I::mutex_type::scoped_lock lock(invariant.mutex,write); - if( write ) { - long my_value = invariant.value[0]; - invariant.update(); - if( i%16==7 ) { - lock_kept = lock.downgrade_to_reader(); - if( !lock_kept ) - my_value = invariant.value[0] - 1; - okay = invariant.value_is(my_value+1); - } - } else { - okay = invariant.is_okay(); - if( i%8==3 ) { - long my_value = invariant.value[0]; - lock_kept = lock.upgrade_to_writer(); - if( !lock_kept ) - my_value = invariant.value[0]; - invariant.update(); - okay = invariant.value_is(my_value+1); - } - } - lock.release(); - } else { - // Try explicit acquire and implicit release - typename I::mutex_type::scoped_lock lock; - lock.acquire(invariant.mutex,write); - if( write ) { - long my_value = invariant.value[0]; - invariant.update(); - if( i%16==7 ) { - lock_kept = lock.downgrade_to_reader(); - if( !lock_kept ) - my_value = invariant.value[0] - 1; - okay = invariant.value_is(my_value+1); - } - } else { - okay = invariant.is_okay(); - if( i%8==3 ) { - long my_value = invariant.value[0]; - lock_kept = lock.upgrade_to_writer(); - if( !lock_kept ) - my_value = invariant.value[0]; - invariant.update(); - okay = invariant.value_is(my_value+1); - } - } - } - if( !okay ) { - STD::printf( "ERROR for %s at %ld: %s %s %s %s\n",invariant.mutex_name, long(i), - write?"write,":"read,", write?(i%16==7?"downgrade,":""):(i%8==3?"upgrade,":""), - lock_kept?"lock kept,":"lock not kept,", (i/8)&1?"imp/exp":"exp/imp" ); - } - } - } - TwiddleInvariant( I& invariant_ ) : invariant(invariant_) {} -}; - -/** This test is generic so that we can test any other kinds of ReaderWriter locks we write later. 
*/ -template -void TestReaderWriterLock( const char * mutex_name ) { - if( Verbose ) { - printf("%s readers & writers time = ",mutex_name); - fflush(stdout); - } - Invariant invariant(mutex_name); - const size_t n = 500000; - tbb::tick_count t0 = tbb::tick_count::now(); - tbb::parallel_for(tbb::blocked_range(0,n,5000),TwiddleInvariant >(invariant)); - tbb::tick_count t1 = tbb::tick_count::now(); - // There is either a writer or a reader upgraded to a writer for each 4th iteration - long expected_value = n/4; - if( !invariant.value_is(expected_value) ) - STD::printf("ERROR for %s: final invariant value is wrong\n",mutex_name); - if( Verbose ) - printf("%g usec\n",(t1-t0).seconds()); -} - -/** Test try_acquire functionality of a non-reenterable mutex */ -template -void TestTryAcquire_OneThread( const char * mutex_name ) { - M tested_mutex; - typename M::scoped_lock lock1; - if( lock1.try_acquire(tested_mutex) ) - lock1.release(); - else - STD::printf("ERROR for %s: try_acquire failed though it should not\n", mutex_name); - { - typename M::scoped_lock lock2(tested_mutex); - if( lock1.try_acquire(tested_mutex) ) - STD::printf("ERROR for %s: try_acquire succeeded though it should not\n", mutex_name); - } - if( lock1.try_acquire(tested_mutex) ) - lock1.release(); - else - STD::printf("ERROR for %s: try_acquire failed though it should not\n", mutex_name); -} - -#include "tbb/task_scheduler_init.h" - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - tbb::task_scheduler_init init( p ); - if( Verbose ) - printf( "testing with %d workers\n", static_cast(p) ); - // Run each test 3 times. - for( int i=0; i<3; ++i ) { - Test( "Spin RW Mutex" ); - - TestTryAcquire_OneThread("Spin RW Mutex"); // only tests try_acquire for writers - TestReaderWriterLock( "Spin RW Mutex" ); - } - if( Verbose ) - printf( "calling destructor for task_scheduler_init\n" ); - } - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/perf/fibonacci_cutoff.cpp b/deal.II/bundled/tbb30_104oss/src/perf/fibonacci_cutoff.cpp deleted file mode 100644 index f1f50efb9d..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/fibonacci_cutoff.cpp +++ /dev/null @@ -1,134 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include -#include - -#include "tbb/task_scheduler_init.h" -#include "tbb/task.h" -#include "tbb/tick_count.h" - -long CutOff = 1; - -long SerialFib( const long n ); - -long ParallelFib( const long n ); - -inline void dump_title() { - printf("Serial/Parallel, P, N, cutoff, repetitions, time, fib, speedup\n"); -} - -inline void output(int P, long n, long c, int T, double serial_elapsed, double elapsed, long result) { - printf("%s, %d, %ld, %ld, %d, %g, %ld, %g\n", ( (P == 0) ? "Serial" : "Parallel" ), P, n, c, T, elapsed, result, serial_elapsed / elapsed); -} - -#define MOVE_BY_FOURTHS 1 -inline long calculate_new_cutoff(const long lo, const long hi) { -#if MOVE_BY_FOURTHS - return lo + (3 + hi - lo ) / 4; -#else - return (hi + lo)/2; -#endif -} - -void find_cutoff(const int P, const long n, const int T, const double serial_elapsed) { - long lo = 1, hi = n; - double elapsed = 0, lo_elapsed = 0, hi_elapsed = 0; - long final_cutoff = -1; - - tbb::task_scheduler_init init(P); - - while(true) { - CutOff = calculate_new_cutoff(lo, hi); - long result = 0; - tbb::tick_count t0; - for (int t = -1; t < T; ++t) { - if (t == 0) t0 = tbb::tick_count::now(); - result += ParallelFib(n); - } - elapsed = (tbb::tick_count::now() - t0).seconds(); - output(P,n,CutOff,T,serial_elapsed,elapsed,result); - - if (serial_elapsed / elapsed >= P/2.0) { - final_cutoff = CutOff; - if (hi == CutOff) { - if (hi == lo) { - // we have had this value at both above and below 50% - lo = 1; lo_elapsed = 0; - } else { - break; - } - } - hi = CutOff; - hi_elapsed = elapsed; - } else { - if (lo == CutOff) break; - lo = CutOff; - lo_elapsed = elapsed; - } - } - - double interpolated_cutoff = lo + ( P/2.0 - serial_elapsed/lo_elapsed ) * ( (hi - lo) / ( serial_elapsed/hi_elapsed - serial_elapsed/lo_elapsed )); - - if (final_cutoff != -1) { - printf("50%% efficiency cutoff is %ld ( linearly interpolated cutoff is %g )\n", final_cutoff, interpolated_cutoff); - } else { - printf("Cannot achieve 50%% efficiency\n"); - } - - return; -} - -int main(int argc, char *argv[]) { - if (argc < 4) { - printf("Usage: %s threads n repetitions\n",argv[0]); - return 1; - } - - dump_title(); - - int P = atoi(argv[1]); - long n = atol(argv[2]); - int T = atoi(argv[3]); - - // collect serial time - long serial_result = 0; - tbb::tick_count t0; - for (int t = -1; t < T; ++t) { - if (t == 0) t0 = tbb::tick_count::now(); - serial_result += SerialFib(n); - } - double serial_elapsed = (tbb::tick_count::now() - t0).seconds(); - output(0,n,0,T,serial_elapsed,serial_elapsed,serial_result); - - // perform search - find_cutoff(P,n,T,serial_elapsed); - - return 0; -} - diff --git a/deal.II/bundled/tbb30_104oss/src/perf/fibonacci_impl_tbb.cpp b/deal.II/bundled/tbb30_104oss/src/perf/fibonacci_impl_tbb.cpp deleted file mode 100644 index 83d7e49235..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/fibonacci_impl_tbb.cpp +++ /dev/null @@ -1,86 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include -#include - -#include "tbb/task_scheduler_init.h" -#include "tbb/task.h" -#include "tbb/tick_count.h" - -extern long CutOff; - -long SerialFib( const long n ) { - if( n<2 ) - return n; - else - return SerialFib(n-1)+SerialFib(n-2); -} - -struct FibContinuation: public tbb::task { - long* const sum; - long x, y; - FibContinuation( long* sum_ ) : sum(sum_) {} - tbb::task* execute() { - *sum = x+y; - return NULL; - } -}; - -struct FibTask: public tbb::task { - long n; - long * sum; - FibTask( const long n_, long * const sum_ ) : - n(n_), sum(sum_) - {} - tbb::task* execute() { - if( n -#include -#include -#include -#include - -#include "tbb/tick_count.h" - -#define HARNESS_CUSTOM_MAIN 1 -#include "../src/test/harness.h" -#include "../src/test/harness_barrier.h" - -#include "tbb/task_scheduler_init.h" -#include "tbb/task.h" -#include "tbb/atomic.h" - -#if __linux__ || __APPLE__ || __FreeBSD__ - #include -#endif - -__TBB_PERF_API int NumCpus = tbb::task_scheduler_init::default_num_threads(), - NumThreads, - MaxConcurrency; - -namespace Perf { - -SessionSettings theSettings; - -namespace internal { - - typedef std::vector durations_t; - - static uintptr_t NumRuns = 7; - static duration_t RunDuration = 0.01; - - static const int RateFieldLen = 10; - static const int OvhdFieldLen = 12; - - const char* TestNameColumnTitle = "Test name"; - const char* WorkloadNameColumnTitle = "Workload"; - - size_t TitleFieldLen = 0; - size_t WorkloadFieldLen = 0; - - int TotalConfigs = 0; - int MaxTbbMasters = 1; - - //! Defines the mapping between threads and cores in the undersubscription mode - /** When adding new enumerator, insert it before amLast, and do not specify - its value explicitly. **/ - enum AffinitizationMode { - amFirst = 0, - amDense = amFirst, - amSparse, - //! Used to track the number of supported affinitization modes - amLast - }; - - static const int NumAffinitizationModes = amLast - amFirst; - - const char* AffinitizationModeNames[] = { "dense", "sparse" }; - - int NumActiveAffModes = 1; - - //! Settings of a test run configuration - struct RunConfig { - int my_maxConcurrency; - int my_numThreads; // For task scheduler tests this is number of workers + 1 - int my_numMasters; // Used for task scheduler tests only - int my_affinityMode; // Used for task scheduler tests only - int my_workloadID; - - int NumMasters () const { - return theSettings.my_opts & UseTaskScheduler ? 
my_numMasters : my_numThreads; - } - }; - - double StandardDeviation ( double avg, const durations_t& d ) { - double std_dev = 0; - for ( uintptr_t i = 0; i < d.size(); ++i ) { - double dev = fabs(d[i] - avg); - std_dev += dev * dev; - } - std_dev = sqrt(std_dev / d.size()); - return std_dev / avg * 100; - } - - void Statistics ( const durations_t& d, - duration_t& avgTime, double& stdDev, - duration_t& minTime, duration_t& maxTime ) - { - minTime = maxTime = avgTime = d[0]; - for ( size_t i = 1; i < d.size(); ++i ) { - avgTime += d[i]; - if ( minTime > d[i] ) - minTime = d[i]; - else if ( maxTime < d[i] ) - maxTime = d[i]; - } - avgTime = avgTime / d.size(); - stdDev = StandardDeviation( avgTime, d ); - } - - //! Timing data for the series of repeated runs and results of their statistical processing - struct TimingSeries { - //! Statistical timing series - durations_t my_durations; - - //! Average time obtained from my_durations data - duration_t my_avgTime; - - //! Minimal time obtained from my_durations data - duration_t my_minTime; - - //! Minimal time obtained from my_durations data - duration_t my_maxTime; - - //! Standard deviation of my_avgTime value (per cent) - double my_stdDev; - - TimingSeries ( uintptr_t nruns = NumRuns ) - : my_durations(nruns), my_avgTime(0), my_minTime(0), my_maxTime(0) - {} - - void CalculateStatistics () { - Statistics( my_durations, my_avgTime, my_stdDev, my_minTime, my_maxTime ); - } - }; // struct TimingSeries - - //! Settings and timing results for a test run configuration - struct RunResults { - //! Run configuration settings - RunConfig my_config; - - //! Timing results for this run configuration - TimingSeries my_timing; - }; - - typedef std::vector names_t; - typedef std::vector timings_t; - typedef std::vector test_results_t; - - enum TestMethods { - idRunSerial = 0x01, - idOnStart = 0x02, - idOnFinish = 0x04, - idPrePostProcess = idOnStart | idOnFinish - }; - - //! Set of flags identifying methods not overridden by the currently active test - /** Used as a scratch var. **/ - uintptr_t g_absentMethods; - - //! Test object and timing results for all of its configurations - struct TestResults { - //! Pointer to the test object interface - Test* my_test; - - //! Set of flags identifying optional methods overridden by my_test - /** A set of ORed TestMethods flags **/ - uintptr_t my_availableMethods; - - //! Vector of serial times for each workload supported by this test - /** Element index in the vector serves as a zero based workload ID. **/ - timings_t my_serialBaselines; - - //! Common baselines for both parallel and serial variants - /** Element index in the vector serves as a zero based workload ID. **/ - timings_t my_baselines; - - //! Strings identifying workloads to be used in output - names_t my_workloadNames; - - //! 
Vector of timings for all run configurations of my_test - test_results_t my_results; - - const char* my_testName; - - mutable bool my_hasOwnership; - - TestResults ( Test* t, const char* className, bool takeOwnership ) - : my_test(t), my_availableMethods(0), my_testName(className), my_hasOwnership(takeOwnership) - {} - - TestResults ( const TestResults& tr ) - : my_test(tr.my_test) - , my_availableMethods(0) - , my_testName(tr.my_testName) - , my_hasOwnership(tr.my_hasOwnership) - { - tr.my_hasOwnership = false; - } - - ~TestResults () { - for ( size_t i = 0; i < my_workloadNames.size(); ++i ) - delete my_workloadNames[i]; - if ( my_hasOwnership ) - delete my_test; - } - }; // struct TestResults - - typedef std::vector session_t; - - session_t theSession; - - TimingSeries CalibrationTiming; - - const uintptr_t CacheSize = 8*1024*1024; - volatile intptr_t W[CacheSize]; - - struct WiperBody { - void operator()( int ) const { - volatile intptr_t sink = 0; - for ( uintptr_t i = 0; i < CacheSize; ++i ) - sink += W[i]; - } - }; - - void TraceHistogram ( const durations_t& t, const char* histogramFileName ) { - FILE* f = histogramFileName ? fopen(histogramFileName, "wt") : stdout; - uintptr_t n = t.size(); - const uintptr_t num_buckets = 100; - double min_val = *std::min_element(t.begin(), t.end()), - max_val = *std::max_element(t.begin(), t.end()), - bucket_size = (max_val - min_val) / num_buckets; - std::vector hist(num_buckets + 1, 0); - for ( uintptr_t i = 0; i < n; ++i ) - ++hist[uintptr_t((t[i]-min_val)/bucket_size)]; - ASSERT (hist[num_buckets] == 1, ""); - ++hist[num_buckets - 1]; - hist.resize(num_buckets); - fprintf (f, "Histogram: nvals = %u, min = %g, max = %g, nbuckets = %u\n", (unsigned)n, min_val, max_val, (unsigned)num_buckets); - double bucket = min_val; - for ( uintptr_t i = 0; i < num_buckets; ++i, bucket+=bucket_size ) - fprintf (f, "%12g\t%u\n", bucket, (unsigned)hist[i]); - fclose(f); - } - -#if _MSC_VER - typedef DWORD_PTR cpu_set_t; - - class AffinityHelper { - static const unsigned MaxAffinitySetSize = sizeof(cpu_set_t) * 8; - static unsigned AffinitySetSize; - - //! Mapping from a CPU index to a valid affinity cpu_mask - /** The first element is not used. **/ - static cpu_set_t m_affinities[MaxAffinitySetSize + 1]; - - static cpu_set_t m_processMask; - - class Initializer { - public: - Initializer () { - SYSTEM_INFO si; - GetSystemInfo(&si); - ASSERT( si.dwNumberOfProcessors <= MaxAffinitySetSize, "Too many CPUs" ); - AffinitySetSize = min (si.dwNumberOfProcessors, MaxAffinitySetSize); - cpu_set_t systemMask = 0; - GetProcessAffinityMask( GetCurrentProcess(), &m_processMask, &systemMask ); - cpu_set_t cpu_mask = 1; - for ( DWORD i = 0; i < AffinitySetSize; ++i ) { - while ( !(cpu_mask & m_processMask) && cpu_mask ) - cpu_mask <<= 1; - ASSERT( cpu_mask != 0, "Process affinity set is culled?" 
); - m_affinities[i] = cpu_mask; - cpu_mask <<= 1; - } - } - }; // class AffinityHelper::Initializer - - static Initializer m_initializer; - - public: - static cpu_set_t CpuAffinity ( int cpuIndex ) { - return m_affinities[cpuIndex % AffinitySetSize]; - } - - static const cpu_set_t& ProcessMask () { return m_processMask; } - }; // class AffinityHelper - - unsigned AffinityHelper::AffinitySetSize = 0; - cpu_set_t AffinityHelper::m_affinities[AffinityHelper::MaxAffinitySetSize + 1] = {0}; - cpu_set_t AffinityHelper::m_processMask = 0; - AffinityHelper::Initializer AffinityHelper::m_initializer; - - #define CPU_ZERO(cpu_mask) (*cpu_mask = 0) - #define CPU_SET(cpu_idx, cpu_mask) (*cpu_mask |= AffinityHelper::CpuAffinity(cpu_idx)) - #define CPU_CLR(cpu_idx, cpu_mask) (*cpu_mask &= ~AffinityHelper::CpuAffinity(cpu_idx)) - #define CPU_ISSET(cpu_idx, cpu_mask) ((*cpu_mask & AffinityHelper::CpuAffinity(cpu_idx)) != 0) - -#elif __linux__ /* end of _MSC_VER */ - - #include - #include - #include - - pid_t gettid() { return (pid_t)syscall(__NR_gettid); } - - #define GET_MASK(cpu_set) (*(unsigned*)(void*)&cpu_set) - #define RES_STAT(res) (res != 0 ? "failed" : "ok") - - class AffinityHelper { - static cpu_set_t m_processMask; - - class Initializer { - public: - Initializer () { - CPU_ZERO (&m_processMask); - int res = sched_getaffinity( getpid(), sizeof(cpu_set_t), &m_processMask ); - ASSERT ( res == 0, "sched_getaffinity failed" ); - } - }; // class AffinityHelper::Initializer - - static Initializer m_initializer; - - public: - static const cpu_set_t& ProcessMask () { return m_processMask; } - }; // class AffinityHelper - - cpu_set_t AffinityHelper::m_processMask; - AffinityHelper::Initializer AffinityHelper::m_initializer; -#endif /* __linux__ */ - - bool PinTheThread ( int cpu_idx, tbb::atomic& nThreads ) { - cpu_set_t orig_mask, target_mask; - CPU_ZERO( &target_mask ); - CPU_SET( cpu_idx, &target_mask ); - ASSERT ( CPU_ISSET(cpu_idx, &target_mask), "CPU_SET failed" ); - #if _MSC_VER - orig_mask = SetThreadAffinityMask( GetCurrentThread(), target_mask ); - if ( !orig_mask ) - return false; - #elif __linux__ - CPU_ZERO( &orig_mask ); - int res = sched_getaffinity( gettid(), sizeof(cpu_set_t), &orig_mask ); - ASSERT ( res == 0, "sched_getaffinity failed" ); - res = sched_setaffinity( gettid(), sizeof(cpu_set_t), &target_mask ); - ASSERT ( res == 0, "sched_setaffinity failed" ); - #endif /* _MSC_VER */ - --nThreads; - while ( nThreads ) - __TBB_Yield(); - #if _MSC_VER - SetThreadPriority (GetCurrentThread(), THREAD_PRIORITY_HIGHEST); - #endif - return true; - } - - class AffinitySetterTask : tbb::task { - static bool m_result; - static tbb::atomic m_nThreads; - int m_idx; - - tbb::task* execute () { - //TestAffinityOps(); - m_result = PinTheThread( m_idx, m_nThreads ); - return NULL; - } - - public: - AffinitySetterTask ( int idx ) : m_idx(idx) {} - - friend bool AffinitizeTBB ( int, int /*mode*/ ); - }; - - bool AffinitySetterTask::m_result = true; - tbb::atomic AffinitySetterTask::m_nThreads; - - bool AffinitizeTBB ( int p, int affMode ) { - #if _MSC_VER - SetThreadPriority (GetCurrentThread(), THREAD_PRIORITY_HIGHEST); - SetPriorityClass (GetCurrentProcess(), HIGH_PRIORITY_CLASS); - #endif - AffinitySetterTask::m_result = true; - AffinitySetterTask::m_nThreads = p; - tbb::task_list tl; - for ( int i = 0; i < p; ++i ) { - tbb::task &t = *new( tbb::task::allocate_root() ) AffinitySetterTask( affMode == amSparse ? 
i * NumCpus / p : i ); - t.set_affinity( tbb::task::affinity_id(i + 1) ); - tl.push_back( t ); - } - tbb::task::spawn_root_and_wait(tl); - return AffinitySetterTask::m_result; - } - - inline - void Affinitize ( int p, int affMode ) { - if ( !AffinitizeTBB (p, affMode) ) - REPORT("Warning: Failed to set affinity for %d TBB threads\n", p); - } - - class TbbWorkersTrapper { - tbb::atomic my_refcount; - tbb::task *my_root; - tbb::task_group_context my_context; - Harness::SpinBarrier my_barrier; - - friend class TrapperTask; - - class TrapperTask : public tbb::task { - TbbWorkersTrapper& my_owner; - - tbb::task* execute () { - my_owner.my_barrier.wait(); - my_owner.my_root->wait_for_all(); - my_owner.my_barrier.wait(); - return NULL; - } - public: - TrapperTask ( TbbWorkersTrapper& owner ) : my_owner(owner) {} - }; - - public: - TbbWorkersTrapper () - : my_context(tbb::task_group_context::bound, - tbb::task_group_context::default_traits | tbb::task_group_context::concurrent_wait) - { - my_root = new ( tbb::task::allocate_root(my_context) ) tbb::empty_task; - my_root->set_ref_count(2); - my_barrier.initialize(NumThreads); - for ( int i = 1; i < NumThreads; ++i ) - tbb::task::spawn( *new(tbb::task::allocate_root()) TrapperTask(*this) ); - my_barrier.wait(); // Wait util all workers are ready - } - - ~TbbWorkersTrapper () { - my_root->decrement_ref_count(); - my_barrier.wait(); // Make sure no tasks are referencing us - tbb::task::destroy(*my_root); - } - }; // TbbWorkersTrapper - - -#if __TBB_STATISTICS - static bool StatisticsMode = true; -#else - static bool StatisticsMode = false; -#endif - -//! Suppresses silly warning -inline bool __TBB_bool( bool b ) { return b; } - -#define START_WORKERS(needScheduler, p, a, setWorkersAffinity, trapWorkers) \ - tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); \ - TbbWorkersTrapper *trapper = NULL; \ - if ( theSettings.my_opts & UseTaskScheduler \ - && (needScheduler) && ((setWorkersAffinity) || (trapWorkers)) ) \ - { \ - init.initialize( p ); \ - if ( __TBB_bool(setWorkersAffinity) ) \ - Affinitize( p, a ); \ - if ( __TBB_bool(trapWorkers) ) \ - trapper = new TbbWorkersTrapper; \ - } - -#define STOP_WORKERS() \ - if ( theSettings.my_opts & UseTaskScheduler && init.is_active() ) { \ - if ( trapper ) \ - delete trapper; \ - init.terminate(); \ - /* Give asynchronous deinitialization time to complete */ \ - Harness::Sleep(50); \ - } - - typedef void (Test::*RunMemFnPtr)( Test::ThreadInfo& ); - - TimingSeries *TlsTimings; - Harness::SpinBarrier multipleMastersBarrier; - - class TimingFunctor { - Test* my_test; - RunConfig *my_cfg; - RunMemFnPtr my_fnRun; - size_t my_numRuns; - size_t my_numRepeats; - uintptr_t my_availableMethods; - - duration_t TimeSingleRun ( Test::ThreadInfo& ti ) const { - if ( my_availableMethods & idOnStart ) - my_test->OnStart(ti); - multipleMastersBarrier.wait(); - tbb::tick_count t0 = tbb::tick_count::now(); - (my_test->*my_fnRun)(ti); - duration_t t = (tbb::tick_count::now() - t0).seconds(); - if ( my_availableMethods & idOnFinish ) - my_test->OnFinish(ti); - return t; - } - - public: - TimingFunctor ( Test* test, RunConfig *cfg, RunMemFnPtr fnRun, - size_t numRuns, size_t nRepeats, uintptr_t availableMethods ) - : my_test(test), my_cfg(cfg), my_fnRun(fnRun) - , my_numRuns(numRuns), my_numRepeats(nRepeats), my_availableMethods(availableMethods) - {} - - void operator()( int tid ) const { - Test::ThreadInfo ti = { tid, NULL }; - durations_t &d = TlsTimings[tid].my_durations; - bool singleMaster = 
my_cfg->my_numMasters == 1; - START_WORKERS( !singleMaster || singleMaster && StatisticsMode, - my_cfg->my_numThreads, my_cfg->my_affinityMode, singleMaster, singleMaster ); - for ( uintptr_t k = 0; k < my_numRuns; ++k ) { - if ( my_numRepeats > 1 ) { - d[k] = 0; - if ( my_availableMethods & idPrePostProcess ) { - for ( uintptr_t i = 0; i < my_numRepeats; ++i ) - d[k] += TimeSingleRun(ti); - } - else { - multipleMastersBarrier.wait(); - tbb::tick_count t0 = tbb::tick_count::now(); - for ( uintptr_t i = 0; i < my_numRepeats; ++i ) - (my_test->*my_fnRun)(ti); - d[k] = (tbb::tick_count::now() - t0).seconds(); - } - d[k] /= my_numRepeats; - } - else - d[k] = TimeSingleRun(ti); - } - STOP_WORKERS(); - TlsTimings[tid].CalculateStatistics(); - } - }; // class TimingFunctor - - void DoTiming ( TestResults& tr, RunConfig &cfg, RunMemFnPtr fnRun, size_t nRepeats, TimingSeries& ts ) { - int numThreads = cfg.NumMasters(); - size_t numRuns = ts.my_durations.size() / numThreads; - TimingFunctor body( tr.my_test, &cfg, fnRun, numRuns, nRepeats, tr.my_availableMethods ); - multipleMastersBarrier.initialize(numThreads); - tr.my_test->SetWorkload(cfg.my_workloadID); - if ( numThreads == 1 ) { - TimingSeries *t = TlsTimings; - TlsTimings = &ts; - body(0); - TlsTimings = t; - } - else { - ts.my_durations.resize(numThreads * numRuns); - NativeParallelFor( numThreads, body ); - for ( int i = 0, j = 0; i < numThreads; ++i ) { - durations_t &d = TlsTimings[i].my_durations; - for ( size_t k = 0; k < numRuns; ++k, ++j ) - ts.my_durations[j] = d[k]; - } - ts.CalculateStatistics(); - } - } - - //! Runs the test function, does statistical processing, and, if title is nonzero, prints results. - /** If histogramFileName is a string, the histogram of individual runs is generated and stored - in a file with the given name. If it is NULL then the histogram is printed on the console. - By default no histogram is generated. - The histogram format is: "rate bucket start" "number of tests in this bucket". **/ - void RunTestImpl ( TestResults& tr, RunConfig &cfg, RunMemFnPtr pfnTest, TimingSeries& ts ) { - // nRepeats is a number of repeated calls to the test function made as - // part of the same run. It is determined experimentally by the following - // calibration process so that the total run time was approx. RunDuration. - // This is helpful to increase the measurement precision in case of very - // short tests. - size_t nRepeats = 1; - // A minimal stats is enough when doing calibration - CalibrationTiming.my_durations.resize( (NumRuns < 4 ? NumRuns : 3) * cfg.NumMasters() ); - // There's no need to be too precise when calculating nRepeats. And reasonably - // far extrapolation can speed up the process significantly. 
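    // A worked example with purely hypothetical numbers (actual values depend on
    // the test and on RunDuration, which is configured elsewhere): if one call to
    // the test function takes ~2e-6 s, the doubling loop below exits at
    // nRepeats = 64, because 64 * 2e-6 = 1.28e-4 > 1e-4. Assuming RunDuration is
    // 0.01 s, the extrapolation step then multiplies nRepeats by
    // ceil( 0.01 / 1.28e-4 ) = 79, so nRepeats becomes 64 * 79 = 5056 and a single
    // timed run lasts about 5056 * 2e-6 ~ 0.01 s, i.e. approximately RunDuration.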
- for (;;) { - DoTiming( tr, cfg, pfnTest, nRepeats, CalibrationTiming ); - if ( CalibrationTiming.my_avgTime * nRepeats > 1e-4 ) - break; - nRepeats *= 2; - } - nRepeats *= (uintptr_t)ceil( RunDuration / (CalibrationTiming.my_avgTime * nRepeats) ); - - DoTiming(tr, cfg, pfnTest, nRepeats, ts); - - // No histogram for baseline measurements - if ( pfnTest != &Test::RunSerial && pfnTest != &Test::Baseline ) { - const char* histogramName = theSettings.my_histogramName; - if ( histogramName != NoHistogram && tr.my_test->HistogramName() != DefaultHistogram ) - histogramName = tr.my_test->HistogramName(); - if ( histogramName != NoHistogram ) - TraceHistogram( ts.my_durations, histogramName ); - } - } // RunTestImpl - - typedef void (*TestActionFn) ( TestResults&, int mastersRange, int w, int p, int m, int a, int& numTests ); - - int TestResultIndex ( int mastersRange, int w, int p, int m, int a ) { - return ((w * (MaxThread - MinThread + 1) + (p - MinThread)) * mastersRange + m) * NumActiveAffModes + a; - } - - void RunTest ( TestResults& tr, int mastersRange, int w, int p, int m, int a, int& numTests ) { - size_t r = TestResultIndex(mastersRange, w, p, m, a); - ASSERT( r < tr.my_results.size(), NULL ); - RunConfig &rc = tr.my_results[r].my_config; - rc.my_maxConcurrency = MaxConcurrency; - rc.my_numThreads = p; - rc.my_numMasters = m + tr.my_test->MinNumMasters(); - rc.my_affinityMode = a; - rc.my_workloadID = w; - RunTestImpl( tr, rc, &Test::Run, tr.my_results[r].my_timing ); - printf( "Running tests: %04.1f%%\r", ++numTests * 100. / TotalConfigs ); fflush(stdout); - } - - void WalkTests ( TestActionFn fn, int& numTests, bool setAffinity, bool trapWorkers, bool multipleMasters ) { - for ( int p = MinThread; p <= MaxThread; ++p ) { - NumThreads = p; - MaxConcurrency = p < NumCpus ? p : NumCpus; - for ( int a = 0; a < NumActiveAffModes; ++a ) { - START_WORKERS( multipleMasters || !StatisticsMode, p, a, setAffinity, trapWorkers ); - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - Test *t = tr.my_test; - int mastersRange = t->MaxNumMasters() - t->MinNumMasters() + 1; - for ( int w = 0; w < t->NumWorkloads(); ++w ) { - if ( multipleMasters ) - for ( int m = 1; m < mastersRange; ++m ) - fn( tr, mastersRange, w, p, m, a, numTests ); - else - fn( tr, mastersRange, w, p, 0, a, numTests ); - } - } - STOP_WORKERS(); - } - } - } - - void RunTests () { - int numTests = 0; - WalkTests( &RunTest, numTests, !StatisticsMode, !StatisticsMode, false ); - if ( MaxTbbMasters > 1 ) - WalkTests( &RunTest, numTests, true, false, true ); - } - - void InitTestData ( TestResults& tr, int mastersRange, int w, int p, int m, int a, int& ) { - size_t r = TestResultIndex(mastersRange, w, p, m, a); - ASSERT( r < tr.my_results.size(), NULL ); - tr.my_results[r].my_timing.my_durations.resize( - (theSettings.my_opts & UseTaskScheduler ? tr.my_test->MinNumMasters() + m : p) * NumRuns ); - } - - char WorkloadName[MaxWorkloadNameLen + 1]; - - void PrepareTests () { - printf( "Initializing...\r" ); - NumActiveAffModes = theSettings.my_opts & UseAffinityModes ? 
NumAffinitizationModes : 1; - TotalConfigs = 0; - TitleFieldLen = strlen( TestNameColumnTitle ); - WorkloadFieldLen = strlen( WorkloadNameColumnTitle ); - int numThreads = MaxThread - MinThread + 1; - int numConfigsBase = numThreads * NumActiveAffModes; - int totalWorkloads = 0; - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - Test &t = *tr.my_test; - int numWorkloads = t.NumWorkloads(); - int numConfigs = numConfigsBase * numWorkloads; - if ( t.MaxNumMasters() > 1 ) { - ASSERT( theSettings.my_opts & UseTaskScheduler, "Multiple masters mode is only valid for task scheduler tests" ); - if ( MaxTbbMasters < t.MaxNumMasters() ) - MaxTbbMasters = t.MaxNumMasters(); - numConfigs *= t.MaxNumMasters() - t.MinNumMasters() + 1; - } - totalWorkloads += numWorkloads; - TotalConfigs += numConfigs; - - const char* testName = t.Name(); - if ( testName ) - tr.my_testName = testName; - ASSERT( tr.my_testName, "Neither Test::Name() is implemented, nor RTTI is enabled" ); - TitleFieldLen = max( TitleFieldLen, strlen(tr.my_testName) ); - - tr.my_results.resize( numConfigs ); - tr.my_serialBaselines.resize( numWorkloads ); - tr.my_baselines.resize( numWorkloads ); - tr.my_workloadNames.resize( numWorkloads ); - } - TimingSeries tmpTiming; - TlsTimings = &tmpTiming; // All measurements are serial here - int n = 0; - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - Test &t = *tr.my_test; - // Detect which methods are overridden by the test implementation - g_absentMethods = 0; - Test::ThreadInfo ti = { 0 }; - t.SetWorkload(0); - t.OnStart(ti); - t.RunSerial(ti); - t.OnFinish(ti); - if ( theSettings.my_opts & UseSerialBaseline && !(g_absentMethods & idRunSerial) ) - tr.my_availableMethods |= idRunSerial; - if ( !(g_absentMethods & idOnStart) ) - tr.my_availableMethods |= idOnStart; - - RunConfig rc = { 1, 1, 1, 0, 0 }; - for ( int w = 0; w < t.NumWorkloads(); ++w ) { - WorkloadName[0] = 0; - t.SetWorkload(w); - if ( !WorkloadName[0] ) - sprintf( WorkloadName, "%d", w ); - size_t len = strlen(WorkloadName); - tr.my_workloadNames[w] = new char[len + 1]; - strcpy ( (char*)tr.my_workloadNames[w], WorkloadName ); - WorkloadFieldLen = max( WorkloadFieldLen, len ); - - rc.my_workloadID = w; - if ( theSettings.my_opts & UseBaseline ) - RunTestImpl( tr, rc, &Test::Baseline, tr.my_baselines[w] ); - if ( tr.my_availableMethods & idRunSerial ) - RunTestImpl( tr, rc, &Test::RunSerial, tr.my_serialBaselines[w] ); - printf( "Measuring baselines: %04.1f%%\r", ++n * 100. / totalWorkloads ); fflush(stdout); - } - } - TlsTimings = new TimingSeries[MaxThread + MaxTbbMasters - 1]; - if ( theSettings.my_opts & UseTaskScheduler ? MaxTbbMasters : MaxThread ) - WalkTests( &InitTestData, n, false, false, theSettings.my_opts & UseTaskScheduler ? true : false ); - CalibrationTiming.my_durations.reserve( MaxTbbMasters * 3 ); - printf( " \r"); - } - - FILE* ResFile = NULL; - - void Report ( char const* fmt, ... ) { - va_list args; - va_start( args, fmt ); - if ( ResFile ) - vfprintf( ResFile, fmt, args ); - va_start( args, fmt ); - vprintf( fmt, args ); - } - - void PrintResults () { - if ( theSettings.my_resFile ) - ResFile = fopen( theSettings.my_resFile, "w" ); - Report( "%-*s %-*s %s", TitleFieldLen, "Test name", WorkloadFieldLen, "Workload", - MaxTbbMasters > 1 ? 
"W M " : "T " ); - if ( theSettings.my_opts & UseAffinityModes ) - Report( "Aff " ); - Report( "%-*s SD, %% %-*s %-*s %-*s ", - RateFieldLen, "Avg.time", OvhdFieldLen, "Par.ovhd,%", - RateFieldLen, "Min.time", RateFieldLen, "Max.time" ); - Report( " | Repeats = %lu, CPUs %d\n", (unsigned long)NumRuns, NumCpus ); - for ( size_t i = 0; i < theSession.size(); ++i ) { - TestResults &tr = theSession[i]; - for ( size_t j = 0; j < tr.my_results.size(); ++j ) { - RunResults &rr = tr.my_results[j]; - RunConfig &rc = rr.my_config; - int w = rc.my_workloadID; - TimingSeries &ts = rr.my_timing; - duration_t baselineTime = tr.my_baselines[w].my_avgTime, - cleanTime = ts.my_avgTime - baselineTime; - Report( "%-*s %-*s ", TitleFieldLen, tr.my_testName, WorkloadFieldLen, tr.my_workloadNames[w] ); - if ( MaxTbbMasters > 1 ) - Report( "%-4d %-4d ", rc.my_numThreads - 1, rc.my_numMasters ); - else - Report( "%-4d ", rc.my_numThreads ); - if ( theSettings.my_opts & UseAffinityModes ) - Report( "%%-8s ", AffinitizationModeNames[rc.my_affinityMode] ); - Report( "%-*.2e %-6.1f ", RateFieldLen, cleanTime, ts.my_stdDev); - if ( tr.my_availableMethods & idRunSerial ) { - duration_t serialTime = (tr.my_serialBaselines[w].my_avgTime - baselineTime) / rc.my_maxConcurrency; - Report( "%-*.1f ", OvhdFieldLen, 100*(cleanTime - serialTime)/serialTime ); - } - else - Report( "%*s%*s ", OvhdFieldLen/2, "-", OvhdFieldLen - OvhdFieldLen/2, "" ); - Report( "%-*.2e %-*.2e ", RateFieldLen, ts.my_minTime - baselineTime, RateFieldLen, ts.my_maxTime - baselineTime); - Report( "\n" ); - } - } - delete [] TlsTimings; - if ( ResFile ) - fclose(ResFile); - } - - __TBB_PERF_API void RegisterTest ( Test* t, const char* className, bool takeOwnership ) { - // Just collect test objects at this stage - theSession.push_back( TestResults(t, className, takeOwnership) ); - } - -} // namespace internal - -__TBB_PERF_API void Test::Baseline ( ThreadInfo& ) {} - -__TBB_PERF_API void Test::RunSerial ( ThreadInfo& ) { internal::g_absentMethods |= internal::idRunSerial; } - -__TBB_PERF_API void Test::OnStart ( ThreadInfo& ) { internal::g_absentMethods |= internal::idOnStart; } - -__TBB_PERF_API void Test::OnFinish ( ThreadInfo& ) { internal::g_absentMethods |= internal::idOnFinish; } - -__TBB_PERF_API void WipeCaches () { NativeParallelFor( NumCpus, internal::WiperBody() ); } - -__TBB_PERF_API void EmptyFunc () {} -__TBB_PERF_API void AnchorFunc ( void* ) {} -__TBB_PERF_API void AnchorFunc2 ( void*, void* ) {} - -__TBB_PERF_API void SetWorkloadName( const char* format, ... 
) { - internal::WorkloadName[MaxWorkloadNameLen] = 0; - va_list args; - va_start(args, format); - vsnprintf( internal::WorkloadName, MaxWorkloadNameLen, format, args ); - va_end(args); -} - - -__TBB_PERF_API int TestMain( int argc, char* argv[], const SessionSettings* defaultSettings ) { -#if _MSC_VER - HANDLE hMutex = CreateMutex( NULL, FALSE, "Global\\TBB_OMP_PerfSession" ); - WaitForSingleObject( hMutex, INFINITE ); -#endif - MinThread = MaxThread = NumCpus; - if ( defaultSettings ) - theSettings = *defaultSettings; - ParseCommandLine( argc, argv ); // May override data in theSettings - - internal::PrepareTests (); - internal::RunTests (); - internal::PrintResults(); - REPORT("\n"); -#if _MSC_VER - ReleaseMutex( hMutex ); - CloseHandle( hMutex ); -#endif - return 0; -} - -} // namespace Perf diff --git a/deal.II/bundled/tbb30_104oss/src/perf/perf.h b/deal.II/bundled/tbb30_104oss/src/perf/perf.h deleted file mode 100644 index a4b0858915..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/perf.h +++ /dev/null @@ -1,265 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - - -#ifndef __tbb_perf_h__ -#define __tbb_perf_h__ - -#ifndef TBB_PERF_TYPEINFO -#define TBB_PERF_TYPEINFO 1 -#endif - -#if TBB_PERF_TYPEINFO - #include - #define __TBB_PERF_TEST_CLASS_NAME(T) typeid(T).name() -#else /* !TBB_PERF_TYPEINFO */ - #define __TBB_PERF_TEST_CLASS_NAME(T) NULL -#endif /* !TBB_PERF_TYPEINFO */ - - -#include "tbb/tick_count.h" - -// TODO: Fix build scripts to provide more reliable build phase identification means -#ifndef __TBB_PERF_API -#if _USRDLL - #if _MSC_VER - #define __TBB_PERF_API __declspec(dllexport) - #else /* !_MSC_VER */ - #define __TBB_PERF_API - #endif /* !_MSC_VER */ -#else /* !_USRDLL */ - #if _MSC_VER - #define __TBB_PERF_API __declspec(dllimport) - #else /* !_MSC_VER */ - #define __TBB_PERF_API - #endif /* !_MSC_VER */ -#endif /* !_USRDLL */ -#endif /* !__TBB_PERF_API */ - -#if _WIN32||_WIN64 - -namespace Perf { - typedef unsigned __int64 tick_t; - #if defined(_M_AMD64) - inline tick_t rdtsc () { return __rdtsc(); } - #elif _M_IX86 - inline tick_t rdtsc () { __asm { rdtsc } } - #else - #error Unsupported ISA - #endif -} // namespace Perf - -#elif __linux__ || __APPLE__ - -#include - -namespace Perf { - typedef uint64_t tick_t; - #if __x86_64__ || __i386__ || __i386 - inline tick_t rdtsc () { - uint32_t lo, hi; - __asm__ __volatile__ ( "rdtsc" : "=a" (lo), "=d" (hi) ); - return (tick_t)lo | ((tick_t)hi) << 32; - } - #else - #error Unsupported ISA - #endif -} // namespace Perf - -#else - #error Unsupported OS -#endif /* OS */ - -__TBB_PERF_API extern int NumThreads, - MaxConcurrency, - NumCpus; - -// Functions and global variables provided by the benchmarking framework -namespace Perf { - -typedef double duration_t; - -static const int MaxWorkloadNameLen = 64; - -static const char* NoHistogram = (char*)-1; -static const char* DefaultHistogram = (char*)-2; - -__TBB_PERF_API void AnchorFunc ( void* ); -__TBB_PERF_API void AnchorFunc2 ( void*, void* ); - -//! Helper that can be used in the preprocess handler to clean caches -/** Cleaning caches is necessary to obtain reproducible results when a test - accesses significant ranges of memory. **/ -__TBB_PERF_API void WipeCaches (); - -//! Specifies the name to be used to designate the current workload in output -/** Should be used from Test::SetWorkload(). If necessary workload name will be - truncated to MaxWorkloadNameLen characters. **/ -__TBB_PERF_API void SetWorkloadName( const char* format, ... ); - -class __TBB_PERF_API Test { -public: - virtual ~Test () {} - - //! Struct used by tests running in multiple masters mode - struct ThreadInfo { - //! Zero based thread ID - int tid; - //! Pointer to test specific data - /** If used by the test, should be initialized by OnStartLocal(), and - finalized by OnFinishLocal(). **/ - void* data; - }; - - //////////////////////////////////////////////////////////////////////////////// - // Mandatory methods - - //! Returns the number of workloads supported - virtual int NumWorkloads () = 0; - - //! Set workload info for the subsequent calls to Run() and RunSerial() - /** This method can use global helper function Perf::SetWorkloadName() in order - to specify the name of the current workload, which will be used in output - to designate the workload. If SetWorkloadName is not called, workloadIndex - will be used for this purpose. - - When testing task scheduler, make sure that this method does not trigger - its automatic initialization. **/ - virtual void SetWorkload ( int workloadIndex ) = 0; - - //! 
Test implementation - /** Called by the timing framework several times in a loop to achieve approx. - RunDuration time, and this loop is timed NumRuns times to collect statistics. - Argument ti specifies information about the master thread calling this method. **/ - virtual void Run ( ThreadInfo& ti ) = 0; - - //////////////////////////////////////////////////////////////////////////////// - // Optional methods - - //! Returns short title string to be used in the regular output to identify the test - /** Should uniquely identify the test among other ones in the given benchmark suite. - If not implemented, the test implementation class' RTTI name is used. **/ - virtual const char* Name () { return NULL; }; - - //! Returns minimal number of master threads - /** Used for task scheduler tests only (when UseTbbScheduler option is specified - in session settings). **/ - virtual int MinNumMasters () { return 1; } - - //! Returns maximal number of master threads - /** Used for task scheduler tests only (when UseTbbScheduler option is specified - in session settings). **/ - virtual int MaxNumMasters () { return 1; } - - //! Executes serial workload equivalent to the one processed by Run() - /** Called by the timing framework several times in a loop to collect statistics. **/ - virtual void RunSerial ( ThreadInfo& ti ); - - //! Invoked before each call to Run() - /** Can be used to preinitialize data necessary for the test, clean up - caches (see Perf::WipeCaches), etc. - In multiple masters mode this method is called on each thread. **/ - virtual void OnStart ( ThreadInfo& ti ); - - //! Invoked after each call to Run() - /** Can be used to free resources allocated by OnStart(). - Note that this method must work correctly independently of whether Run(), - RunSerial() or nothing is called between OnStart() and OnFinish(). - In multiple masters mode this method is called on each thread. **/ - virtual void OnFinish ( ThreadInfo& ti ); - - //! Functionality, the cost of which has to be factored out from timing results - /** Applies to both parallel and serial versions. **/ - virtual void Baseline ( ThreadInfo& ); - - //! Returns description string to be used in the benchmark info/summary output - virtual const char* Description () { return NULL; } - - //! Specifies if the histogram of individual run times in a series - /** If the method is not overridden, histogramName argument of TestMain is used. **/ - virtual const char* HistogramName () { return DefaultHistogram; } -}; // class Test - -namespace internal { - __TBB_PERF_API void RegisterTest ( Test*, const char* testClassName, bool takeOwnership ); -} - -template -void RegisterTest() { internal::RegisterTest( new T, __TBB_PERF_TEST_CLASS_NAME(T), true ); } - -template -void RegisterTest( T& t ) { internal::RegisterTest( &t, __TBB_PERF_TEST_CLASS_NAME(T), false ); } - -enum SessionOptions { - //! Use Test::RunSerial if present - UseBaseline = 0x01, - UseSerialBaseline = 0x02, - UseBaselines = UseBaseline | UseSerialBaseline, - UseTaskScheduler = 0x10, - UseAffinityModes = 0x20 -}; - -struct SessionSettings { - //! A combination of SessionOptions flags - uintptr_t my_opts; - - //! Name of a file to store performance results - /** These results are duplicates of what is printed on the console. **/ - const char* my_resFile; - - //! Output destination for the histogram of individual run times in a series - /** If it is a string, the histogram is stored in a file with such name. - If it is NULL, the histogram is printed on the console. 
By default histograms - are suppressed. - - The histogram is formatted as two column table: - "time bucket start" "number of tests in this bucket" - - When this setting enables histogram generation, an individual test - can override it by implementing HistogramName method. **/ - const char* my_histogramName; - - SessionSettings ( uintptr_t opts = 0, const char* resFile = NULL, const char* histogram = NoHistogram ) - : my_opts(opts) - , my_resFile(resFile) - , my_histogramName(histogram) - {} -}; // struct SessionSettings - -//! Benchmarking session entry point -/** Executes all the individual tests registered previously by means of - RegisterTest **/ -__TBB_PERF_API int TestMain( int argc, char* argv[], - const SessionSettings* defaultSettings = NULL ); - - -} // namespace Perf - -#endif /* __tbb_perf_h__ */ - - diff --git a/deal.II/bundled/tbb30_104oss/src/perf/perf_sched.cpp b/deal.II/bundled/tbb30_104oss/src/perf/perf_sched.cpp deleted file mode 100644 index 5cf228647b..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/perf_sched.cpp +++ /dev/null @@ -1,423 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "perf.h" - -#include - -#include "tbb/blocked_range.h" -#include "tbb/parallel_for.h" -#include "tbb/parallel_reduce.h" - -#define NUM_CHILD_TASKS 2096 -#define NUM_ROOT_TASKS 256 - -#define N 100000000 -#define FINEST_GRAIN 10 -#define FINE_GRAIN 50 -#define MED_GRAIN 200 -#define COARSE_GRAIN 1000 - - -typedef int count_t; - -const count_t N_finest = (count_t)(N/log((double)N)/10); -const count_t N_fine = N_finest * 20; -const count_t N_med = N_fine * (count_t)log((double)N) / 5; - -class StaticTaskHolder { -public: - tbb::task *my_SimpleLeafTaskPtr; - StaticTaskHolder (); -}; - -static StaticTaskHolder s_tasks; - -static count_t NumIterations; -static count_t NumLeafTasks; -static count_t NumRootTasks; - -class SimpleLeafTask : public tbb::task { - task* execute () { - volatile count_t anchor = 0; - for ( count_t i=0; i < NumIterations; ++i ) - anchor += i; - return NULL; - } -public: - SimpleLeafTask ( count_t ) {} -}; - -StaticTaskHolder::StaticTaskHolder () { - static SimpleLeafTask s_t1(0); - my_SimpleLeafTaskPtr = &s_t1; -} - -class Test_SPMC : public Perf::Test { -protected: - static const int numWorkloads = 4; - static const count_t workloads[numWorkloads]; - - const char* Name () { return "SPMC"; } - - int NumWorkloads () { return numWorkloads; } - - void SetWorkload ( int idx ) { - NumRootTasks = 1; - NumIterations = workloads[idx]; - NumLeafTasks = NUM_CHILD_TASKS * NUM_ROOT_TASKS / (NumIterations > 1000 ? 32 : 8); - Perf::SetWorkloadName( "%d x %d", NumLeafTasks, NumIterations ); - } - - void Run ( ThreadInfo& ) { - tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - r.set_ref_count( NumLeafTasks + 1 ); - for ( count_t i = 0; i < NumLeafTasks; ++i ) - r.spawn( *new(r.allocate_child()) SimpleLeafTask(0) ); - r.wait_for_all(); - tbb::task::destroy(r); - } - - void RunSerial ( ThreadInfo& ) { - const count_t n = NumLeafTasks * NumRootTasks; - for ( count_t i=0; i < n; ++i ) - s_tasks.my_SimpleLeafTaskPtr->execute(); - } -}; // class Test_SPMC - -const count_t Test_SPMC::workloads[Test_SPMC::numWorkloads] = { 1, 50, 500, 5000 }; - -template -class LeavesLauncherTask : public tbb::task { - count_t my_groupId; - - task* execute () { - count_t base = my_groupId * NumLeafTasks; - set_ref_count(NumLeafTasks + 1); - for ( count_t i = 0; i < NumLeafTasks; ++i ) - spawn( *new(allocate_child()) LeafTask(base + i) ); - wait_for_all(); - return NULL; - } -public: - LeavesLauncherTask ( count_t groupId ) : my_groupId(groupId) {} -}; - -template -void RunShallowTree () { - tbb::empty_task &r = *new( tbb::task::allocate_root() ) tbb::empty_task; - r.set_ref_count( NumRootTasks + 1 ); - for ( count_t i = 0; i < NumRootTasks; ++i ) - r.spawn( *new(r.allocate_child()) LeavesLauncherTask(i) ); - r.wait_for_all(); - tbb::task::destroy(r); -} - -class Test_ShallowTree : public Test_SPMC { - const char* Name () { return "ShallowTree"; } - - void SetWorkload ( int idx ) { - NumRootTasks = NUM_ROOT_TASKS; - NumIterations = workloads[idx]; - NumLeafTasks = NumIterations > 200 ? NUM_CHILD_TASKS / 10 : - (NumIterations > 50 ? 
NUM_CHILD_TASKS / 2 : NUM_CHILD_TASKS * 2); - Perf::SetWorkloadName( "%d x %d", NumRootTasks * NumLeafTasks, NumIterations ); - } - - void Run ( ThreadInfo& ) { - RunShallowTree(); - } -}; // class Test_ShallowTree - -class LeafTaskSkewed : public tbb::task { - count_t my_ID; - - task* execute () { - volatile count_t anchor = 0; - double K = (double)NumRootTasks * NumLeafTasks; - count_t n = count_t(sqrt(double(my_ID)) * double(my_ID) * my_ID / (4 * K * K)); - for ( count_t i = 0; i < n; ++i ) - anchor += i; - return NULL; - } -public: - LeafTaskSkewed ( count_t id ) : my_ID(id) {} -}; - -class Test_ShallowTree_Skewed : public Perf::Test { - const char* Name () { return "ShallowTree_Skewed"; } - - int NumWorkloads () { return 1; } - - void SetWorkload ( int ) { - NumRootTasks = NUM_ROOT_TASKS; - NumLeafTasks = NUM_CHILD_TASKS; - Perf::SetWorkloadName( "%d", NumRootTasks * NumLeafTasks ); - } - - void Run ( ThreadInfo& ) { - RunShallowTree(); - } -}; // class Test_ShallowTree_Skewed - -typedef tbb::blocked_range range_t; - -static count_t IterRange = N, - IterGrain = 1; - -enum PartitionerType { - SimplePartitioner = 0, - AutoPartitioner = 1 -}; - -class Test_Algs : public Perf::Test { -protected: - static const int numWorkloads = 4; - static const count_t algRanges[numWorkloads]; - static const count_t algGrains[numWorkloads]; - - tbb::simple_partitioner my_simplePartitioner; - tbb::auto_partitioner my_autoPartitioner; - PartitionerType my_partitionerType; - - bool UseAutoPartitioner () const { return my_partitionerType == AutoPartitioner; } - - int NumWorkloads () { return UseAutoPartitioner() ? 3 : numWorkloads; } - - void SetWorkload ( int idx ) { - if ( UseAutoPartitioner() ) { - IterRange = algRanges[idx ? numWorkloads - 1 : 0]; - IterGrain = idx > 1 ? algGrains[numWorkloads - 1] : 1; - } - else { - IterRange = algRanges[idx]; - IterGrain = algGrains[idx]; - } - Perf::SetWorkloadName( "%d / %d", IterRange, IterGrain ); - } -public: - Test_Algs ( PartitionerType pt = SimplePartitioner ) : my_partitionerType(pt) {} -}; // class Test_Algs - -const count_t Test_Algs::algRanges[] = {N_finest, N_fine, N_med, N}; -const count_t Test_Algs::algGrains[] = {1, FINE_GRAIN, MED_GRAIN, COARSE_GRAIN}; - -template -class Test_PFor : public Test_Algs { -protected: - void Run ( ThreadInfo& ) { - if ( UseAutoPartitioner() ) - tbb::parallel_for( range_t(0, IterRange, IterGrain), Body(), my_autoPartitioner ); - else - tbb::parallel_for( range_t(0, IterRange, IterGrain), Body(), my_simplePartitioner ); - } - - void RunSerial ( ThreadInfo& ) { - Body body; - body( range_t(0, IterRange, IterGrain) ); - } -public: - Test_PFor ( PartitionerType pt = SimplePartitioner ) : Test_Algs(pt) {} -}; // class Test_PFor - -class SimpleForBody { -public: - void operator()( const range_t& r ) const { - count_t end = r.end(); - volatile count_t anchor = 0; - for( count_t i = r.begin(); i < end; ++i ) - anchor += i; - } -}; // class SimpleForBody - -class Test_PFor_Simple : public Test_PFor { -protected: - const char* Name () { return UseAutoPartitioner() ? 
"PFor-AP" : "PFor"; } -public: - Test_PFor_Simple ( PartitionerType pt = SimplePartitioner ) : Test_PFor(pt) {} -}; // class Test_PFor_Simple - -class SkewedForBody { -public: - void operator()( const range_t& r ) const { - count_t end = r.end() * r.end(); - volatile count_t anchor = 0; - for( count_t i = r.begin() * r.begin(); i < end; ++i ) - anchor += i; - } -}; // class SkewedForBody - -class Test_PFor_Skewed : public Test_PFor { - typedef Test_PFor base_type; -protected: - const char* Name () { return UseAutoPartitioner() ? "PFor-Skewed-AP" : "PFor-Skewed"; } - - void SetWorkload ( int idx ) { - base_type::SetWorkload(idx); - IterRange = (count_t)(sqrt((double)IterRange) * sqrt(sqrt((double)N / IterRange))); - Perf::SetWorkloadName( "%d", IterRange ); - } - -public: - Test_PFor_Skewed ( PartitionerType pt = SimplePartitioner ) : base_type(pt) {} -}; // class Test_PFor_Skewed - -PartitionerType gPartitionerType; -count_t NestingRange; -count_t NestingGrain; - -class NestingForBody { - count_t my_depth; - tbb::simple_partitioner my_simplePartitioner; - tbb::auto_partitioner my_autoPartitioner; - - template - void run ( const range_t& r, Partitioner& p ) const { - count_t end = r.end(); - if ( my_depth > 1 ) - for ( count_t i = r.begin(); i < end; ++i ) - tbb::parallel_for( range_t(0, IterRange, IterGrain), NestingForBody(my_depth - 1), p ); - else - for ( count_t i = r.begin(); i < end; ++i ) - tbb::parallel_for( range_t(0, IterRange, IterGrain), SimpleForBody(), p ); - } -public: - void operator()( const range_t& r ) const { - if ( gPartitionerType == AutoPartitioner ) - run( r, my_autoPartitioner ); - else - run( r, my_simplePartitioner ); - } - NestingForBody ( count_t depth = 1 ) : my_depth(depth) {} -}; // class NestingForBody - -enum NestingType { - HollowNesting, - ShallowNesting, - DeepNesting -}; - -class Test_PFor_Nested : public Test_Algs { - typedef Test_Algs base_type; - - NestingType my_nestingType; - count_t my_nestingDepth; - -protected: - const char* Name () { - static const char* names[] = { "PFor-HollowNested", "PFor-HollowNested-AP", - "PFor-ShallowNested", "PFor-ShallowNested-AP", - "PFor-DeeplyNested", "PFor-DeeplyNested-AP" }; - return names[my_nestingType * 2 + my_partitionerType]; - } - - int NumWorkloads () { return my_nestingType == ShallowNesting ? (UseAutoPartitioner() ? 3 : 2) : 1; } - - void SetWorkload ( int idx ) { - gPartitionerType = my_partitionerType; - if ( my_nestingType == DeepNesting ) { - NestingRange = 1024; - IterGrain = NestingGrain = 1; - IterRange = 4; - my_nestingDepth = 4; - } - else if ( my_nestingType == ShallowNesting ) { - int i = idx ? numWorkloads - 1 : 0; - count_t baseRange = algRanges[i]; - count_t baseGrain = !UseAutoPartitioner() || idx > 1 ? 
algGrains[i] : 1; - NestingRange = IterRange = (count_t)sqrt((double)baseRange); - NestingGrain = IterGrain = (count_t)sqrt((double)baseGrain); - } - else { - NestingRange = N / 100; - NestingGrain = COARSE_GRAIN / 10; - IterRange = 2; - IterGrain = 1; - } - Perf::SetWorkloadName( "%d / %d", NestingRange, NestingGrain ); - } - - void Run ( ThreadInfo& ) { - if ( UseAutoPartitioner() ) - tbb::parallel_for( range_t(0, NestingRange, NestingGrain), NestingForBody(my_nestingDepth), my_autoPartitioner ); - else - tbb::parallel_for( range_t(0, NestingRange, NestingGrain), NestingForBody(my_nestingDepth), my_simplePartitioner ); - } - - void RunSerial ( ThreadInfo& ) { - for ( int i = 0; i < NestingRange; ++i ) { - SimpleForBody body; - body( range_t(0, IterRange, IterGrain) ); - } - } -public: - Test_PFor_Nested ( NestingType nt, PartitionerType pt ) : base_type(pt), my_nestingType(nt), my_nestingDepth(1) {} -}; // class Test_PFor_Nested - -class SimpleReduceBody { -public: - count_t my_sum; - SimpleReduceBody () : my_sum(0) {} - SimpleReduceBody ( SimpleReduceBody&, tbb::split ) : my_sum(0) {} - void join( SimpleReduceBody& rhs ) { my_sum += rhs.my_sum;} - void operator()( const range_t& r ) { - count_t end = r.end(); - volatile count_t anchor = 0; - for( count_t i = r.begin(); i < end; ++i ) - anchor += i; - my_sum = anchor; - } -}; // class SimpleReduceBody - -class Test_PReduce : public Test_Algs { -protected: - const char* Name () { return UseAutoPartitioner() ? "PReduce-AP" : "PReduce"; } - - void Run ( ThreadInfo& ) { - SimpleReduceBody body; - if ( UseAutoPartitioner() ) - tbb::parallel_reduce( range_t(0, IterRange, IterGrain), body, my_autoPartitioner ); - else - tbb::parallel_reduce( range_t(0, IterRange, IterGrain), body, my_simplePartitioner ); - } - - void RunSerial ( ThreadInfo& ) { - SimpleReduceBody body; - body( range_t(0, IterRange, IterGrain) ); - } -public: - Test_PReduce ( PartitionerType pt = SimplePartitioner ) : Test_Algs(pt) {} -}; // class Test_PReduce - -int main( int argc, char* argv[] ) { - Perf::SessionSettings opts (Perf::UseTaskScheduler); // Perf::UseBaseline | Perf::MeasureOverhead - Test_PFor_Nested pf_dn_sp(DeepNesting, SimplePartitioner), pf_dn_ap(DeepNesting, AutoPartitioner); - Perf::RegisterTest(pf_dn_sp); - Perf::RegisterTest(pf_dn_ap); - return Perf::TestMain(argc, argv, &opts); -} diff --git a/deal.II/bundled/tbb30_104oss/src/perf/run_statistics.sh b/deal.II/bundled/tbb30_104oss/src/perf/run_statistics.sh deleted file mode 100644 index 5e1ed14fff..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/run_statistics.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -export LD_LIBRARY_PATH=.:$LD_LIBRARY_PATH -#setting output format .csv, 'pivot' - is pivot table mode, ++ means append -export STAT_FORMAT=pivot-csv++ -#check existing files because of apend mode -ls *.csv -rm -i *.csv -#setting a delimiter in txt or csv file -#export STAT_DELIMITER=, -export STAT_RUNINFO1=Host=`hostname -s` -#append a suffix after the filename -#export STAT_SUFFIX=$STAT_RUNINFO1 -for ((i=1;i<=${repeat:=100};++i)); do echo $i of $repeat: && STAT_RUNINFO2=Run=$i $* || break; done diff --git a/deal.II/bundled/tbb30_104oss/src/perf/statistics.cpp b/deal.II/bundled/tbb30_104oss/src/perf/statistics.cpp deleted file mode 100644 index 830b89fb68..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/statistics.cpp +++ /dev/null @@ -1,452 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "statistics.h" -#include "statistics_xml.h" - -#define COUNT_PARAMETERS 3 - -#ifdef _MSC_VER -#define snprintf _snprintf -#endif - -void GetTime(char* buff,int size_buff) -{ - tm *newtime; - time_t timer; - time(&timer); - newtime=localtime(&timer); - strftime(buff,size_buff,"%H:%M:%S",newtime); -} - -void GetDate(char* buff,int size_buff) -{ - tm *newtime; - time_t timer; - time(&timer); - newtime=localtime(&timer); - strftime(buff,size_buff,"%Y-%m-%d",newtime); -} - - -StatisticsCollector::TestCase StatisticsCollector::SetTestCase(const char *name, const char *mode, int threads) -{ - string KeyName(name); - switch (SortMode) - { - case ByThreads: KeyName += Format("_%02d_%s", threads, mode); break; - default: - case ByAlg: KeyName += Format("_%s_%02d", mode, threads); break; - } - CurrentKey = Statistics[KeyName]; - if(!CurrentKey) { - CurrentKey = new StatisticResults; - CurrentKey->Mode = mode; - CurrentKey->Name = name; - CurrentKey->Threads = threads; - CurrentKey->Results.reserve(RoundTitles.size()); - Statistics[KeyName] = CurrentKey; - } - return TestCase(CurrentKey); -} - -StatisticsCollector::~StatisticsCollector() -{ - for(Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - delete i->second; -} - -void StatisticsCollector::ReserveRounds(size_t index) -{ - size_t i = RoundTitles.size(); - if (i > index) return; - char buf[16]; - RoundTitles.resize(index+1); - for(; i <= index; i++) { - snprintf( buf, 15, "%u", unsigned(i+1) ); - RoundTitles[i] = buf; - } - for(Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) { - if(!i->second) printf("!!!'%s' = NULL\n", i->first.c_str()); - else i->second->Results.reserve(index+1); - } -} - -void StatisticsCollector::AddRoundResult(const TestCase &key, value_t v) -{ - ReserveRounds(key.access->Results.size()); - key.access->Results.push_back(v); -} - -void StatisticsCollector::SetRoundTitle(size_t index, const char *fmt, ...) -{ - vargf2buff(buff, 128, fmt); - ReserveRounds(index); - RoundTitles[index] = buff; -} - -void StatisticsCollector::AddStatisticValue(const TestCase &key, const char *type, const char *fmt, ...) -{ - vargf2buff(buff, 128, fmt); - AnalysisTitles.insert(type); - key.access->Analysis[type] = buff; -} - -void StatisticsCollector::AddStatisticValue(const char *type, const char *fmt, ...) -{ - vargf2buff(buff, 128, fmt); - AnalysisTitles.insert(type); - CurrentKey->Analysis[type] = buff; -} - -void StatisticsCollector::SetRunInfo(const char *title, const char *fmt, ...) -{ - vargf2buff(buff, 256, fmt); - RunInfo.push_back(make_pair(title, buff)); -} - -void StatisticsCollector::SetStatisticFormula(const char *name, const char *formula) -{ - Formulas[name] = formula; -} - -void StatisticsCollector::SetTitle(const char *fmt, ...) 
-{ - vargf2buff(buff, 256, fmt); - Title = buff; -} - -string ExcelFormula(const string &fmt, size_t place, size_t rounds, bool is_horizontal) -{ - char buff[16]; - if(is_horizontal) - snprintf(buff, 15, "RC[%u]:RC[%u]", unsigned(place), unsigned(place+rounds-1)); - else - snprintf(buff, 15, "R[%u]C:R[%u]C", unsigned(place+1), unsigned(place+rounds)); - string result(fmt); size_t pos = 0; - while ( (pos = result.find("ROUNDS", pos, 6)) != string::npos ) - result.replace(pos, 6, buff); - return result; -} - -void StatisticsCollector::Print(int dataOutput, const char *ModeName) -{ - FILE *OutputFile; - const char *file_suffix = getenv("STAT_SUFFIX"); - if( !file_suffix ) file_suffix = ""; - const char *file_format = getenv("STAT_FORMAT"); - if( file_format ) { - dataOutput = 0; - if( strstr(file_format, "con")||strstr(file_format, "std") ) dataOutput |= StatisticsCollector::Stdout; - if( strstr(file_format, "txt")||strstr(file_format, "csv") ) dataOutput |= StatisticsCollector::TextFile; - if( strstr(file_format, "excel")||strstr(file_format, "xml") ) dataOutput |= StatisticsCollector::ExcelXML; - if( strstr(file_format, "htm") ) dataOutput |= StatisticsCollector::HTMLFile; - if( strstr(file_format, "pivot") ) dataOutput |= StatisticsCollector::PivotMode; - } - for(int i = 1; i < 10; i++) { - string env = Format("STAT_RUNINFO%d", i); - const char *info = getenv(env.c_str()); - if( info ) { - string title(info); - size_t pos = title.find('='); - if( pos != string::npos ) { - env = title.substr(pos+1); - title.resize(pos); - } else env = title; - RunInfo.push_back(make_pair(title, env)); - } - } - - if (dataOutput & StatisticsCollector::Stdout) - { - printf("\n-=# %s #=-\n", Title.c_str()); - if(SortMode == ByThreads) - printf(" Name | # | %s ", ModeName); - else - printf(" Name | %s | # ", ModeName); - for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++) - printf("|%s", i->c_str()+1); - - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - { - if(SortMode == ByThreads) - printf("\n%12s|% 5d|%6s", i->second->Name.c_str(), i->second->Threads, i->second->Mode.c_str()); - else - printf("\n%12s|%6s|% 5d", i->second->Name.c_str(), i->second->Mode.c_str(), i->second->Threads); - Analysis_t &analisis = i->second->Analysis; - AnalysisTitles_t::iterator t = AnalysisTitles.begin(); - for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); t++) - { - char fmt[8]; snprintf(fmt, 7, "|%% %us", unsigned(max(size_t(3), t->size()))); - if(*t != a->first) - printf(fmt, ""); - else { - printf(fmt, a->second.c_str()); a++; - } - } - } - printf("\n"); - } - if (dataOutput & StatisticsCollector::TextFile) - { - bool append = false; - const char *file_ext = ".txt"; - if( file_format && strstr(file_format, "++") ) append = true; - if( file_format && strstr(file_format, "csv") ) file_ext = ".csv"; - if ((OutputFile = fopen((Name+file_suffix+file_ext).c_str(), append?"at":"wt")) == NULL) { - printf("Can't open .txt file\n"); - } else { - const char *delim = getenv("STAT_DELIMITER"); - if( !delim || !delim[0] ) { - if( file_format && strstr(file_format, "csv") ) delim = ","; - else delim = "\t"; - } - if( !append || !ftell(OutputFile) ) { // header needed - append = false; - if(SortMode == ByThreads) fprintf(OutputFile, "Name%s#%s%s", delim, delim, ModeName); - else fprintf(OutputFile, "Name%s%s%s#", delim, ModeName, delim); - for( size_t k = 0; k < RunInfo.size(); k++ ) - fprintf(OutputFile, "%s%s", delim, RunInfo[k].first.c_str()); - } - 
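    // Rough sketch of the two text-file layouts chosen below, assuming the default
    // ByThreads ordering and the default "%g" result format; all values are
    // hypothetical, and run-info columns (e.g. Host, as set by run_statistics.sh)
    // would follow the Mode column but are omitted here. With STAT_FORMAT=pivot-csv++
    // every analysis entry and every round becomes its own "Column"/"Value" row,
    // which is the shape Excel pivot tables expect:
    //   Name,#,Mode,Column,Value
    //   PFor,4,AP,1,0.0123
    //   PFor,4,AP,2,0.0121
    // Without "pivot" each test case stays on one wide row, with one column per
    // analysis title and per round:
    //   Name,#,Mode,1,2
    //   PFor,4,AP,0.0123,0.0121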
if(dataOutput & StatisticsCollector::PivotMode) { - if( !append) fprintf(OutputFile, "%sColumn%sValue", delim, delim); - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - { - string RowHead; - if(SortMode == ByThreads) - RowHead = Format("\n%s%s%d%s%s%s", i->second->Name.c_str(), delim, i->second->Threads, delim, i->second->Mode.c_str(), delim); - else - RowHead = Format("\n%s%s%s%s%d%s", i->second->Name.c_str(), delim, i->second->Mode.c_str(), delim, i->second->Threads, delim); - for( size_t k = 0; k < RunInfo.size(); k++ ) - RowHead.append(RunInfo[k].second + delim); - Analysis_t &analisis = i->second->Analysis; - for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); ++a) - fprintf(OutputFile, "%s%s%s%s", RowHead.c_str(), a->first.c_str(), delim, a->second.c_str()); - Results_t &r = i->second->Results; - for (size_t k = 0; k < r.size(); k++) { - fprintf(OutputFile, "%s%s%s", RowHead.c_str(), RoundTitles[k].c_str(), delim); - fprintf(OutputFile, ResultsFmt, r[k]); - } - } - } else { - if( !append ) { - for( size_t k = 0; k < RunInfo.size(); k++ ) - fprintf(OutputFile, "%s%s", delim, RunInfo[k].first.c_str()); - for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++) - fprintf(OutputFile, "%s%s", delim, i->c_str()+1); - for (size_t i = 0; i < RoundTitles.size(); i++) - fprintf(OutputFile, "%s%s", delim, RoundTitles[i].c_str()); - } - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - { - if(SortMode == ByThreads) - fprintf(OutputFile, "\n%s%s%d%s%s", i->second->Name.c_str(), delim, i->second->Threads, delim, i->second->Mode.c_str()); - else - fprintf(OutputFile, "\n%s%s%s%s%d", i->second->Name.c_str(), delim, i->second->Mode.c_str(), delim, i->second->Threads); - for( size_t k = 0; k < RunInfo.size(); k++ ) - fprintf(OutputFile, "%s%s", delim, RunInfo[k].second.c_str()); - Analysis_t &analisis = i->second->Analysis; - AnalysisTitles_t::iterator t = AnalysisTitles.begin(); - for (Analysis_t::iterator a = analisis.begin(); a != analisis.end(); ++t) { - fprintf(OutputFile, "%s", delim); - if(*t == a->first) { - fprintf(OutputFile, "%s", a->second.c_str()); ++a; - } - } - //data - Results_t &r = i->second->Results; - for (size_t k = 0; k < r.size(); k++) - { - fprintf(OutputFile, "%s", delim); - fprintf(OutputFile, ResultsFmt, r[k]); - } - } - } - fprintf(OutputFile, "\n"); - fclose(OutputFile); - } - } - if (dataOutput & StatisticsCollector::HTMLFile) - { - if ((OutputFile = fopen((Name+file_suffix+".html").c_str(), "w+t")) == NULL) { - printf("Can't open .html file\n"); - } else { - char TimerBuff[100], DateBuff[100]; - GetTime(TimerBuff,sizeof(TimerBuff)); - GetDate(DateBuff,sizeof(DateBuff)); - fprintf(OutputFile, "\n%s\n\n", Title.c_str()); - //----------------------- - fprintf(OutputFile, "\n"); - fprintf(OutputFile, "" - "\n", ModeName); - for (AnalysisTitles_t::iterator i = AnalysisTitles.begin(); i != AnalysisTitles.end(); i++) - fprintf(OutputFile, "", i->c_str()+1); - for (size_t i = 0; i < RoundTitles.size(); i++) - fprintf(OutputFile, "", RoundTitles[i].c_str()); - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - { - fprintf(OutputFile, "\n", - i->second->Name.c_str(), i->second->Threads, i->second->Mode.c_str()); - //statistics - AnalysisTitles_t::iterator t = AnalysisTitles.begin(); - for (Analysis_t::iterator j = i->second->Analysis.begin(); j != i->second->Analysis.end(); t++) - { - fprintf(OutputFile, "", (*t != j->first)?" 
":(i->second->Analysis[j->first]).c_str()); - if(*t == j->first) j++; - } - //data - Results_t &r = i->second->Results; - for (size_t k = 0; k < r.size(); k++) - { - fprintf(OutputFile, ""); - } - } - fprintf(OutputFile, "\n
Flip[H]%s%s%s", - DateBuff, TimerBuff, unsigned(AnalysisTitles.size() + RoundTitles.size()), Title.c_str()); - for( size_t k = 0; k < RunInfo.size(); k++ ) - fprintf(OutputFile, "; %s: %s", RunInfo[k].first.c_str(), RunInfo[k].second.c_str()); - fprintf(OutputFile, "
NameThreads%s%s%s
%s%d%4s%s"); - fprintf(OutputFile, ResultsFmt, r[k]); - fprintf(OutputFile, "
\n"); - ////////////////////////////////////////////////////// - fprintf(OutputFile, "\n"); - fprintf(OutputFile, "\n" - "", - DateBuff, TimerBuff, unsigned(max(Statistics.size()-2,size_t(1))), Title.c_str()); - - fprintf(OutputFile, "\n"); - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - fprintf(OutputFile, "", i->second->Name.c_str()); - fprintf(OutputFile, "\n"); - for (Statistics_t::iterator n = Statistics.begin(); n != Statistics.end(); n++) - fprintf(OutputFile, "", n->second->Threads); - fprintf(OutputFile, "\n", ModeName); - for (Statistics_t::iterator m = Statistics.begin(); m != Statistics.end(); m++) - fprintf(OutputFile, "", m->second->Mode.c_str()); - - for (AnalysisTitles_t::iterator t = AnalysisTitles.begin(); t != AnalysisTitles.end(); t++) - { - fprintf(OutputFile, "\n", t->c_str()+1); - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - fprintf(OutputFile, "", i->second->Analysis.count(*t)?i->second->Analysis[*t].c_str():" "); - } - - for (size_t r = 0; r < RoundTitles.size(); r++) - { - fprintf(OutputFile, "\n", RoundTitles[r].c_str()); - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - { - Results_t &result = i->second->Results; - fprintf(OutputFile, ""); - } - } - fprintf(OutputFile, "\n
Flip[V]%s%s%s
Name%s
Threads%d
%s%s
%s%s
%s"); - if(result.size() > r) - fprintf(OutputFile, ResultsFmt, result[r]); - fprintf(OutputFile, "
\n\n"); - fclose(OutputFile); - } - } - if (dataOutput & StatisticsCollector::ExcelXML) - { - if ((OutputFile = fopen((Name+file_suffix+".xml").c_str(), "w+t")) == NULL) { - printf("Can't open .xml file\n"); - } else { - // TODO:PivotMode - char UserName[100]; - char TimerBuff[100], DateBuff[100]; -#if _WIN32 || _WIN64 - strcpy(UserName,getenv("USERNAME")); -#else - strcpy(UserName,getenv("USER")); -#endif - //-------------------------------- - GetTime(TimerBuff,sizeof(TimerBuff)); - GetDate(DateBuff,sizeof(DateBuff)); - //-------------------------- - fprintf(OutputFile, XMLHead, UserName, TimerBuff); - fprintf(OutputFile, XMLStyles); - fprintf(OutputFile, XMLBeginSheet, "Horizontal"); - fprintf(OutputFile, XMLNames,1,1,1,int(AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS)); - fprintf(OutputFile, XMLBeginTable, int(RoundTitles.size()+Formulas.size()+AnalysisTitles.size()+COUNT_PARAMETERS+1/*title*/), int(Statistics.size()+1)); - fprintf(OutputFile, XMLBRow); - fprintf(OutputFile, XMLCellTopName); - fprintf(OutputFile, XMLCellTopThread); - fprintf(OutputFile, XMLCellTopMode, ModeName); - for (AnalysisTitles_t::iterator j = AnalysisTitles.begin(); j != AnalysisTitles.end(); j++) - fprintf(OutputFile, XMLAnalysisTitle, j->c_str()+1); - for (Formulas_t::iterator j = Formulas.begin(); j != Formulas.end(); j++) - fprintf(OutputFile, XMLAnalysisTitle, j->first.c_str()+1); - for (RoundTitles_t::iterator j = RoundTitles.begin(); j != RoundTitles.end(); j++) - fprintf(OutputFile, XMLAnalysisTitle, j->c_str()); - string Info = Title; - for( size_t k = 0; k < RunInfo.size(); k++ ) - Info.append("; " + RunInfo[k].first + "=" + RunInfo[k].second); - fprintf(OutputFile, XMLCellEmptyWhite, Info.c_str()); - fprintf(OutputFile, XMLERow); - //------------------------ - for (Statistics_t::iterator i = Statistics.begin(); i != Statistics.end(); i++) - { - fprintf(OutputFile, XMLBRow); - fprintf(OutputFile, XMLCellName, i->second->Name.c_str()); - fprintf(OutputFile, XMLCellThread,i->second->Threads); - fprintf(OutputFile, XMLCellMode, i->second->Mode.c_str()); - //statistics - AnalysisTitles_t::iterator at = AnalysisTitles.begin(); - for (Analysis_t::iterator j = i->second->Analysis.begin(); j != i->second->Analysis.end(); at++) - { - fprintf(OutputFile, XMLCellAnalysis, (*at != j->first)?"":(i->second->Analysis[j->first]).c_str()); - if(*at == j->first) j++; - } - //formulas - size_t place = 0; - Results_t &v = i->second->Results; - for (Formulas_t::iterator f = Formulas.begin(); f != Formulas.end(); f++, place++) - fprintf(OutputFile, XMLCellFormula, ExcelFormula(f->second, Formulas.size()-place, v.size(), true).c_str()); - //data - for (size_t k = 0; k < v.size(); k++) - { - fprintf(OutputFile, XMLCellData, v[k]); - } - if(v.size() < RoundTitles.size()) - fprintf(OutputFile, XMLMergeRow, int(RoundTitles.size() - v.size())); - fprintf(OutputFile, XMLERow); - } - //------------------------ - fprintf(OutputFile, XMLEndTable); - fprintf(OutputFile, XMLWorkSheetProperties,1,1,3,3,int(RoundTitles.size()+AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS)); - fprintf(OutputFile, XMLAutoFilter,1,1,1,int(AnalysisTitles.size()+Formulas.size()+COUNT_PARAMETERS)); - fprintf(OutputFile, XMLEndWorkSheet); - //---------------------------------------- - fprintf(OutputFile, XMLEndWorkbook); - fclose(OutputFile); - } - } -} diff --git a/deal.II/bundled/tbb30_104oss/src/perf/statistics.h b/deal.II/bundled/tbb30_104oss/src/perf/statistics.h deleted file mode 100644 index 5279bd8f53..0000000000 --- 
a/deal.II/bundled/tbb30_104oss/src/perf/statistics.h +++ /dev/null @@ -1,194 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// Internal Intel tool - -#ifndef __STATISTICS_H__ -#define __STATISTICS_H__ - -#define _CRT_SECURE_NO_DEPRECATE 1 - -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; -typedef double value_t; - -/* - Statistical collector class. - - Resulting table output: - +---------------------------------------------------------------------------+ - | [Date] ... | - +----------+----v----+--v---+----------------+------------+-..-+------------+ - | TestName | Threads | Mode | Rounds results | Stat_type1 | .. | Stat_typeN | - +----------+---------+------+-+-+-+-..-+-+-+-+------------+-..-+------------+ - | | | | | | | .. | | | | | | | - .. ... ... .................. ...... .. - | | | | | | | .. | | | | | | | - +----------+---------+------+-+-+-+-..-+-+-+-+------------+-..-+------------+ - - Iterating table output: - +---------------------------------------------------------------------------+ - | [Date] <TestName>, Threads: <N>, Mode: <M>; for <Title>... | - +----------+----v----+--v---+----------------+------------+-..-+------------+ - -*/ - -class StatisticsCollector -{ -public: - typedef map<string, string> Analysis_t; - typedef vector<value_t> Results_t; - -protected: - StatisticsCollector(const StatisticsCollector &); - - struct StatisticResults - { - string Name; - string Mode; - int Threads; - Results_t Results; - Analysis_t Analysis; - }; - - // internal members - //bool OpenFile; - StatisticResults *CurrentKey; - string Title; - const char /**Name,*/ *ResultsFmt; - string Name; - //! 
Data - typedef map<string, StatisticResults*> Statistics_t; - Statistics_t Statistics; - typedef vector<string> RoundTitles_t; - RoundTitles_t RoundTitles; - //TODO: merge those into one structure - typedef map<string, string> Formulas_t; - Formulas_t Formulas; - typedef set<string> AnalysisTitles_t; - AnalysisTitles_t AnalysisTitles; - typedef vector<pair<string, string> > RunInfo_t; - RunInfo_t RunInfo; - -public: - struct TestCase { - StatisticResults *access; - TestCase() : access(0) {} - TestCase(StatisticResults *link) : access(link) {} - const char *getName() const { return access->Name.c_str(); } - const char *getMode() const { return access->Mode.c_str(); } - int getThreads() const { return access->Threads; } - const Results_t &getResults() const { return access->Results; } - const Analysis_t &getAnalysis() const { return access->Analysis; } - }; - - enum Sorting { - ByThreads, ByAlg - }; - - //! Data and output types - enum DataOutput { - // Verbosity level enumeration - Statistic = 1, //< Analytical data - computed after all iterations and rounds passed - Result = 2, //< Testing data - collected after all iterations passed - Iteration = 3, //< Verbose data - collected at each iteration (for each size - in case of containers) - // ExtraVerbose is not applicabe yet :) be happy, but flexibility is always welcome - - // Next constants are bit-fields - Stdout = 1<<8, //< Output to the console - TextFile = 1<<9, //< Output to plain text file "name.txt" (delimiter is TAB by default) - ExcelXML = 1<<10, //< Output to Excel-readable XML-file "name.xml" - HTMLFile = 1<<11, //< Output to HTML file "name.html" - PivotMode= 1<<15 //< Puts all the rounds into one columt to better fit for pivot table in Excel - }; - - //! Constructor. Specify tests set name which used as name of output files - StatisticsCollector(const char *name, Sorting mode = ByThreads, const char *fmt = "%g") - : CurrentKey(NULL), ResultsFmt(fmt), Name(name), SortMode(mode) {} - - ~StatisticsCollector(); - - //! Set tests set title, supporting printf-like arguments - void SetTitle(const char *fmt, ...); - - //! Specify next test key - TestCase SetTestCase(const char *name, const char *mode, int threads); - //! Specify next test key - void SetTestCase(const TestCase &t) { SetTestCase(t.getName(), t.getMode(), t.getThreads()); } - //! Reserve specified number of rounds. Use for effeciency. Used mostly internally - void ReserveRounds(size_t index); - //! Add result of the measure - void AddRoundResult(const TestCase &, value_t v); - //! Add result of the current measure - void AddRoundResult(value_t v) { if(CurrentKey) AddRoundResult(TestCase(CurrentKey), v); } - //! Add title of round - void SetRoundTitle(size_t index, const char *fmt, ...); - //! Add numbered title of round - void SetRoundTitle(size_t index, int num) { SetRoundTitle(index, "%d", num); } - //! Get number of rounds - size_t GetRoundsCount() const { return RoundTitles.size(); } - // Set statistic value for the test - void AddStatisticValue(const TestCase &, const char *type, const char *fmt, ...); - // Set statistic value for the current test - void AddStatisticValue(const char *type, const char *fmt, ...); - //! Add Excel-processing formulas. @arg formula can contain more than one instances of - //! ROUNDS template which transforms into the range of cells with result values - //TODO://! #1 .. #n templates represent data cells from the first to the last - //TODO: merge with Analisis - void SetStatisticFormula(const char *name, const char *formula); - //! 
Add information about run or compile parameters - void SetRunInfo(const char *title, const char *fmt, ...); - void SetRunInfo(const char *title, int num) { SetRunInfo(title, "%d", num); } - - //! Data output - void Print(int dataOutput, const char *ModeName = "Mode"); - -private: - Sorting SortMode; -}; - -//! using: Func(const char *fmt, ...) { vargf2buff(buff, 128, fmt);... -#define vargf2buff(name, size, fmt) char name[size]; memset(name, 0, size); va_list args; va_start(args, fmt); vsnprintf( name, size-1, fmt, args) - -inline std::string Format(const char *fmt, ...) { - vargf2buff(buf, 1024, fmt); // from statistics.h - return std::string(buf); -} - -#ifdef STATISTICS_INLINE -#include "statistics.cpp" -#endif -#endif //__STATISTICS_H__ diff --git a/deal.II/bundled/tbb30_104oss/src/perf/statistics_xml.h b/deal.II/bundled/tbb30_104oss/src/perf/statistics_xml.h deleted file mode 100644 index 7be259e3ca..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/statistics_xml.h +++ /dev/null @@ -1,208 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
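For orientation, a minimal sketch of how the StatisticsCollector interface declared in the statistics.h hunk above is typically driven; the collection name, mode string, and measured values below are hypothetical placeholders, not taken from the removed sources:

#define STATISTICS_INLINE
#include "statistics.h"

int main() {
    StatisticsCollector stats("example_perf", StatisticsCollector::ByThreads);
    stats.SetTitle("Example collection run with %d threads", 4);
    // One test case per (name, mode, threads) triple.
    StatisticsCollector::TestCase tc = stats.SetTestCase("insert", "Mode", 4);
    for (int round = 0; round < 3; ++round) {
        stats.AddRoundResult(tc, 1.5 * (round + 1));   // hypothetical value_t measurements
        stats.SetRoundTitle(round, round);             // labels the round columns "0", "1", "2"
    }
    stats.AddStatisticValue(tc, "9note", "%s", "hypothetical remark");
    stats.SetStatisticFormula("1AVG", "=AVERAGE(ROUNDS)");
    stats.Print(StatisticsCollector::Stdout | StatisticsCollector::ExcelXML);
    return 0;
}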
-*/ - -const char XMLBRow[]= -" <Row>\n"; - -const char XMLERow[]= -" </Row>\n"; - -const char XMLHead[]= -"<?xml version=\"1.0\"?>\n" -"<?mso-application progid=\"Excel.Sheet\"?>\n\ -<Workbook xmlns=\"urn:schemas-microsoft-com:office:spreadsheet\"\n\ - xmlns:o=\"urn:schemas-microsoft-com:office:office\"\n\ - xmlns:x=\"urn:schemas-microsoft-com:office:excel\"\n\ - xmlns:ss=\"urn:schemas-microsoft-com:office:spreadsheet\"\n\ - xmlns:html=\"http://www.w3.org/TR/REC-html40\">\n\ - <DocumentProperties xmlns=\"urn:schemas-microsoft-com:office:office\">\n\ - <Author>%s</Author>\n\ - <Created>%s</Created>\n\ - <Company>Intel Corporation</Company>\n\ - </DocumentProperties>\n\ - <ExcelWorkbook xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\ - <RefModeR1C1/>\n\ - </ExcelWorkbook>\n"; - - const char XMLStyles[]= - " <Styles>\n\ - <Style ss:ID=\"Default\" ss:Name=\"Normal\">\n\ - <Alignment ss:Vertical=\"Bottom\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s26\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - <Interior ss:Color=\"#FFFF99\" ss:Pattern=\"Solid\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s25\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - <Interior ss:Color=\"#CCFFFF\" ss:Pattern=\"Solid\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s24\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - <Interior ss:Color=\"#CCFFCC\" ss:Pattern=\"Solid\"/>\n\ - </Style>\n\ - <Style ss:ID=\"s23\">\n\ - <Alignment ss:Vertical=\"Top\" ss:Horizontal=\"Left\" ss:WrapText=\"0\"/>\n\ - <Borders>\n\ - <Border ss:Position=\"Bottom\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Left\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Right\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - <Border ss:Position=\"Top\" ss:LineStyle=\"Continuous\" ss:Weight=\"1\"/>\n\ - </Borders>\n\ - </Style>\n\ - </Styles>\n"; - -const char XMLBeginSheet[]= -" <Worksheet ss:Name=\"%s\">\n"; - -const char XMLNames[]= -" <Names>\n\ - <NamedRange ss:Name=\"_FilterDatabase\" ss:RefersTo=\"R%dC%d:R%dC%d\" ss:Hidden=\"1\"/>\n\ - </Names>\n"; - -const char XMLBeginTable[]= -" <Table ss:ExpandedColumnCount=\"%d\" ss:ExpandedRowCount=\"%d\" x:FullColumns=\"1\"\n\ - x:FullRows=\"1\">\n"; - -const char XMLColumsHorizontalTable[]= -" <Column ss:Index=\"1\" ss:Width=\"108.75\"/>\n\ - <Column ss:Index=\"%d\" 
ss:Width=\"77.25\" ss:Span=\"%d\"/>\n"; - -const char XMLColumsVerticalTable[]= -" <Column ss:Index=\"1\" ss:Width=\"77.25\" ss:Span=\"%d\"/>\n"; - -const char XMLNameAndTime[]= -" <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n\ - <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n\ - <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLTableParamAndTitle[]= -" <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\ - <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\ - <Cell><Data ss:Type=\"Number\">%d</Data></Cell>\n\ - <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -//-------------- -const char XMLCellTopName[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">Name</Data></Cell>\n"; -const char XMLCellTopThread[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">Threads</Data></Cell>\n"; -const char XMLCellTopMode[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; -//--------------------- -const char XMLAnalysisTitle[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellName[]= -" <Cell ss:StyleID=\"s24\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellThread[]= -" <Cell ss:StyleID=\"s24\"><Data ss:Type=\"Number\">%d</Data></Cell>\n"; - -const char XMLCellMode[]= -" <Cell ss:StyleID=\"s24\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellAnalysis[]= -" <Cell ss:StyleID=\"s26\"><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellFormula[]= -" <Cell ss:StyleID=\"s26\" ss:Formula=\"%s\"><Data ss:Type=\"Number\"></Data></Cell>\n"; - -const char XMLCellData[]= -" <Cell ss:StyleID=\"s23\"><Data ss:Type=\"Number\">%g</Data></Cell>\n"; - -const char XMLMergeRow[]= -" <Cell ss:StyleID=\"s23\" ss:MergeAcross=\"%d\" ><Data ss:Type=\"String\"></Data></Cell>\n"; - -const char XMLCellEmptyWhite[]= -" <Cell><Data ss:Type=\"String\">%s</Data></Cell>\n"; - -const char XMLCellEmptyTitle[]= -" <Cell ss:StyleID=\"s25\"><Data ss:Type=\"String\"></Data></Cell>\n"; - -const char XMLEndTable[]= -" </Table>\n"; - -const char XMLAutoFilter[]= -" <AutoFilter x:Range=\"R%dC%d:R%dC%d\" xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\ - </AutoFilter>\n"; - -const char XMLEndWorkSheet[]= - " </Worksheet>\n"; - -const char XMLWorkSheetProperties[]= -" <WorksheetOptions xmlns=\"urn:schemas-microsoft-com:office:excel\">\n\ - <Unsynced/>\n\ - <Selected/>\n\ - <FreezePanes/>\n\ - <FrozenNoSplit/>\n\ - <SplitHorizontal>%d</SplitHorizontal>\n\ - <TopRowBottomPane>%d</TopRowBottomPane>\n\ - <SplitVertical>%d</SplitVertical>\n\ - <LeftColumnRightPane>%d</LeftColumnRightPane>\n\ - <ActivePane>0</ActivePane>\n\ - <Panes>\n\ - <Pane>\n\ - <Number>3</Number>\n\ - </Pane>\n\ - <Pane>\n\ - <Number>1</Number>\n\ - </Pane>\n\ - <Pane>\n\ - <Number>2</Number>\n\ - </Pane>\n\ - <Pane>\n\ - <Number>0</Number>\n\ - <ActiveRow>0</ActiveRow>\n\ - <ActiveCol>%d</ActiveCol>\n\ - </Pane>\n\ - </Panes>\n\ - <ProtectObjects>False</ProtectObjects>\n\ - <ProtectScenarios>False</ProtectScenarios>\n\ - </WorksheetOptions>\n"; - -const char XMLEndWorkbook[]= - "</Workbook>\n"; diff --git a/deal.II/bundled/tbb30_104oss/src/perf/time_framework.h b/deal.II/bundled/tbb30_104oss/src/perf/time_framework.h deleted file mode 100644 index adc9b79991..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/time_framework.h +++ /dev/null @@ -1,359 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
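The constants above are plain printf-style templates; the statistics.cpp hunk earlier in this patch assembles them into the Excel-readable report with consecutive fprintf calls, roughly as in this sketch (the output handle and cell values are placeholders):

#include <cstdio>
#include "statistics_xml.h"

// Sketch: emit one data row of the report from the templates above.
static void write_example_row(FILE *out) {
    fprintf(out, XMLBRow);                     // opens "<Row>"
    fprintf(out, XMLCellName,   "test_name");  // String cell (style s24)
    fprintf(out, XMLCellThread, 4);            // Number cell: thread count
    fprintf(out, XMLCellMode,   "Mode");       // String cell: mode column
    fprintf(out, XMLCellData,   3.14);         // Number cell: %g-formatted result
    fprintf(out, XMLERow);                     // closes "</Row>"
}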
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TIME_FRAMEWORK_H__ -#define __TIME_FRAMEWORK_H__ - -#include <cstdlib> -#include <math.h> -#include <vector> -#include <string> -#include <sstream> -#include "tbb/tbb_stddef.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/tick_count.h" -#define HARNESS_CUSTOM_MAIN 1 -#include "../test/harness.h" -#include "../test/harness_barrier.h" -#define STATISTICS_INLINE -#include "statistics.h" - -#ifndef ARG_TYPE -typedef intptr_t arg_t; -#else -typedef ARG_TYPE arg_t; -#endif - -class Timer { - tbb::tick_count tick; -public: - Timer() { tick = tbb::tick_count::now(); } - double get_time() { return (tbb::tick_count::now() - tick).seconds(); } - double diff_time(const Timer &newer) { return (newer.tick - tick).seconds(); } - double mark_time() { tbb::tick_count t1(tbb::tick_count::now()), t2(tick); tick = t1; return (t1 - t2).seconds(); } - double mark_time(const Timer &newer) { tbb::tick_count t(tick); tick = newer.tick; return (tick - t).seconds(); } -}; - -class TesterBase /*: public tbb::internal::no_copy*/ { -protected: - friend class TestProcessor; - friend class TestRunner; - - //! it is barrier for synchronizing between threads - Harness::SpinBarrier *barrier; - - //! number of tests per this tester - const int tests_count; - - //! number of threads to operate - int threads_count; - - //! some value for tester - arg_t value; - - //! tester name - const char *tester_name; - - // avoid false sharing - char pad[128 - sizeof(arg_t) - sizeof(int)*2 - sizeof(void*)*2 ]; - -public: - //! init tester base. @arg ntests is number of embeded tests in this tester. - TesterBase(int ntests) - : barrier(NULL), tests_count(ntests) - {} - virtual ~TesterBase() {} - - //! internal function - void base_init(arg_t v, int t, Harness::SpinBarrier &b) { - threads_count = t; - barrier = &b; - value = v; - init(); - } - - //! optionally override to init after value and threads count were set. - virtual void init() { } - - //! Override to provide your names - virtual std::string get_name(int testn) { - return Format("test %d", testn); - } - - //! optionally override to init test mode just before execution for a given thread number. - virtual void test_prefix(int testn, int threadn) { } - - //! 
Override to provide main test's entry function returns a value to record - virtual value_t test(int testn, int threadn) = 0; - - //! Type of aggregation from results of threads - enum result_t { - SUM, AVG, MIN, MAX - }; - - //! Override to change result type for the test. Return postfix for test name or 0 if result type is not needed. - virtual const char *get_result_type(int /*testn*/, result_t type) const { - return type == AVG ? "" : 0; // only average result by default - } -}; - -/***** -a user's tester concept: - -class tester: public TesterBase { -public: - //! init tester with known amount of work - tester() : TesterBase(<user-specified tests count>) { ... } - - //! run a test with sequental number @arg test_number for @arg thread. - / *override* / value_t test(int test_number, int thread); -}; - -******/ - -template<typename Tester, int scale = 1> -class TimeTest : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - return timer.get_time() * double(scale); - } -}; - -template<typename Tester> -class NanosecPerValue : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - // return time (ns) per value - return timer.get_time()*1000000.0/double(Tester::value); - } -}; - -template<typename Tester, int scale = 1> -class ValuePerSecond : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - // return value per seconds/scale - return double(Tester::value)/(timer.get_time()*scale); - } -}; - -template<typename Tester, int scale = 1> -class NumberPerSecond : public Tester { - /*override*/ value_t test(int testn, int threadn) { - Timer timer; - Tester::test(testn, threadn); - // return a scale per seconds - return double(scale)/timer.get_time(); - } -}; - -// operate with single tester -class TestRunner { - friend class TestProcessor; - friend struct RunArgsBody; - TestRunner(const TestRunner &); // don't copy - - const char *tester_name; - StatisticsCollector *stat; - std::vector<std::vector<StatisticsCollector::TestCase> > keys; - -public: - TesterBase &tester; - - template<typename Test> - TestRunner(const char *name, Test *test) - : tester_name(name), tester(*static_cast<TesterBase*>(test)) - { - test->tester_name = name; - } - - ~TestRunner() { delete &tester; } - - void init(arg_t value, int threads, Harness::SpinBarrier &barrier, StatisticsCollector *s) { - tester.base_init(value, threads, barrier); - stat = s; - keys.resize(tester.tests_count); - for(int testn = 0; testn < tester.tests_count; testn++) { - keys[testn].resize(threads); - std::string test_name(tester.get_name(testn)); - for(int threadn = 0; threadn < threads; threadn++) - keys[testn][threadn] = stat->SetTestCase(tester_name, test_name.c_str(), threadn); - } - } - - void run_test(int threadn) { - for(int testn = 0; testn < tester.tests_count; testn++) { - tester.test_prefix(testn, threadn); - tester.barrier->wait(); // <<<<<<<<<<<<<<<<< Barrier before running test mode - value_t result = tester.test(testn, threadn); - stat->AddRoundResult(keys[testn][threadn], result); - } - } - - void post_process(StatisticsCollector &report) { - const int threads = tester.threads_count; - for(int testn = 0; testn < tester.tests_count; testn++) { - size_t coln = keys[testn][0].getResults().size()-1; - value_t rsum = keys[testn][0].getResults()[coln]; - value_t rmin = rsum, rmax = rsum; - for(int threadn = 1; threadn < threads; threadn++) { - 
value_t result = keys[testn][threadn].getResults()[coln]; - rsum += result; // for both SUM or AVG - if(rmin > result) rmin = result; - if(rmax < result) rmax = result; - } - std::string test_name(tester.get_name(testn)); - const char *rname = tester.get_result_type(testn, TesterBase::SUM); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rsum); - } - rname = tester.get_result_type(testn, TesterBase::MIN); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rmin); - } - rname = tester.get_result_type(testn, TesterBase::AVG); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rsum / threads); - } - rname = tester.get_result_type(testn, TesterBase::MAX); - if( rname ) { - report.SetTestCase(tester_name, (test_name+rname).c_str(), threads); - report.AddRoundResult(rmax); - } - } - } -}; - -struct RunArgsBody { - const vector<TestRunner*> &run_list; - RunArgsBody(const vector<TestRunner*> &a) : run_list(a) { } -#ifndef __TBB_parallel_for_H - void operator()(int thread) const { -#else - void operator()(const tbb::blocked_range<int> &r) const { - ASSERT( r.begin() + 1 == r.end(), 0); - int thread = r.begin(); -#endif - for(size_t i = 0; i < run_list.size(); i++) - run_list[i]->run_test(thread); - } -}; - -//! Main test processor. -/** Override or use like this: - class MyTestCollection : public TestProcessor { - void factory(arg_t value, int threads) { - process( value, threads, - run("my1", new tester<my1>() ), - run("my2", new tester<my2>() ), - end ); - if(value == threads) - stat->Print(); - } -}; -*/ - -class TestProcessor { - friend class TesterBase; - - // <threads, collector> - typedef std::map<int, StatisticsCollector *> statistics_collection; - statistics_collection stat_by_threads; - -protected: - // Members - const char *collection_name; - // current stat - StatisticsCollector *stat; - // token - size_t end; - -public: - StatisticsCollector report; - - // token of tests list - template<typename Test> - TestRunner *run(const char *name, Test *test) { - return new TestRunner(name, test); - } - - // iteration processing - void process(arg_t value, int threads, ...) 
{ - // prepare items - stat = stat_by_threads[threads]; - if(!stat) { - stat_by_threads[threads] = stat = new StatisticsCollector((collection_name + Format("@%d", threads)).c_str(), StatisticsCollector::ByAlg); - stat->SetTitle("Detailed log of %s running with %d threads.", collection_name, threads); - } - Harness::SpinBarrier barrier(threads); - // init args - va_list args; va_start(args, threads); - vector<TestRunner*> run_list; run_list.reserve(16); - while(true) { - TestRunner *item = va_arg(args, TestRunner*); - if( !item ) break; - item->init(value, threads, barrier, stat); - run_list.push_back(item); - } - va_end(args); - std::ostringstream buf; - buf << value; - const size_t round_number = stat->GetRoundsCount(); - stat->SetRoundTitle(round_number, buf.str().c_str()); - report.SetRoundTitle(round_number, buf.str().c_str()); - // run them -#ifndef __TBB_parallel_for_H - NativeParallelFor(threads, RunArgsBody(run_list)); -#else - tbb::parallel_for(tbb::blocked_range<int>(0,threads,1), RunArgsBody(run_list)); -#endif - // destroy args - for(size_t i = 0; i < run_list.size(); i++) { - run_list[i]->post_process(report); - delete run_list[i]; - } - } - -public: - TestProcessor(const char *name, StatisticsCollector::Sorting sort_by = StatisticsCollector::ByAlg) - : collection_name(name), stat(NULL), end(0), report(collection_name, sort_by) - { } - - ~TestProcessor() { - for(statistics_collection::iterator i = stat_by_threads.begin(); i != stat_by_threads.end(); i++) - delete i->second; - } -}; - -#endif// __TIME_FRAMEWORK_H__ diff --git a/deal.II/bundled/tbb30_104oss/src/perf/time_hash_map.cpp b/deal.II/bundled/tbb30_104oss/src/perf/time_hash_map.cpp deleted file mode 100644 index 10205d5167..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/time_hash_map.cpp +++ /dev/null @@ -1,268 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// configuration: -#define TBB_USE_THREADING_TOOLS 0 - -//! enable/disable std::map tests -#define STDTABLE 0 - -//! enable/disable old implementation tests (correct include file also) -#define OLDTABLE 0 -#define OLDTABLEHEADER "tbb/concurrent_hash_map-5468.h"//-4329 - -//! 
enable/disable experimental implementation tests (correct include file also) -#define TESTTABLE 1 -#define TESTTABLEHEADER "tbb/concurrent_unordered_map.h" - -//! avoid erase() -#define TEST_ERASE 0 - -////////////////////////////////////////////////////////////////////////////////// - -#include <cstdlib> -#include <math.h> -#include "tbb/tbb_stddef.h" -#include <vector> -#include <map> -// needed by hash_maps -#include <stdexcept> -#include <iterator> -#include <algorithm> // std::swap -#include <utility> // Need std::pair from here -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/aligned_space.h" -#include "tbb/atomic.h" -#include "tbb/_concurrent_unordered_internal.h" -// for test -#include "tbb/spin_mutex.h" -#include "time_framework.h" - - -using namespace tbb; -using namespace tbb::internal; - -struct IntHashCompare { - size_t operator() ( int x ) const { return x; } - bool operator() ( int x, int y ) const { return x==y; } - static long hash( int x ) { return x; } - bool equal( int x, int y ) const { return x==y; } -}; - -namespace version_current { - namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } - namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - #include "tbb/concurrent_hash_map.h" -} -typedef version_current::tbb::concurrent_hash_map<int,int> IntTable; - -#if OLDTABLE -#undef __TBB_concurrent_hash_map_H -namespace version_base { - namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } - namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - #include OLDTABLEHEADER -} -typedef version_base::tbb::concurrent_hash_map<int,int> OldTable; -#endif - -#if TESTTABLE -#undef __TBB_concurrent_hash_map_H -namespace version_new { - namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } - namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - #include TESTTABLEHEADER -} -typedef version_new::tbb::concurrent_unordered_map<int,int> TestTable; -#define TESTTABLE 1 -#endif - -/////////////////////////////////////// - -static const char *map_testnames[] = { - "1.insert", "2.count1st", "3.count2nd", "4.insert existing", "5.erase" -}; - -template<typename TableType> -struct TestTBBMap : TesterBase { - TableType Table; - int n_items; - - TestTBBMap() : TesterBase(4+TEST_ERASE), Table(MaxThread*4) {} - void init() { n_items = value/threads_count; } - - std::string get_name(int testn) { - return std::string(map_testnames[testn]); - } - - double test(int test, int t) - { - switch(test) { - case 0: // fill - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - Table.insert( std::make_pair(i,i) ); - } - break; - case 1: // work1 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - size_t c = Table.count( i ); - ASSERT( c == 1, NULL); - } - break; - case 2: // work2 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - Table.count( i ); - } - break; - case 3: // work3 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - Table.insert( std::make_pair(i,i) ); - } - break; -#if TEST_ERASE - case 4: // clean - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - ASSERT( Table.erase( i ), NULL); - } 
-#endif - } - return 0; - } -}; - -template<typename M> -struct TestSTLMap : TesterBase { - std::map<int, int> Table; - M mutex; - - int n_items; - TestSTLMap() : TesterBase(4+TEST_ERASE) {} - void init() { n_items = value/threads_count; } - - std::string get_name(int testn) { - return std::string(map_testnames[testn]); - } - - double test(int test, int t) - { - switch(test) { - case 0: // fill - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table[i] = 0; - } - break; - case 1: // work1 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - size_t c = Table.count(i); - ASSERT( c == 1, NULL); - } - break; - case 2: // work2 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table.count(i); - } - break; - case 3: // work3 - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table.insert(std::make_pair(i,i)); - } - break; - case 4: // clean - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - typename M::scoped_lock with(mutex); - Table.erase(i); - } - } - return 0; - } -}; - -class fake_mutex { -public: - class scoped_lock { - fake_mutex *p; - - public: - scoped_lock() {} - scoped_lock( fake_mutex &m ) { p = &m; } - ~scoped_lock() { } - void acquire( fake_mutex &m ) { p = &m; } - void release() { } - }; -}; - -class test_hash_map : public TestProcessor { -public: - test_hash_map() : TestProcessor("test_hash_map") {} - void factory(int value, int threads) { - if(Verbose) printf("Processing with %d threads: %d...\n", threads, value); - process( value, threads, -#if STDTABLE - run("std::map ", new NanosecPerValue<TestSTLMap<spin_mutex> >() ), -#endif -#if OLDTABLE - run("old::hmap", new NanosecPerValue<TestTBBMap<OldTable> >() ), -#endif - run("tbb::hmap", new NanosecPerValue<TestTBBMap<IntTable> >() ), -#if TESTTABLE - run("new::hmap", new NanosecPerValue<TestTBBMap<TestTable> >() ), -#endif - end ); - //stat->Print(StatisticsCollector::Stdout); - //if(value >= 2097152) stat->Print(StatisticsCollector::HTMLFile); - } -}; - -///////////////////////////////////////////////////////////////////////////////////////// - -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - //if(argc>2) ExtraVerbose = true; - MinThread = 1; MaxThread = task_scheduler_init::default_num_threads(); - ParseCommandLine( argc, argv ); - - ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded. Please build it by:\n\t\tmake tbbmalloc"); - - { - test_hash_map the_test; - for( int t=MinThread; t <= MaxThread; t++) - for( int o=/*2048*/(1<<8)*8; o<2200000; o*=2 ) - the_test.factory(o, t); - the_test.report.SetTitle("Nanoseconds per operation of (Mode) for N items in container (Name)"); - the_test.report.SetStatisticFormula("1AVG per size", "=AVERAGE(ROUNDS)"); - the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); - } - return 0; -} - diff --git a/deal.II/bundled/tbb30_104oss/src/perf/time_hash_map_fill.cpp b/deal.II/bundled/tbb30_104oss/src/perf/time_hash_map_fill.cpp deleted file mode 100644 index 3ffcd8070e..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/time_hash_map_fill.cpp +++ /dev/null @@ -1,170 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
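With both the framework header and a full client now visible, the contract is small: derive from TesterBase, report a name, and implement test(). A hedged sketch following the "tester concept" comment in time_framework.h above; the busy-loop payload and all names here are invented, not part of the removed files:

// Minimal user tester for the framework above (hypothetical payload).
struct DummyTester : TesterBase {
    DummyTester() : TesterBase(1) {}                    // one embedded test mode
    /*override*/ std::string get_name(int) { return "busy loop"; }
    /*override*/ value_t test(int /*testn*/, int /*threadn*/) {
        volatile arg_t sink = 0;
        for (arg_t i = 0; i < value; ++i) sink += i;    // 'value' was set via base_init()
        return 0;                                       // ignored by the timing wrappers
    }
};
// Wrapped so each round records elapsed time scaled to microseconds:
typedef TimeTest<DummyTester, 1000000> TimedDummyTester;
// A TestProcessor subclass would register it from its factory() as
//     process(value, threads, run("dummy", new TimedDummyTester()), end);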
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// configuration: - -// Size of final table (must be multiple of STEP_*) -int MAX_TABLE_SIZE = 2000000; - -// Specify list of unique percents (5-30,100) to test against. Max 10 values -#define UNIQUE_PERCENTS PERCENT(5); PERCENT(10); PERCENT(20); PERCENT(30); PERCENT(100) - -// enable/disable tests for: -#define BOX1 "CHMap" -#define BOX1TEST ValuePerSecond<Uniques<tbb::concurrent_hash_map<int,int> >, 1000000/*ns*/> -#define BOX1HEADER "tbb/concurrent_hash_map.h" - -// enable/disable tests for: -#define BOX2 "CUMap" -#define BOX2TEST ValuePerSecond<Uniques<tbb::concurrent_unordered_map<int,int> >, 1000000/*ns*/> -#define BOX2HEADER "tbb/concurrent_unordered_map.h" - -// enable/disable tests for: -//#define BOX3 "OLD" -#define BOX3TEST ValuePerSecond<Uniques<tbb::concurrent_hash_map<int,int> >, 1000000/*ns*/> -#define BOX3HEADER "tbb/concurrent_hash_map-5468.h" - -#define TBB_USE_THREADING_TOOLS 0 -////////////////////////////////////////////////////////////////////////////////// - -#include <cstdlib> -#include <math.h> -#include "tbb/tbb_stddef.h" -#include <vector> -#include <map> -// needed by hash_maps -#include <stdexcept> -#include <iterator> -#include <algorithm> // std::swap -#include <utility> // Need std::pair -#include <cstring> // Need std::memset -#include <typeinfo> -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/aligned_space.h" -#include "tbb/atomic.h" -#include "tbb/_concurrent_unordered_internal.h" -// for test -#include "tbb/spin_mutex.h" -#include "time_framework.h" - - -using namespace tbb; -using namespace tbb::internal; - -///////////////////////////////////////////////////////////////////////////////////////// -// Input data built for test -int *Data; - -// Main test class used to run the timing tests. 
All overridden methods are called by the framework -template<typename TableType> -struct Uniques : TesterBase { - TableType Table; - int n_items; - - // Initializes base class with number of test modes - Uniques() : TesterBase(2), Table(MaxThread*16) { - //Table->max_load_factor(1); // add stub into hash_map to uncomment it - } - ~Uniques() {} - - // Returns name of test mode specified by number - /*override*/ std::string get_name(int testn) { - if(testn == 1) return "find"; - return "insert"; - } - - // Informs the class that value and threads number become known - /*override*/ void init() { - n_items = value/threads_count; // operations - } - - // Informs the class that the test mode for specified thread is about to start - /*override*/ void test_prefix(int testn, int t) { - barrier->wait(); - if(Verbose && !t && testn) printf("%s: inserted %u, %g%% of operations\n", tester_name, unsigned(Table.size()), 100.0*Table.size()/(value*testn)); - } - - // Executes test mode for a given thread. Return value is ignored when used with timing wrappers. - /*override*/ double test(int testn, int t) - { - if( testn != 1 ) { // do insertions - for(int i = testn*value+t*n_items, e = testn*value+(t+1)*n_items; i < e; i++) { - Table.insert( std::make_pair(Data[i],t) ); - } - } else { // do last finds - for(int i = t*n_items, e = (t+1)*n_items; i < e; i++) { - size_t c = - Table.count( Data[i] ); - ASSERT( c == 1, NULL ); // must exist - } - } - return 0; - } -}; - -///////////////////////////////////////////////////////////////////////////////////////// -#include <limits> - -// Using BOX declarations from configuration -#include "time_sandbox.h" - -int rounds = 0; -// Prepares the input data for given unique percent -void execute_percent(test_sandbox &the_test, int p) { - int input_size = MAX_TABLE_SIZE*100/p; - Data = new int[input_size]; - int uniques = p==100?std::numeric_limits<int>::max() : MAX_TABLE_SIZE; - ASSERT(p==100 || p <= 30, "Function is broken for %% > 30 except for 100%%"); - for(int i = 0; i < input_size; i++) - Data[i] = rand()%uniques; - for(int t = MinThread; t <= MaxThread; t++) - the_test.factory(input_size, t); // executes the tests specified in BOX-es for given 'value' and threads - the_test.report.SetRoundTitle(rounds++, "%d%%", p); -} -#define PERCENT(x) execute_percent(the_test, x) - -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - //if(argc>2) ExtraVerbose = true; - MinThread = 1; MaxThread = task_scheduler_init::default_num_threads(); - ParseCommandLine( argc, argv ); - if(getenv("TABLE_SIZE")) - MAX_TABLE_SIZE = atoi(getenv("TABLE_SIZE")); - - ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded. Please build it by:\n\t\tmake tbbmalloc"); - // Declares test processor - test_sandbox the_test("time_hash_map_fill"/*, StatisticsCollector::ByThreads*/); - srand(10101); - UNIQUE_PERCENTS; // test the percents - the_test.report.SetTitle("Operations per nanosecond"); - the_test.report.SetRunInfo("Items", MAX_TABLE_SIZE); - the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); // Write files - return 0; -} diff --git a/deal.II/bundled/tbb30_104oss/src/perf/time_locked_work.cpp b/deal.II/bundled/tbb30_104oss/src/perf/time_locked_work.cpp deleted file mode 100644 index 13a939129b..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/time_locked_work.cpp +++ /dev/null @@ -1,174 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. 
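For readers unfamiliar with the BOX convention used by this and the following perf drivers: each enabled BOXn triple (name, test type, header) is consumed by time_sandbox.h, removed later in this patch. Roughly, the BOX2 lines above boil down to the following simplified sketch of the expansion (the real header also re-injects the tbb namespaces first):

// Simplified sketch of what time_sandbox.h generates for BOX2 ("CUMap"):
namespace sandbox2 {
    #include "tbb/concurrent_unordered_map.h"                  // BOX2HEADER
    typedef ::ValuePerSecond< Uniques<tbb::concurrent_unordered_map<int,int> >,
                              1000000/*ns*/ > testbox;         // BOX2TEST
}
// ...and test_sandbox::factory() registers it as
//     run("2.CUMap", new sandbox2::testbox())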
- - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -////// Test configuration //////////////////////////////////////////////////// -#define SECONDS_RATIO 1000000 // microseconds - -#ifndef REPEAT_K -#define REPEAT_K 50 // repeat coefficient -#endif - -int outer_work[] = {/*256,*/ 64, 16, 4, 0}; -int inner_work[] = {32, 8, 0 }; - -// keep it to calibrate the time of work without synchronization -#define BOX1 "baseline" -#define BOX1TEST TimeTest< TBB_Mutex<tbb::null_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -#define BOX2 "spin_mutex" -#define BOX2TEST TimeTest< TBB_Mutex<tbb::spin_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -#define BOX3 "spin_rw_mutex" -#define BOX3TEST TimeTest< TBB_Mutex<tbb::spin_rw_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -#define BOX4 "queuing_mutex" -#define BOX4TEST TimeTest< TBB_Mutex<tbb::queuing_mutex>, SECONDS_RATIO > - -// enable/disable tests for: -//#define BOX5 "queuing_rw_mutex" -#define BOX5TEST TimeTest< TBB_Mutex<tbb::queuing_rw_mutex>, SECONDS_RATIO > - -////////////////////////////////////////////////////////////////////////////// - -#include <cstdlib> -#include <math.h> -#include <algorithm> // std::swap -#include <utility> // Need std::pair from here -#include <sstream> -#include "tbb/tbb_stddef.h" -#include "tbb/null_mutex.h" -#include "tbb/spin_rw_mutex.h" -#include "tbb/spin_mutex.h" -#include "tbb/queuing_mutex.h" -#include "tbb/queuing_rw_mutex.h" -#include "tbb/mutex.h" - -#if INTEL_TRIAL==2 -#include "tbb/parallel_for.h" // enable threading by TBB scheduler -#include "tbb/task_scheduler_init.h" -#include "tbb/blocked_range.h" -#endif -// for test -#include "time_framework.h" - -using namespace tbb; -using namespace tbb::internal; - -///////////////////////////////////////////////////////////////////////////////////////// - -//! base class for tests family -struct TestLocks : TesterBase { - // Inherits "value", "threads_count", and other variables - TestLocks() : TesterBase(/*number of modes*/sizeof(outer_work)/sizeof(int)) {} - //! returns name of test part/mode - /*override*/std::string get_name(int testn) { - std::ostringstream buf; - buf.width(4); buf.fill('0'); - buf << outer_work[testn]; // mode number - return buf.str(); - } - //! 
enables results types and returns theirs suffixes - /*override*/const char *get_result_type(int, result_t type) const { - switch(type) { - case MIN: return " min"; - case MAX: return " max"; - default: return 0; - } - } - //! repeats count - int repeat_until(int /*test_n*/) const { - return REPEAT_K*100;//TODO: suggest better? - } - //! fake work - void do_work(int work) volatile { - for(int i = 0; i < work; i++) { - volatile int x = i; - __TBB_Pause(0); // just to call inline assembler - x *= work/threads_count; - } - } -}; - -//! template test unit for any of TBB mutexes -template<typename M> -struct TBB_Mutex : TestLocks { - M mutex; - - double test(int testn, int /*threadn*/) - { - for(int r = 0; r < repeat_until(testn); ++r) { - do_work(outer_work[testn]); - { - typename M::scoped_lock with(mutex); - do_work(/*inner work*/value); - } - } - return 0; - } -}; - -///////////////////////////////////////////////////////////////////////////////////////// - -//Using BOX declarations -#include "time_sandbox.h" - -// run tests for each of inner work value -void RunLoops(test_sandbox &the_test, int thread) { - for( unsigned i=0; i<sizeof(inner_work)/sizeof(int); ++i ) - the_test.factory(inner_work[i], thread); -} - -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - int DefThread = task_scheduler_init::default_num_threads(); - MinThread = 1; MaxThread = DefThread+1; - ParseCommandLine( argc, argv ); - ASSERT(MinThread <= MaxThread, 0); -#if INTEL_TRIAL && defined(__TBB_parallel_for_H) - task_scheduler_init me(MaxThread); -#endif - { - test_sandbox the_test("time_locked_work", StatisticsCollector::ByThreads); - //TODO: refactor this out as RunThreads(test&) - for( int t = MinThread; t < DefThread && t <= MaxThread; t *= 2) - RunLoops( the_test, t ); // execute undersubscribed threads - if( DefThread > MinThread && DefThread <= MaxThread ) - RunLoops( the_test, DefThread ); // execute on all hw threads - if( DefThread < MaxThread) - RunLoops( the_test, MaxThread ); // execute requested oversubscribed threads - - the_test.report.SetTitle("Time of lock/unlock for mutex Name with Outer and Inner work"); - //the_test.report.SetStatisticFormula("1AVG per size", "=AVERAGE(ROUNDS)"); - the_test.report.Print(StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML, /*ModeName*/ "Outer work"); - } - return 0; -} - diff --git a/deal.II/bundled/tbb30_104oss/src/perf/time_sandbox.h b/deal.II/bundled/tbb30_104oss/src/perf/time_sandbox.h deleted file mode 100644 index 950e6747c3..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/time_sandbox.h +++ /dev/null @@ -1,179 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
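One of the five lock candidates above, queuing_rw_mutex, is wired up but left disabled. Since its BOX5TEST type is already defined and tbb/queuing_rw_mutex.h is already included, re-enabling it is a one-line change; a sketch, mirroring how BOX4 is already handled by time_sandbox.h (removed just below):

// Sketch: re-enable the queuing_rw_mutex measurement in time_locked_work.cpp.
#define BOX5 "queuing_rw_mutex"
// With BOX5 defined, time_sandbox.h opens namespace sandbox5, typedefs
// ::BOX5TEST (TimeTest< TBB_Mutex<tbb::queuing_rw_mutex>, SECONDS_RATIO >)
// as sandbox5::testbox, and factory() gains
//     run("5.queuing_rw_mutex", new sandbox5::testbox())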
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TIME_FRAMEWORK_H__ -#error time_framework.h must be included -#endif - -#define INJECT_TBB namespace tbb { using namespace ::tbb; namespace internal { using namespace ::tbb::internal; } } -#define INJECT_TBB5 namespace tbb { namespace interface5 { using namespace ::tbb::interface5; namespace internal { using namespace ::tbb::interface5::internal; } } } - -#ifndef INJECT_BOX_NAMES -#if defined(__TBB_task_H) || defined(__TBB_concurrent_unordered_internal_H) || defined(__TBB_reader_writer_lock_H) -#define INJECT_BOX_NAMES INJECT_TBB INJECT_TBB5 -#else -#define INJECT_BOX_NAMES INJECT_TBB -#endif -#endif - -#ifdef BOX1 -namespace sandbox1 { - INJECT_BOX_NAMES -# ifdef BOX1HEADER -# include BOX1HEADER -# endif - typedef ::BOX1TEST testbox; -} -#endif -#ifdef BOX2 -namespace sandbox2 { - INJECT_BOX_NAMES -# ifdef BOX2HEADER -# include BOX2HEADER -# endif - typedef ::BOX2TEST testbox; -} -#endif -#ifdef BOX3 -namespace sandbox3 { - INJECT_BOX_NAMES -# ifdef BOX3HEADER -# include BOX3HEADER -# endif - typedef ::BOX3TEST testbox; -} -#endif -#ifdef BOX4 -namespace sandbox4 { - INJECT_BOX_NAMES -# ifdef BOX4HEADER -# include BOX4HEADER -# endif - typedef ::BOX4TEST testbox; -} -#endif -#ifdef BOX5 -namespace sandbox5 { - INJECT_BOX_NAMES -# ifdef BOX5HEADER -# include BOX5HEADER -# endif - typedef ::BOX5TEST testbox; -} -#endif -#ifdef BOX6 -namespace sandbox6 { - INJECT_BOX_NAMES -# ifdef BOX6HEADER -# include BOX6HEADER -# endif - typedef ::BOX6TEST testbox; -} -#endif -#ifdef BOX7 -namespace sandbox7 { - INJECT_BOX_NAMES -# ifdef BOX7HEADER -# include BOX7HEADER -# endif - typedef ::BOX7TEST testbox; -} -#endif -#ifdef BOX8 -namespace sandbox8 { - INJECT_BOX_NAMES -# ifdef BOX8HEADER -# include BOX8HEADER -# endif - typedef ::BOX8TEST testbox; -} -#endif -#ifdef BOX9 -namespace sandbox9 { - INJECT_BOX_NAMES -# ifdef BOX9HEADER -# include BOX9HEADER -# endif - typedef ::BOX9TEST testbox; -} -#endif - -//if harness.h included -#if defined(ASSERT) && !HARNESS_NO_PARSE_COMMAND_LINE -#ifndef TEST_PREFIX -#define TEST_PREFIX if(Verbose) printf("Processing with %d threads: %ld...\n", threads, long(value)); -#endif -#endif//harness included - -#ifndef TEST_PROCESSOR_NAME -#define TEST_PROCESSOR_NAME test_sandbox -#endif - -class TEST_PROCESSOR_NAME : public TestProcessor { -public: - TEST_PROCESSOR_NAME(const char *name, StatisticsCollector::Sorting sort_by = StatisticsCollector::ByAlg) - : TestProcessor(name, sort_by) {} - void factory(arg_t value, int threads) { -#ifdef TEST_PREFIX - TEST_PREFIX -#endif - process( value, threads, -#define RUNBOX(n) run(#n"."BOX##n, new sandbox##n::testbox() ) -#ifdef BOX1 - RUNBOX(1), -#endif -#ifdef BOX2 - RUNBOX(2), -#endif -#ifdef BOX3 - RUNBOX(3), -#endif -#ifdef BOX4 - RUNBOX(4), -#endif -#ifdef BOX5 - RUNBOX(5), -#endif -#ifdef BOX6 - RUNBOX(6), -#endif -#ifdef BOX7 - RUNBOX(7), -#endif -#ifdef BOX8 - RUNBOX(8), -#endif -#ifdef BOX9 - RUNBOX(9), -#endif - end ); -#ifdef TEST_POSTFIX - TEST_POSTFIX -#endif - } -}; diff --git a/deal.II/bundled/tbb30_104oss/src/perf/time_vector.cpp 
b/deal.II/bundled/tbb30_104oss/src/perf/time_vector.cpp deleted file mode 100644 index 36442f1570..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/perf/time_vector.cpp +++ /dev/null @@ -1,257 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -//#define DO_SCALABLEALLOC - -#include <cstdlib> -#include <cmath> -#include <vector> -#include <algorithm> -#include <functional> -#include <numeric> -#include "tbb/tbb_stddef.h" -#include "tbb/spin_mutex.h" -#ifdef DO_SCALABLEALLOC -#include "tbb/scalable_allocator.h" -#endif -#include "tbb/concurrent_vector.h" -#include "tbb/tbb_allocator.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/tick_count.h" -#include "tbb/blocked_range.h" -#define HARNESS_CUSTOM_MAIN 1 -#include "../test/harness.h" -//#include "harness_barrier.h" -#include "../test/harness_allocator.h" -#define STATISTICS_INLINE -#include "statistics.h" - -using namespace tbb; -bool ExtraVerbose = false; - -class Timer { - tbb::tick_count tick; -public: - Timer() { tick = tbb::tick_count::now(); } - double get_time() { return (tbb::tick_count::now() - tick).seconds(); } - double diff_time(const Timer &newer) { return (newer.tick - tick).seconds(); } - double mark_time() { tick_count t1(tbb::tick_count::now()), t2(tick); tick = t1; return (t1 - t2).seconds(); } - double mark_time(const Timer &newer) { tick_count t(tick); tick = newer.tick; return (tick - t).seconds(); } -}; - -/************************************************************************/ -/* TEST1 */ -/************************************************************************/ -#define mk_vector_test1(v, a) vector_test1<v<Timer, static_counting_allocator<a<Timer> > >, v<double, static_counting_allocator<a<double> > > > -template<class timers_vector_t, class values_vector_t> -class vector_test1 { - const char *mode; - StatisticsCollector &stat; - StatisticsCollector::TestCase key[16]; - -public: - vector_test1(const char *m, StatisticsCollector &s) : mode(m), stat(s) {} - - vector_test1 &operator()(size_t len) { - if(Verbose) printf("test1<%s>(%u): collecting timing statistics\n", mode, unsigned(len)); - __TBB_ASSERT(sizeof(Timer) == sizeof(double), NULL); - static const 
char *test_names[] = { - "b)creation wholly", - "a)creation by push", - "c)operation time per item", - 0 }; - for(int i = 0; test_names[i]; ++i) key[i] = stat.SetTestCase(test_names[i], mode, len); - - Timer timer0; timers_vector_t::allocator_type::init_counters(); - timers_vector_t tv(len); - Timer timer1; values_vector_t::allocator_type::init_counters(); - values_vector_t dv; - for (size_t i = 0; i < len; ++i) - dv.push_back( i ); - Timer timer2; - for (size_t i = 0; i < len; ++i) - { - dv[len-i-1] = timer0.diff_time(tv[i]); - tv[i].mark_time(); - } - stat.AddStatisticValue( key[2], "1total, ms", "%.3f", timer2.get_time()*1000.0 ); - stat.AddStatisticValue( key[1], "1total, ms", "%.3f", timer1.diff_time(timer2)*1000.0 ); - stat.AddStatisticValue( key[0], "1total, ms", "%.3f", timer0.diff_time(timer1)*1000.0 ); - //allocator statistics - stat.AddStatisticValue( key[0], "2total allocations", "%d", int(timers_vector_t::allocator_type::allocations) ); - stat.AddStatisticValue( key[1], "2total allocations", "%d", int(values_vector_t::allocator_type::allocations) ); - stat.AddStatisticValue( key[2], "2total allocations", "%d", 0); - stat.AddStatisticValue( key[0], "3total alloc#items", "%d", int(timers_vector_t::allocator_type::items_allocated) ); - stat.AddStatisticValue( key[1], "3total alloc#items", "%d", int(values_vector_t::allocator_type::items_allocated) ); - stat.AddStatisticValue( key[2], "3total alloc#items", "%d", 0); - //remarks - stat.AddStatisticValue( key[0], "9note", "segment creation time, ns:"); - stat.AddStatisticValue( key[2], "9note", "average op-time per item, ns:"); - Timer last_timer(timer2); double last_value = 0; - for (size_t j = 0, i = 2; i < len; i *= 2, j++) { - stat.AddRoundResult( key[0], (dv[len-i-1]-last_value)*1000000.0 ); - last_value = dv[len-i-1]; - stat.AddRoundResult( key[2], last_timer.diff_time(tv[i])/double(i)*1000000.0 ); - last_timer = tv[i]; - stat.SetRoundTitle(j, i); - } - tv.clear(); dv.clear(); - //__TBB_ASSERT(timers_vector_t::allocator_type::items_allocated == timers_vector_t::allocator_type::items_freed, NULL); - //__TBB_ASSERT(values_vector_t::allocator_type::items_allocated == values_vector_t::allocator_type::items_freed, NULL); - return *this; - } -}; - -/************************************************************************/ -/* TEST2 */ -/************************************************************************/ -#define mk_vector_test2(v, a) vector_test2<v<size_t, a<size_t> > > -template<class vector_t> -class vector_test2 { - const char *mode; - static const int ntrial = 10; - StatisticsCollector &stat; - -public: - vector_test2(const char *m, StatisticsCollector &s) : mode(m), stat(s) {} - - vector_test2 &operator()(size_t len) { - if(Verbose) printf("test2<%s>(%u): performing standard transformation sequence on vector\n", mode, unsigned(len)); - StatisticsCollector::TestCase init_key = stat.SetTestCase("allocate", mode, len); - StatisticsCollector::TestCase fill_key = stat.SetTestCase("fill", mode, len); - StatisticsCollector::TestCase proc_key = stat.SetTestCase("process", mode, len); - StatisticsCollector::TestCase full_key = stat.SetTestCase("total time", mode, len); - for (int i = 0; i < ntrial; i++) { - Timer timer0; - vector_t v1(len); - vector_t v2(len); - Timer timer1; - std::generate(v1.begin(), v1.end(), values(0)); - std::generate(v2.begin(), v2.end(), values(size_t(-len))); - Timer timer2; - std::reverse(v1.rbegin(), v1.rend()); - std::inner_product(v1.begin(), v1.end(), v2.rbegin(), 1); - std::sort(v1.rbegin(), 
v1.rend()); - std::sort(v2.rbegin(), v2.rend()); - std::set_intersection(v1.begin(), v1.end(), v2.rbegin(), v2.rend(), v1.begin()); - Timer timer3; - stat.AddRoundResult( proc_key, timer2.diff_time(timer3)*1000.0 ); - stat.AddRoundResult( fill_key, timer1.diff_time(timer2)*1000.0 ); - stat.AddRoundResult( init_key, timer0.diff_time(timer1)*1000.0 ); - stat.AddRoundResult( full_key, timer0.diff_time(timer3)*1000.0 ); - } - stat.SetStatisticFormula("1Average", "=AVERAGE(ROUNDS)"); - stat.SetStatisticFormula("2+/-", "=(MAX(ROUNDS)-MIN(ROUNDS))/2"); - return *this; - } - - class values - { - size_t value; - public: - values(size_t i) : value(i) {} - size_t operator()() { - return value++%(1|(value^55)); - } - }; -}; - -/************************************************************************/ -/* TEST3 */ -/************************************************************************/ -#define mk_vector_test3(v, a) vector_test3<v<char, local_counting_allocator<a<char>, size_t > > > -template<class vector_t> -class vector_test3 { - const char *mode; - StatisticsCollector &stat; - -public: - vector_test3(const char *m, StatisticsCollector &s) : mode(m), stat(s) {} - - vector_test3 &operator()(size_t len) { - if(Verbose) printf("test3<%s>(%u): collecting allocator statistics\n", mode, unsigned(len)); - static const size_t sz = 1024; - vector_t V[sz]; - StatisticsCollector::TestCase vinst_key = stat.SetTestCase("instances number", mode, len); - StatisticsCollector::TestCase count_key = stat.SetTestCase("allocations count", mode, len); - StatisticsCollector::TestCase items_key = stat.SetTestCase("allocated items", mode, len); - //stat.ReserveRounds(sz-1); - for (size_t c = 0, i = 0, s = sz/2; s >= 1 && i < sz; s /= 2, c++) - { - const size_t count = c? 1<<(c-1) : 0; - for (size_t e = i+s; i < e; i++) { - //if(count >= 16) V[i].reserve(count); - for (size_t j = 0; j < count; j++) - V[i].push_back(j); - } - stat.SetRoundTitle ( c, count ); - stat.AddRoundResult( vinst_key, s ); - stat.AddRoundResult( count_key, V[i-1].get_allocator().allocations ); - stat.AddRoundResult( items_key, V[i-1].get_allocator().items_allocated ); - } - return *this; - } -}; - -/************************************************************************/ -/* TYPES SET FOR TESTS */ -/************************************************************************/ -#define types_set(n, title, op) { StatisticsCollector Collector("time_vector"#n); Collector.SetTitle title; \ - {mk_vector_test##n(tbb::concurrent_vector, tbb::cache_aligned_allocator) ("TBB:NFS", Collector)op;} \ - {mk_vector_test##n(tbb::concurrent_vector, tbb::tbb_allocator) ("TBB:TBB", Collector)op;} \ - {mk_vector_test##n(tbb::concurrent_vector, std::allocator) ("TBB:STD", Collector)op;} \ - {mk_vector_test##n(std::vector, tbb::cache_aligned_allocator) ("STL:NFS", Collector)op;} \ - {mk_vector_test##n(std::vector, tbb::tbb_allocator) ("STL:TBB", Collector)op;} \ - {mk_vector_test##n(std::vector, std::allocator) ("STL:STD", Collector)op;} \ - Collector.Print(StatisticsCollector::Stdout|StatisticsCollector::HTMLFile|StatisticsCollector::ExcelXML); } - - -/************************************************************************/ -/* MAIN DRIVER */ -/************************************************************************/ -int main(int argc, char* argv[]) { - if(argc>1) Verbose = true; - if(argc>2) ExtraVerbose = true; - MinThread = 0; MaxThread = 500000; // use in another meaning - test#:problem size - ParseCommandLine( argc, argv ); - - 
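The Timer helper in this perf test is a thin wrapper over tbb::tick_count. As a minimal standalone sketch of the same timing idiom (independent of the harness and StatisticsCollector machinery; the loop body is only placeholder work):

    #include <cstdio>
    #include "tbb/tick_count.h"

    int main() {
        tbb::tick_count t0 = tbb::tick_count::now();
        volatile double sink = 0;                      // placeholder work to time
        for (int i = 0; i < 1000000; ++i) sink += i;
        tbb::tick_count t1 = tbb::tick_count::now();
        // Differences of tick_count values report wall-clock seconds,
        // exactly what Timer::diff_time() returns above.
        std::printf("elapsed: %.3f ms\n", (t1 - t0).seconds() * 1000.0);
        return 0;
    }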
ASSERT(tbb_allocator<int>::allocator_type() == tbb_allocator<int>::scalable, "expecting scalable allocator library to be loaded"); - - if(!MinThread || MinThread == 1) - types_set(1, ("Vectors performance test #1 for %d", MaxThread), (MaxThread) ) - if(!MinThread || MinThread == 2) - types_set(2, ("Vectors performance test #2 for %d", MaxThread), (MaxThread) ) - if(!MinThread || MinThread == 3) - types_set(3, ("Vectors performance test #3 for %d", MaxThread), (MaxThread) ) - - if(!Verbose) printf("done\n"); - return 0; -} - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/client/index.html b/deal.II/bundled/tbb30_104oss/src/rml/client/index.html deleted file mode 100644 index e92185cfa6..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/client/index.html +++ /dev/null @@ -1,43 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -This directory has source code that must be statically linked into an RML client. - -<H2>Files</H2> - -<DL> -<DT><P><A HREF="rml_factory.h">rml_factory.h</A> -<DD>Text shared by <A HREF="rml_omp.cpp">rml_omp.cpp</A> and <A HREF="rml_tbb.cpp">rml_tbb.cpp</A>. - This is not an ordinary include file, so it does not have an #ifndef guard.</P> -</DL> - -<H3> Specific to client=OpenMP</H3> -<DL> -<DT><P><A HREF="rml_omp.cpp">rml_omp.cpp</A> -<DD>Source file for OpenMP client.</P> -<DT><P><A HREF="omp_dynamic_link.h">omp_dynamic_link.h</A> -<DT><A HREF="omp_dynamic_link.cpp">omp_dynamic_link.cpp</A> -<DD>Source files for dynamic linking support. - The code is the code from the TBB source directory, but adjusted so that it - appears in namespace <TT>__kmp</TT> instead of namespace <TT>tbb::internal</TT>. -</DL> -<H3> Specific to client=TBB</H3> -<DL> -<DT><P><A HREF="rml_tbb.cpp">rml_tbb.cpp</A> -<DD>Source file for TBB client. It uses the dynamic linking support from the TBB source directory. -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -<p></p> -Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/client/library_assert.h b/deal.II/bundled/tbb30_104oss/src/rml/client/library_assert.h deleted file mode 100644 index f198fa5e47..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/client/library_assert.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef LIBRARY_ASSERT_H -#define LIBRARY_ASSERT_H - -#ifndef LIBRARY_ASSERT -#ifdef KMP_ASSERT2 -#define LIBRARY_ASSERT(x,y) KMP_ASSERT2((x),(y)) -#else -#include <assert.h> -#define LIBRARY_ASSERT(x,y) assert(x) -#endif -#endif /* LIBRARY_ASSERT */ - -#endif /* LIBRARY_ASSERT_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.cpp b/deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.cpp deleted file mode 100644 index c40e9413f7..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.cpp +++ /dev/null @@ -1,32 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "omp_dynamic_link.h" -#include "library_assert.h" -#include "tbb/dynamic_link.cpp" // Refers to src/tbb, not include/tbb - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.h b/deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.h deleted file mode 100644 index 02f306c52c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/client/omp_dynamic_link.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
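library_assert.h, deleted above, exists so the shared dynamic-link code can assert through a host-provided KMP_ASSERT2 when compiled into the OpenMP runtime and fall back to the C standard assert() otherwise. A small standalone usage sketch of that fallback (only the assert() branch is active here, since KMP_ASSERT2 is not defined):

    #include <assert.h>

    // Same fallback as library_assert.h: prefer KMP_ASSERT2 when available,
    // otherwise degrade to plain assert() and drop the message argument.
    #ifndef LIBRARY_ASSERT
    #ifdef KMP_ASSERT2
    #define LIBRARY_ASSERT(x,y) KMP_ASSERT2((x),(y))
    #else
    #define LIBRARY_ASSERT(x,y) assert(x)
    #endif
    #endif

    int main() {
        void* library_handle = 0;
        LIBRARY_ASSERT( !library_handle, "factory must be zero-initialized before open()" );
        return 0;
    }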
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __KMP_omp_dynamic_link_H -#define __KMP_omp_dynamic_link_H - -#define OPEN_INTERNAL_NAMESPACE namespace __kmp { -#define CLOSE_INTERNAL_NAMESPACE } - -#include "tbb/dynamic_link.h" // Refers to src/tbb, not include/tbb - -#endif /* __KMP_omp_dynamic_link_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/client/rml_factory.h b/deal.II/bundled/tbb30_104oss/src/rml/client/rml_factory.h deleted file mode 100644 index 3fc86b9d77..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/client/rml_factory.h +++ /dev/null @@ -1,111 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// No ifndef guard because this file is not a normal include file. - -#if TBB_USE_DEBUG -#define DEBUG_SUFFIX "_debug" -#else -#define DEBUG_SUFFIX -#endif /* TBB_USE_DEBUG */ - -// RML_SERVER_NAME is the name of the RML server library. 
-#if _WIN32||_WIN64 -#define RML_SERVER_NAME "irml" DEBUG_SUFFIX ".dll" -#elif __APPLE__ -#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".dylib" -#elif __linux__ -#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so.1" -#elif __NetBSD__ || __FreeBSD__ || __sun || _AIX -#define RML_SERVER_NAME "libirml" DEBUG_SUFFIX ".so" -#else -#error Unknown OS -#endif - -#include "library_assert.h" - -const ::rml::versioned_object::version_type CLIENT_VERSION = 2; - -#if __TBB_WEAK_SYMBOLS - #pragma weak __RML_open_factory - #pragma weak __TBB_make_rml_server - #pragma weak __RML_close_factory - #pragma weak __TBB_call_with_my_server_info - extern "C" { - ::rml::factory::status_type __RML_open_factory ( ::rml::factory&, ::rml::versioned_object::version_type&, ::rml::versioned_object::version_type ); - ::rml::factory::status_type __TBB_make_rml_server( tbb::internal::rml::tbb_factory& f, tbb::internal::rml::tbb_server*& server, tbb::internal::rml::tbb_client& client ); - void __TBB_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ); - void __RML_close_factory( ::rml::factory& f ); - } -#endif /* __TBB_WEAK_SYMBOLS */ - -::rml::factory::status_type FACTORY::open() { - // Failure of following assertion indicates that factory is already open, or not zero-inited. - LIBRARY_ASSERT( !library_handle, NULL ); - status_type (*open_factory_routine)( factory&, version_type&, version_type ); - dynamic_link_descriptor server_link_table[4] = { - DLD(__RML_open_factory,open_factory_routine), - MAKE_SERVER(my_make_server_routine), - DLD(__RML_close_factory,my_wait_to_close_routine), - GET_INFO(my_call_with_server_info_routine), - }; - status_type result; - dynamic_link_handle h; - if( dynamic_link( RML_SERVER_NAME, server_link_table, 4, 4, &h ) ) { - library_handle = h; - version_type server_version; - result = (*open_factory_routine)( *this, server_version, CLIENT_VERSION ); - // server_version can be checked here for incompatibility here if necessary. - } else { - library_handle = NULL; - result = st_not_found; - } - return result; -} - -void FACTORY::close() { - if( library_handle ) - (*my_wait_to_close_routine)(*this); - if( (size_t)library_handle>FACTORY::c_dont_unload ) { - dynamic_link_handle h = library_handle; - dynamic_unlink(h); - library_handle = NULL; - } -} - -::rml::factory::status_type FACTORY::make_server( SERVER*& s, CLIENT& c) { - // Failure of following assertion means that factory was not successfully opened. - LIBRARY_ASSERT( my_make_server_routine, NULL ); - return (*my_make_server_routine)(*this,s,c); -} - -void FACTORY::call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const { - // Failure of following assertion means that factory was not successfully opened. - LIBRARY_ASSERT( my_call_with_server_info_routine, NULL ); - (*my_call_with_server_info_routine)( cb, arg ); -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/client/rml_omp.cpp b/deal.II/bundled/tbb30_104oss/src/rml/client/rml_omp.cpp deleted file mode 100644 index 336fd9c22f..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/client/rml_omp.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
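FACTORY::open() above loads the RML server library at run time, resolves its entry points through TBB's internal dynamic_link helper, and reports st_not_found when the library is absent. The same load/resolve/close shape, reduced to plain POSIX dlopen/dlsym as an illustrative sketch (the library and symbol names are hypothetical stand-ins for RML_SERVER_NAME and __RML_open_factory, not real RML symbols, and this does not use the TBB helper API):

    #include <dlfcn.h>
    #include <cstdio>

    typedef int (*open_factory_t)( int client_version );

    int main() {
        // Load the server library, as FACTORY::open() does via dynamic_link().
        void* handle = dlopen( "libexample_server.so", RTLD_NOW );
        if( !handle ) {
            std::printf( "server library not found: %s\n", dlerror() );
            return 1;  // corresponds to the st_not_found path above
        }
        // Resolve one entry point; dynamic_link() resolves a whole table at once.
        open_factory_t open_factory =
            reinterpret_cast<open_factory_t>( dlsym( handle, "example_open_factory" ) );
        int status = open_factory ? open_factory( 2 ) : -1;
        std::printf( "open_factory returned %d\n", status );
        dlclose( handle );  // analogous to dynamic_unlink() in FACTORY::close()
        return 0;
    }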
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "rml_omp.h" -#include "omp_dynamic_link.h" -#include <assert.h> - -namespace __kmp { -namespace rml { - -#define MAKE_SERVER(x) DLD(__KMP_make_rml_server,x) -#define GET_INFO(x) DLD(__KMP_call_with_my_server_info,x) -#define SERVER omp_server -#define CLIENT omp_client -#define FACTORY omp_factory -#include "rml_factory.h" - -} // rml -} // __kmp diff --git a/deal.II/bundled/tbb30_104oss/src/rml/client/rml_tbb.cpp b/deal.II/bundled/tbb30_104oss/src/rml/client/rml_tbb.cpp deleted file mode 100644 index d62773786c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/client/rml_tbb.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "../include/rml_tbb.h" -#include "tbb/dynamic_link.h" -#include <assert.h> - -namespace tbb { -namespace internal { -namespace rml { - -#define MAKE_SERVER(x) DLD(__TBB_make_rml_server,x) -#define GET_INFO(x) DLD(__TBB_call_with_my_server_info,x) -#define SERVER tbb_server -#define CLIENT tbb_client -#define FACTORY tbb_factory -#include "rml_factory.h" - -} // rml -} // internal -} // tbb diff --git a/deal.II/bundled/tbb30_104oss/src/rml/include/index.html b/deal.II/bundled/tbb30_104oss/src/rml/include/index.html deleted file mode 100644 index 6a47794448..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/include/index.html +++ /dev/null @@ -1,30 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -This directory has the include files for the Resource Management Layer (RML). - -<H2>Files</H2> - -<DL> -<DT><P><A HREF="rml_base.h">rml_base.h</A> -<DD>Interfaces shared by TBB and OpenMP.</P> -<DT><P><A HREF="rml_omp.h">rml_omp.h</A> -<DD>Interface exclusive to OpenMP.</P> -<DT><P><A HREF="rml_tbb.h">rml_tbb.h</A> -<DD>Interface exclusive to TBB.</P> -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -<p></p> -Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/include/rml_base.h b/deal.II/bundled/tbb30_104oss/src/rml/include/rml_base.h deleted file mode 100644 index 6abdaf8c41..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/include/rml_base.h +++ /dev/null @@ -1,196 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// Header guard and namespace names follow rml conventions. - -#ifndef __RML_rml_base_H -#define __RML_rml_base_H - -#include <cstddef> - -#if _WIN32||_WIN64 -#include <windows.h> -#endif /* _WIN32||_WIN64 */ - -#ifdef RML_PURE_VIRTUAL_HANDLER -#define RML_PURE(T) {RML_PURE_VIRTUAL_HANDLER(); return (T)0;} -#else -#define RML_PURE(T) = 0; -#endif - -namespace rml { - -//! 
Base class for denying assignment and copy constructor. -class no_copy { - void operator=( no_copy& ); - no_copy( no_copy& ); -public: - no_copy() {} -}; - -class server; - -class versioned_object { -public: - //! A version number - typedef unsigned version_type; - - //! Get version of this object - /** The version number is incremented when a incompatible change is introduced. - The version number is invariant for the lifetime of the object. */ - virtual version_type version() const RML_PURE(version_type) -}; - -//! Represents a client's job for an execution context. -/** A job object is constructed by the client. - Not derived from versioned_object because version is same as for client. */ -class job { - friend class server; - - //! Word for use by server - /** Typically the server uses it to speed up internal lookup. - Clients must not modify the word. */ - void* scratch_ptr; -}; - -//! Information that client provides to server when asking for a server. -/** The instance must endure at least until acknowledge_close_connection is called. */ -class client: public versioned_object { -public: - //! Typedef for convenience of derived classes in other namespaces. - typedef ::rml::job job; - - //! Index of a job in a job pool - typedef unsigned size_type; - - //! Maximum number of threads that client can exploit profitably if nothing else is running on the machine. - /** The returned value should remain invariant for the lifetime of the connection. [idempotent] */ - virtual size_type max_job_count() const RML_PURE(size_type) - - //! Minimum stack size for each job. 0 means to use default stack size. [idempotent] - virtual std::size_t min_stack_size() const RML_PURE(std::size_t) - - //! Server calls this routine when it needs client to create a job object. - virtual job* create_one_job() RML_PURE(job*) - - //! Acknowledge that all jobs have been cleaned up. - /** Called by server in response to request_close_connection - after cleanup(job) has been called for each job. */ - virtual void acknowledge_close_connection() RML_PURE(void) - - enum policy_type {turnaround,throughput}; - - //! Inform server of desired policy. [idempotent] - virtual policy_type policy() const RML_PURE(policy_type) - - //! Inform client that server is done with *this. - /** Client should destroy the job. - Not necessarily called by execution context represented by *this. - Never called while any other thread is working on the job. */ - virtual void cleanup( job& ) RML_PURE(void) - - // In general, we should not add new virtual methods, because that would - // break derived classes. Think about reserving some vtable slots. -}; - -// Information that server provides to client. -// Virtual functions are routines provided by the server for the client to call. -class server: public versioned_object { -public: - //! Typedef for convenience of derived classes. - typedef ::rml::job job; - -#if _WIN32||_WIN64 - typedef void* execution_resource_t; -#endif - - //! Request that connection to server be closed. - /** Causes each job associated with the client to have its cleanup method called, - possibly by a thread different than the thread that created the job. - This method can return before all cleanup methods return. - Actions that have to wait after all cleanup methods return should be part of - client::acknowledge_close_connection. - Pass true as exiting if request_close_connection() is called because exit() is - called. In that case, it is the client's responsibility to make sure all threads - are terminated. 
In all other cases, pass false. */ - virtual void request_close_connection( bool exiting = false ) = 0; - - //! Called by client thread when it reaches a point where it cannot make progress until other threads do. - virtual void yield() = 0; - - //! Called by client to indicate a change in the number of non-RML threads that are running. - /** This is a performance hint to the RML to adjust how many threads it should let run - concurrently. The delta is the change in the number of non-RML threads that are running. - For example, a value of 1 means the client has started running another thread, and a value - of -1 indicates that the client has blocked or terminated one of its threads. */ - virtual void independent_thread_number_changed( int delta ) = 0; - - //! Default level of concurrency for which RML strives when there are no non-RML threads running. - /** Normally, the value is the hardware concurrency minus one. - The "minus one" accounts for the thread created by main(). */ - virtual unsigned default_concurrency() const = 0; - -protected: - static void*& scratch_ptr( job& j ) {return j.scratch_ptr;} -}; - -class factory { -public: - //! status results - enum status_type { - st_success=0, - st_connection_exists, - st_not_found, - st_incompatible - }; - - //! Scratch pointer for use by RML. - void* scratch_ptr; - -protected: - //! Pointer to routine that waits for server to indicate when client can close itself. - status_type (*my_wait_to_close_routine)( factory& ); - -public: - //! Library handle for use by RML. -#if _WIN32||_WIN64 - HMODULE library_handle; -#else - void* library_handle; -#endif /* _WIN32||_WIN64 */ - - //! Special marker to keep dll from being unloaded prematurely - static const std::size_t c_dont_unload = 1; -}; - -//! Typedef for callback functions to print server info -typedef void (*server_info_callback_t)( void* arg, const char* server_info ); - -} // namespace rml - -#endif /* __RML_rml_base_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/include/rml_omp.h b/deal.II/bundled/tbb30_104oss/src/rml/include/rml_omp.h deleted file mode 100644 index 8202ade45b..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/include/rml_omp.h +++ /dev/null @@ -1,138 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
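The interfaces above are declared with the RML_PURE macro from earlier in this header: each method is a true pure virtual by default, but defining RML_PURE_VIRTUAL_HANDLER swaps in a stub body that reports the call and returns a zero value. A compact standalone sketch of that pattern (the DEMO_* names are hypothetical, used here to avoid restating the rml header):

    #include <cstdio>

    void report_stub_call() { std::printf( "unimplemented interface method called\n" ); }

    // Uncomment to turn pure virtuals into reporting stubs, as RML_PURE does
    // when RML_PURE_VIRTUAL_HANDLER is defined.
    //#define DEMO_PURE_VIRTUAL_HANDLER report_stub_call

    #ifdef DEMO_PURE_VIRTUAL_HANDLER
    #define DEMO_PURE(T) { DEMO_PURE_VIRTUAL_HANDLER(); return (T)0; }
    #else
    #define DEMO_PURE(T) = 0;
    #endif

    class versioned {
    public:
        virtual unsigned version() const DEMO_PURE(unsigned)
        virtual ~versioned() {}
    };

    class concrete_versioned : public versioned {
    public:
        virtual unsigned version() const { return 1; }
    };

    int main() {
        concrete_versioned v;
        std::printf( "version %u\n", v.version() );
        return 0;
    }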
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// Header guard and namespace names follow OpenMP runtime conventions. - -#ifndef KMP_RML_OMP_H -#define KMP_RML_OMP_H - -#include "rml_base.h" - -namespace __kmp { -namespace rml { - -class omp_client; - -//------------------------------------------------------------------------ -// Classes instantiated by the server -//------------------------------------------------------------------------ - -//! Represents a set of omp worker threads provided by the server. -class omp_server: public ::rml::server { -public: - //! A number of coins (i.e., threads) - typedef unsigned size_type; - - //! Return the number of coins in the bank. (negative if machine is oversubscribed). - virtual int current_balance() const = 0; - - //! Request n coins. Returns number of coins granted. Oversubscription amount if negative. - /** Always granted if is_strict is true. - - Positive or zero result indicates that the number of coins was taken from the bank. - - Negative result indicates that no coins were taken, and that the bank has deficit - by that amount and the caller (if being a good citizen) should return that many coins. - */ - virtual int try_increase_load( size_type /*n*/, bool /*strict*/ ) = 0; - - //! Return n coins into the bank. - virtual void decrease_load( size_type /*n*/ ) = 0; - - //! Convert n coins into n threads. - /** When a thread returns, it is converted back into a coin and the coin is returned to the bank. */ - virtual void get_threads( size_type /*m*/, void* /*cookie*/, job* /*array*/[] ) = 0; - - /** Putting a thread to sleep - convert a thread into a coin - Waking up a thread - convert a coin into a thread - - Note: conversion between a coin and a thread does not affect the accounting. - */ -#if _WIN32||_WIN64 - //! Inform server of a tbb master thread. - virtual void register_master( execution_resource_t& /*v*/ ) = 0; - - //! Inform server that the tbb master thread is done with its work. - virtual void unregister_master( execution_resource_t /*v*/ ) = 0; - - //! deactivate - /** give control to ConcRT RM */ - virtual void deactivate( job* ) = 0; - - //! reactivate - virtual void reactivate( job* ) = 0; -#endif /* _WIN32||_WIN64 */ -}; - - -//------------------------------------------------------------------------ -// Classes (or base classes thereof) instantiated by the client -//------------------------------------------------------------------------ - -class omp_client: public ::rml::client { -public: - //! Called by server thread when it delivers a thread to client - /** The index argument is a 0-origin index of the job for this thread within the array - returned by method get_threads. Server decreases the load by 1 (i.e., returning the coin - back to the bank) after this method returns. */ - virtual void process( job&, void* /*cookie*/, size_type /*index*/ ) RML_PURE(void) -}; - -/** Client must ensure that instance is zero-inited, typically by being a file-scope object. */ -class omp_factory: public ::rml::factory { - - //! Pointer to routine that creates an RML server. - status_type (*my_make_server_routine)( omp_factory&, omp_server*&, omp_client& ); - - //! Pointer to routine that calls callback function with server version info. 
- void (*my_call_with_server_info_routine)( ::rml::server_info_callback_t cb, void* arg ); - -public: - typedef ::rml::versioned_object::version_type version_type; - typedef omp_client client_type; - typedef omp_server server_type; - - //! Open factory. - /** Dynamically links against RML library. - Returns st_success, st_incompatible, or st_not_found. */ - status_type open(); - - //! Factory method to be called by client to create a server object. - /** Factory must be open. - Returns st_success or st_incompatible . */ - status_type make_server( server_type*&, client_type& ); - - //! Close factory. - void close(); - - //! Call the callback with the server build info. - void call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const; -}; - -} // namespace rml -} // namespace __kmp - -#endif /* KMP_RML_OMP_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/include/rml_tbb.h b/deal.II/bundled/tbb30_104oss/src/rml/include/rml_tbb.h deleted file mode 100644 index 6e9ea7e503..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/include/rml_tbb.h +++ /dev/null @@ -1,108 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// Header guard and namespace names follow TBB conventions. - -#ifndef __TBB_rml_tbb_H -#define __TBB_rml_tbb_H - -#include "rml_base.h" - -namespace tbb { -namespace internal { -namespace rml { - -class tbb_client; - -//------------------------------------------------------------------------ -// Classes instantiated by the server -//------------------------------------------------------------------------ - -//! Represents a set of tbb worker threads provided by the server. -class tbb_server: public ::rml::server { -public: - //! Inform server of adjustments in the number of workers that the client can profitably use. - virtual void adjust_job_count_estimate( int delta ) = 0; - -#if _WIN32||_WIN64 - //! Inform server of a tbb master thread. - virtual void register_master( execution_resource_t& v ) = 0; - - //! Inform server that the tbb master thread is done with its work. 
- virtual void unregister_master( execution_resource_t v ) = 0; -#endif /* _WIN32||_WIN64 */ -}; - -//------------------------------------------------------------------------ -// Classes instantiated by the client -//------------------------------------------------------------------------ - -class tbb_client: public ::rml::client { -public: - //! Defined by TBB to steal a task and execute it. - /** Called by server when it wants an execution context to do some TBB work. - The method should return when it is okay for the thread to yield indefinitely. */ - virtual void process( job& ) RML_PURE(void) -}; - -/** Client must ensure that instance is zero-inited, typically by being a file-scope object. */ -class tbb_factory: public ::rml::factory { - - //! Pointer to routine that creates an RML server. - status_type (*my_make_server_routine)( tbb_factory&, tbb_server*&, tbb_client& ); - - //! Pointer to routine that calls callback function with server version info. - void (*my_call_with_server_info_routine)( ::rml::server_info_callback_t cb, void* arg ); - -public: - typedef ::rml::versioned_object::version_type version_type; - typedef tbb_client client_type; - typedef tbb_server server_type; - - //! Open factory. - /** Dynamically links against RML library. - Returns st_success, st_incompatible, or st_not_found. */ - status_type open(); - - //! Factory method to be called by client to create a server object. - /** Factory must be open. - Returns st_success, or st_incompatible . */ - status_type make_server( server_type*&, client_type& ); - - //! Close factory - void close(); - - //! Call the callback with the server build info - void call_with_server_info( ::rml::server_info_callback_t cb, void* arg ) const; -}; - -} // namespace rml -} // namespace internal -} // namespace tbb - -#endif /*__TBB_rml_tbb_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/index.html b/deal.II/bundled/tbb30_104oss/src/rml/index.html deleted file mode 100644 index 1582714c11..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/index.html +++ /dev/null @@ -1,32 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -The subdirectories pertain to the Resource Management Layer (RML). - -<H2>Directories</H2> - -<DL> -<DT><P><A HREF="include/index.html">include/</A> -<DD>Include files used by clients of RML.</P> -<DT><P><A HREF="client/index.html">client/</A> -<DD>Source files for code that must be statically linked with a client.</P> -<DT><P><A HREF="server/index.html">server/</A> -<DD>Source files for the RML server.</P> -<DT><P><A HREF="test">test/</A> -<DD>Unit tests for RML server and its components.</P> -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -<p></p> -Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_nested.cpp b/deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_nested.cpp deleted file mode 100644 index d72801b9d7..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_nested.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -// Algorithm parameters -const int Max_OMP_Outer_Threads = 8; - -// Global variables -int max_outer_threads = Max_OMP_Outer_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -o<num> : max # of threads OMP should use at outer level\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_outer_t) { - (*max_outer_t) = omp_get_max_threads(); - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 'o': // set max_outer_threads - if (sscanf(&argv[i][2], "%d", max_outer_t) != 1 || *max_outer_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -int main(int argc, char *argv[]) { - process_args(argc, argv, &max_outer_threads); -#ifdef LOG_THREADS - TotalThreadLevel.init(); -#endif - - double start, end; - start = omp_get_wtime( ); - -#pragma omp parallel num_threads(max_outer_threads) - { - int omp_thread = omp_get_thread_num(); -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_outer); -#endif - if (omp_thread == 0) { - MilliSleep(3000); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, omp_outer); -#endif -#pragma omp parallel - { - int my_omp_thread = omp_get_thread_num(); -#ifdef LOG_THREADS - 
if (my_omp_thread == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_inner); -#endif - printf("Inner thread %d nested inside outer thread %d\n", my_omp_thread, omp_thread); -#ifdef LOG_THREADS - if (my_omp_thread == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_inner); -#endif - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, omp_outer); -#endif - } - else { - MilliSleep(6000); - } -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_outer); -#endif - } - end = omp_get_wtime( ); - printf("Simple test of nested OMP (%d outer threads max) took: %6.6f\n", - max_outer_threads, end-start); -#ifdef LOG_THREADS - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_simple.cpp b/deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_simple.cpp deleted file mode 100644 index ddfcef8e79..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/perfor/omp_simple.cpp +++ /dev/null @@ -1,168 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
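omp_nested.cpp above measures what happens when one OpenMP parallel region is opened inside another; whether the inner region actually forks is left to the environment. A minimal standalone sketch of that nested fork, with nesting enabled explicitly for determinism (the omp_set_nested call is an addition of this sketch, not part of the deleted test):

    #include <cstdio>
    #include <omp.h>

    int main() {
        omp_set_nested( 1 );                 // let the inner region fork as well
    #pragma omp parallel num_threads(2)
        {
            int outer = omp_get_thread_num();
    #pragma omp parallel num_threads(2)
            {
                std::printf( "Inner thread %d nested inside outer thread %d\n",
                             omp_get_thread_num(), outer );
            }
        }
        return 0;
    }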
-*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#include "tbb/task.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/parallel_for.h" -#include "tbb/blocked_range.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -using namespace std; -using namespace tbb; - -// Algorithm parameters -const int Max_TBB_Threads = 16; -const int Max_OMP_Threads = 16; - -// Global variables -int max_tbb_threads = Max_TBB_Threads; -int max_omp_threads = Max_OMP_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -t<num> : max # of threads TBB should use\n" - " -o<num> : max # of threads OMP should use\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) { - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 't': // set max_tbb_threads - if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) { - fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'o': // set max_omp_threads - if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -int main(int argc, char *argv[]) { - process_args(argc, argv, &max_tbb_threads, &max_omp_threads); - TotalThreadLevel.init(); - - double start, end; - start = omp_get_wtime(); - -#pragma omp parallel num_threads(max_omp_threads) - { - int omp_thread = omp_get_thread_num(); -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_outer); -#endif - task_scheduler_init phase(max_tbb_threads); - if (omp_thread == 0) { - MilliSleep(3000); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, omp_outer); -#endif - parallel_for(blocked_range<size_t>(0, 1000), - [=](const blocked_range<size_t>& range) { -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_inner); -#endif -#pragma ivdep - for (size_t i=range.begin(); i!=range.end(); ++i) { - if (i==range.begin()) - printf("TBB range starting at %d on OMP thread %d\n", (int)i, omp_thread); - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_inner); -#endif - }, auto_partitioner()); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, omp_outer); -#endif - } - else { - MilliSleep(6000); - } -#ifdef LOG_THREADS - if (omp_thread == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_outer); -#endif - } - end = omp_get_wtime(); - printf("Simple test of OMP (%d threads max) with TBB (%d threads max) inside took: %6.6f\n", - 
max_omp_threads, max_tbb_threads, end-start); -#ifdef LOG_THREADS - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_multi_omp.cpp b/deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_multi_omp.cpp deleted file mode 100644 index 9f4442b8ed..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_multi_omp.cpp +++ /dev/null @@ -1,194 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#include "tbb/task.h" -#include "tbb/tick_count.h" -#include "tbb/task_scheduler_init.h" -#include "tbb/scalable_allocator.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -using namespace std; -using namespace tbb; - -// Algorithm parameters -const int Max_TBB_Threads = 16; -const int Max_OMP_Threads = 16; - -// Global variables -int max_tbb_threads = Max_TBB_Threads; -int max_omp_threads = Max_OMP_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -t<num> : max # of threads TBB should use\n" - " -o<num> : max # of threads OMP should use\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) { - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 't': // set max_tbb_threads - if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) { - fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'o': // set max_omp_threads - if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - 
help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -class SimpleTask : public task { - bool isLeaf; - int myId; -public: - SimpleTask(bool isLeaf_, int myId_) : isLeaf(isLeaf_), myId(myId_) {} - task* execute() { -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - omp_set_num_threads(max_omp_threads); - if (!isLeaf) { - set_ref_count(65); - for (int i=0; i<64; ++i) { - SimpleTask& st = *new(allocate_child()) SimpleTask(true, i); - spawn(st); - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - wait_for_all(); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - } - else { - if (myId%2 == 0) { - MilliSleep(3000); -#pragma omp parallel - { -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(omp_get_num_threads()-1, omp_inner); -#endif - //printf("In OMP parallel region on TBB task with myId=0: thread %d of %d\n", omp_get_thread_num(), omp_get_num_threads()); -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(-(omp_get_num_threads()-1), omp_inner); -#endif - } - } - else { - MilliSleep(6000); - } - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - return NULL; - } -}; - - -int main(int argc, char *argv[]) { -#ifdef LOG_THREADS - TotalThreadLevel.init(); - TotalThreadLevel.change_level(1, tbb_outer); -#endif - process_args(argc, argv, &max_tbb_threads, &max_omp_threads); - - task_scheduler_init phase(max_tbb_threads); - tick_count start, end; - start = tick_count::now(); - SimpleTask& st = *new(task::allocate_root()) SimpleTask(false, -1); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - task::spawn_root_and_wait(st); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - end = tick_count::now(); - printf("Simple Test of TBB (%d threads max) with OMP (%d threads max) inside took: %6.6f\n", - max_tbb_threads, max_omp_threads, (end-start).seconds()); - -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_simple.cpp b/deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_simple.cpp deleted file mode 100644 index 0b01fa9867..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/perfor/tbb_simple.cpp +++ /dev/null @@ -1,199 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include <cstddef> -#include <cstdlib> -#include <cstdio> -#include <float.h> -#include <math.h> -#include <time.h> - -#include <omp.h> -#include <assert.h> - -#include "thread_level.h" - -#include "tbb/task.h" -#include "tbb/tick_count.h" -#include "tbb/task_scheduler_init.h" - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -using namespace std; -using namespace tbb; - -// Algorithm parameters -const int Max_TBB_Threads = 16; -const int Max_OMP_Threads = 16; - -// Global variables -int max_tbb_threads = Max_TBB_Threads; -int max_omp_threads = Max_OMP_Threads; - -// Print help on command-line arguments -void help_message(char *prog_name) { - fprintf(stderr, "\n%s usage:\n", prog_name); - fprintf(stderr, - " Parameters:\n" - " -t<num> : max # of threads TBB should use\n" - " -o<num> : max # of threads OMP should use\n" - "\n Help:\n" - " -h : print this help message\n"); -} - -// Process command-line arguments -void process_args(int argc, char *argv[], int *max_tbb_t, int *max_omp_t) { - for (int i=1; i<argc; ++i) { - if (argv[i][0] == '-') { - switch (argv[i][1]) { - case 't': // set max_tbb_threads - if (sscanf(&argv[i][2], "%d", max_tbb_t) != 1 || *max_tbb_t < 1) { - fprintf(stderr, "%s Warning: argument of -t option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'o': // set max_omp_threads - if (sscanf(&argv[i][2], "%d", max_omp_t) != 1 || *max_omp_t < 1) { - fprintf(stderr, "%s Warning: argument of -o option unacceptable: %s\n", argv[0], &argv[i][2]); - help_message(argv[0]); - } - break; - case 'h': // print help message - help_message(argv[0]); - exit(0); - break; - default: - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - break; - } - } else { - fprintf(stderr, "%s: Warning: command-line option ignored: %s\n", argv[0], argv[i]); - help_message(argv[0]); - } - } -} - -class SimpleTask : public task { - bool isLeaf; - int myId; -public: - SimpleTask(bool isLeaf_, int myId_) : isLeaf(isLeaf_), myId(myId_) {} - task* execute() { -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - omp_set_num_threads(max_omp_threads); - if (!isLeaf) { - set_ref_count(17); - for (int i=0; i<16; ++i) { - SimpleTask& st = *new(allocate_child()) SimpleTask(true, i); - spawn(st); - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - wait_for_all(); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - } - else { - if (myId == 0) { - 
MilliSleep(3000); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif -#pragma omp parallel - { -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(omp_get_num_threads(), omp_inner); -#endif - printf("In OMP parallel region on TBB task with myId=0: thread %d of %d\n", - omp_get_thread_num(), omp_get_num_threads()); -#ifdef LOG_THREADS - if (omp_get_thread_num() == 0) - TotalThreadLevel.change_level(-omp_get_num_threads(), omp_inner); -#endif - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - } - else { - MilliSleep(6000); - } - } -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - return NULL; - } -}; - - -int main(int argc, char *argv[]) { -#ifdef LOG_THREADS - TotalThreadLevel.init(); - TotalThreadLevel.change_level(1, tbb_outer); -#endif - process_args(argc, argv, &max_tbb_threads, &max_omp_threads); - - task_scheduler_init phase(max_tbb_threads); - tick_count start, end; - start = tick_count::now(); - SimpleTask& st = *new(task::allocate_root()) SimpleTask(false, -1); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); -#endif - task::spawn_root_and_wait(st); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(1, tbb_outer); -#endif - end = tick_count::now(); - printf("Simple Test of TBB (%d threads max) with OMP (%d threads max) inside took: %6.6f\n", - max_tbb_threads, max_omp_threads, (end-start).seconds()); -#ifdef LOG_THREADS - TotalThreadLevel.change_level(-1, tbb_outer); - TotalThreadLevel.dump(); -#endif - return 0; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/perfor/thread_level.h b/deal.II/bundled/tbb30_104oss/src/rml/perfor/thread_level.h deleted file mode 100644 index 339b72ea24..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/perfor/thread_level.h +++ /dev/null @@ -1,142 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -// Thread level recorder -#ifndef __THREAD_LEVEL_H -#define __THREAD_LEVEL_H -#include <cstdio> -#include <omp.h> -#include <assert.h> -#include "tbb/atomic.h" -#include "tbb/tick_count.h" - -//#define LOG_THREADS // use this to ifdef out calls to this class -//#define NO_BAIL_OUT // continue execution after detecting oversubscription - -using namespace tbb; - -typedef enum {tbb_outer, tbb_inner, omp_outer, omp_inner} client_t; - -class ThreadLevelRecorder { - tbb::atomic<int> tbb_outer_level; - tbb::atomic<int> tbb_inner_level; - tbb::atomic<int> omp_outer_level; - tbb::atomic<int> omp_inner_level; - struct record { - tbb::tick_count time; - int n_tbb_outer_thread; - int n_tbb_inner_thread; - int n_omp_outer_thread; - int n_omp_inner_thread; - }; - tbb::atomic<unsigned> next; - /** Must be power of two */ - static const unsigned max_record_count = 1<<20; - record array[max_record_count]; - int max_threads; - bool fail; - public: - void change_level(int delta, client_t whichClient); - void dump(); - void init(); -}; - -void ThreadLevelRecorder::change_level(int delta, client_t whichClient) { - int tox=tbb_outer_level, tix=tbb_inner_level, oox=omp_outer_level, oix=omp_inner_level; - if (whichClient == tbb_outer) { - tox = tbb_outer_level+=delta; - } else if (whichClient == tbb_inner) { - tix = tbb_inner_level+=delta; - } else if (whichClient == omp_outer) { - oox = omp_outer_level+=delta; - } else if (whichClient == omp_inner) { - oix = omp_inner_level+=delta; - } else { - printf("WARNING: Bad client type; ignoring.\n"); - return; - } - // log non-negative entries - tbb::tick_count t = tbb::tick_count::now(); - unsigned k = next++; - if (k<max_record_count) { - record& r = array[k]; - r.time = t; - r.n_tbb_outer_thread = tox>=0?tox:0; - r.n_omp_outer_thread = oox>=0?oox:0; - r.n_tbb_inner_thread = tix>=0?tix:0; - r.n_omp_inner_thread = oix>=0?oix:0; - } - char errStr[100]; - int tot_threads; - tot_threads = tox+tix+oox+oix; - sprintf(errStr, "ERROR: Number of threads (%d+%d+%d+%d=%d) in use exceeds maximum (%d).\n", - tox, tix, oox, oix, tot_threads, max_threads); - if (tot_threads > max_threads) { -#ifdef NO_BAIL_OUT - if (!fail) { - printf("%sContinuing...\n", errStr); - fail = true; - } -#else - dump(); - printf("%s\n", errStr); - assert(tot_threads <= max_threads); -#endif - } -} - -void ThreadLevelRecorder::dump() { - FILE* f = fopen("time.txt","w"); - if (!f) { - perror("fopen(time.txt)\n"); - exit(1); - } - unsigned limit = next; - if (limit>max_record_count) { // Clip - limit = max_record_count; - } - for (unsigned i=0; i<limit; ++i) { - fprintf(f,"%f\t%d\t%d\t%d\t%d\n",(array[i].time-array[0].time).seconds(), array[i].n_tbb_outer_thread, - array[i].n_tbb_inner_thread, array[i].n_omp_outer_thread, array[i].n_omp_inner_thread); - } - fclose(f); - int tox=tbb_outer_level, tix=tbb_inner_level, oox=omp_outer_level, oix=omp_inner_level; - int tot_threads; - tot_threads = tox+tix+oox+oix; - if (!fail) printf("INFO: Passed.\n"); - else printf("INFO: Failed.\n"); -} - -void ThreadLevelRecorder::init() { - fail = false; - max_threads = omp_get_max_threads(); - printf("INFO: Getting maximum hardware threads... 
%d.\n", max_threads); -} - -ThreadLevelRecorder TotalThreadLevel; -#endif diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/index.html b/deal.II/bundled/tbb30_104oss/src/rml/server/index.html deleted file mode 100644 index 238b16645c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/index.html +++ /dev/null @@ -1,19 +0,0 @@ -<HTML> -<BODY> -<H2>Overview</H2> - -This directory has source code internal to the server. - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -<p></p> -Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/irml.rc b/deal.II/bundled/tbb30_104oss/src/rml/server/irml.rc deleted file mode 100644 index 267c2f2c7b..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/irml.rc +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. - -// Microsoft Visual C++ generated resource script. -// -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NO_MFC 1 -#define _APS_NEXT_RESOURCE_VALUE 102 -#define _APS_NEXT_COMMAND_VALUE 40001 -#define _APS_NEXT_CONTROL_VALUE 1001 -#define _APS_NEXT_SYMED_VALUE 101 -#endif -#endif - -#define APSTUDIO_READONLY_SYMBOLS -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 2 resource. 
-// -#include <winresrc.h> -#define ENDL "\r\n" -#include "tbb/tbb_version.h" - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS - -///////////////////////////////////////////////////////////////////////////// -// Neutral resources - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU) -#ifdef _WIN32 -LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL -#pragma code_page(1252) -#endif //_WIN32 - -///////////////////////////////////////////////////////////////////////////// -// manifest integration -#ifdef TBB_MANIFEST -#include "winuser.h" -2 RT_MANIFEST tbbmanifest.exe.manifest -#endif - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION TBB_VERNUMBERS - PRODUCTVERSION TBB_VERNUMBERS - FILEFLAGSMASK 0x17L -#ifdef _DEBUG - FILEFLAGS 0x1L -#else - FILEFLAGS 0x0L -#endif - FILEOS 0x40004L - FILETYPE 0x2L - FILESUBTYPE 0x0L -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "000004b0" - BEGIN - VALUE "CompanyName", "Intel Corporation\0" - VALUE "FileDescription", "Threading Building Blocks resource manager library\0" - VALUE "FileVersion", TBB_VERSION "\0" -//what is it? VALUE "InternalName", "irml\0" - VALUE "LegalCopyright", "Copyright 2005-2010 Intel Corporation. All Rights Reserved.\0" - VALUE "LegalTrademarks", "\0" -#ifndef TBB_USE_DEBUG - VALUE "OriginalFilename", "irml.dll\0" -#else - VALUE "OriginalFilename", "irml_debug.dll\0" -#endif - VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0" - VALUE "ProductVersion", TBB_VERSION "\0" - VALUE "Comments", TBB_VERSION_STRINGS "\0" - VALUE "PrivateBuild", "\0" - VALUE "SpecialBuild", "\0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0, 1200 - END -END - -#endif // Neutral resources -///////////////////////////////////////////////////////////////////////////// - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource. -// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/job_automaton.h b/deal.II/bundled/tbb30_104oss/src/rml/server/job_automaton.h deleted file mode 100644 index 514418e207..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/job_automaton.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __RML_job_automaton_H -#define __RML_job_automaton_H - -#include "rml_base.h" -#include "tbb/atomic.h" - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4244) -#endif - -namespace rml { - -namespace internal { - -//! Finite state machine. -/** /--------------\ - / V - 0 --> 1--> ptr --> -1 - ^ - | - | - V - ptr|1 - -"owner" = corresponding server_thread. -Odd states (except -1) indicate that someone is executing code on the job. -Most transitions driven only by owner. -Transition 0-->-1 is driven by non-owner. -Transition ptr->-1 is driven by owner or non-owner. -*/ -class job_automaton: no_copy { -private: - tbb::atomic<intptr_t> my_job; -public: - /** Created by non-owner */ - job_automaton() { - my_job = 0; - } - - ~job_automaton() { - __TBB_ASSERT( my_job==-1, "must plug before destroying" ); - } - - //! Try to transition 0-->1 or ptr-->ptr|1. - /** Should only be called by owner. */ - bool try_acquire() { - intptr_t snapshot = my_job; - if( snapshot==-1 ) { - return false; - } else { - __TBB_ASSERT( (snapshot&1)==0, "already marked that way" ); - intptr_t old = my_job.compare_and_swap( snapshot|1, snapshot ); - __TBB_ASSERT( old==snapshot || old==-1, "unexpected interference" ); - return old==snapshot; - } - } - //! Transition ptr|1-->ptr - /** Should only be called by owner. */ - void release() { - intptr_t snapshot = my_job; - __TBB_ASSERT( snapshot&1, NULL ); - // Atomic store suffices here. - my_job = snapshot&~1; - } - - //! Transition 1-->ptr - /** Should only be called by owner. */ - void set_and_release( rml::job& job ) { - intptr_t value = reinterpret_cast<intptr_t>(&job); - __TBB_ASSERT( (value&1)==0, "job misaligned" ); - __TBB_ASSERT( value!=0, "null job" ); - __TBB_ASSERT( my_job==1, "already set, or not marked busy?" ); - // Atomic store suffices here. - my_job = value; - } - - //! Transition 0-->-1 - /** If successful, return true. called by non-owner (for TBB and the likes) */ - bool try_plug_null() { - return my_job.compare_and_swap( -1, 0 )==0; - } - - //! Try to transition to -1. If successful, set j to contents and return true. - /** Called by owner or non-owner. (for OpenMP and the likes) */ - bool try_plug( rml::job*&j ) { - for(;;) { - intptr_t snapshot = my_job; - if( snapshot&1 ) { - j = NULL; - return false; - } - // Not busy - if( my_job.compare_and_swap( -1, snapshot )==snapshot ) { - j = reinterpret_cast<rml::job*>(snapshot); - return true; - } - // Need to retry, because current thread may be non-owner that read a 0, and owner might have - // caused transition 0->1->ptr after we took our snapshot. - } - } - - /** Called by non-owner to wait for transition to ptr. 
*/ - rml::job& wait_for_job() const { - intptr_t snapshot; - for(;;) { - snapshot = my_job; - if( snapshot&~1 ) break; - __TBB_Yield(); - } - __TBB_ASSERT( snapshot!=-1, "wait on plugged job_automaton" ); - return *reinterpret_cast<rml::job*>(snapshot&~1); - } -}; - -} // namespace internal -} // namespace rml - - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - #pragma warning (pop) -#endif // warning 4244 are back - -#endif /* __RML_job_automaton_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/lin-rml-export.def b/deal.II/bundled/tbb30_104oss/src/rml/server/lin-rml-export.def deleted file mode 100644 index 868b65a9a9..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/lin-rml-export.def +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -{ -global: -__RML_open_factory; -__RML_close_factory; -__TBB_make_rml_server; -__KMP_make_rml_server; -__TBB_call_with_my_server_info; -__KMP_call_with_my_server_info; -local:*; -}; diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/rml_server.cpp b/deal.II/bundled/tbb30_104oss/src/rml/server/rml_server.cpp deleted file mode 100644 index 774b6ebd80..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/rml_server.cpp +++ /dev/null @@ -1,3330 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "rml_tbb.h" -#define private public /* Sleazy trick to avoid publishing internal names in public header. */ -#include "rml_omp.h" -#undef private - -#include "tbb/tbb_allocator.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/aligned_space.h" -#include "tbb/atomic.h" -#include "tbb/spin_mutex.h" -#include "tbb/tbb_misc.h" // Get DetectNumberOfWorkers() from here. -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) -// VS2008/VC9 seems to have an issue; -#pragma warning( push ) -#pragma warning( disable: 4985 ) -#endif -#include "tbb/concurrent_vector.h" -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif -#if _MSC_VER && defined(_Wp64) -// Workaround for overzealous compiler warnings -#pragma warning (push) -#pragma warning (disable: 4244) -#endif - -#include "job_automaton.h" -#include "wait_counter.h" -#include "thread_monitor.h" - -#if RML_USE_WCRM -#include <concrt.h> -#include <concrtrm.h> -using namespace Concurrency; -#include <vector> -#include <hash_map> -#define __RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED 0 -#endif /* RML_USE_WCRM */ - -#define STRINGIFY(x) #x -#define TOSTRING(x) STRINGIFY(x) - -namespace rml { -namespace internal { - -//! Number of hardware contexts -static inline unsigned hardware_concurrency() { - static unsigned DefaultNumberOfThreads = 0; - unsigned n = DefaultNumberOfThreads; - if( !n ) DefaultNumberOfThreads = n = tbb::internal::DetectNumberOfWorkers(); - return n; -} - -using tbb::internal::rml::tbb_client; -using tbb::internal::rml::tbb_server; - -using __kmp::rml::omp_client; -using __kmp::rml::omp_server; - -typedef versioned_object::version_type version_type; - -#define SERVER_VERSION 2 -#define EARLIEST_COMPATIBLE_CLIENT_VERSION 2 - -static const size_t cache_line_size = tbb::internal::NFS_MaxLineSize; - -template<typename Server, typename Client> class generic_connection; -class tbb_connection_v2; -class omp_connection_v2; - -#if RML_USE_WCRM -//! State of a server_thread -/** Below are diagrams of legal state transitions. - - ts_busy - ^ ^ - / \ - / V - ts_done <----- ts_asleep <------> ts_idle -*/ - -enum thread_state_t { - ts_idle, - ts_asleep, - ts_busy, - ts_done -}; - -//! Extra state of an omp server thread -enum thread_extra_state_t { - ts_none, - ts_removed, - ts_lent -}; - -//! Results from try_grab_for() -enum thread_grab_t { - wk_failed, - wk_from_asleep, - wk_from_idle -}; - -#else /* !RML_USE_WCRM */ - -//! State of a server_thread -/** Below are diagrams of legal state transitions. - - OMP - ts_omp_busy - ^ ^ - / \ - / V - ts_asleep <-----------> ts_idle - - - ts_deactivated - ^ ^ - / \ - V \ - ts_none <--------------> ts_reactivated - - TBB - ts_tbb_busy - ^ ^ - / \ - / V - ts_asleep <-----------> ts_idle --> ts_done - - For TBB only. Extra state transition. - - ts_created -> ts_started -> ts_visited - */ -enum thread_state_t { - //! Thread not doing anything useful, but running and looking for work. - ts_idle, - //! Thread not doing anything useful and is asleep */ - ts_asleep, - //! Thread is enlisted into OpenMP team - ts_omp_busy, - //! 
Thread is busy doing TBB work. - ts_tbb_busy, - //! For tbb threads only - ts_done, - ts_created, - ts_started, - ts_visited, - //! For omp threads only - ts_none, - ts_deactivated, - ts_reactivated -}; -#endif /* RML_USE_WCRM */ - -#if TBB_USE_ASSERT -#define PRODUCE_ARG(x) ,x -#else -#define PRODUCE_ARG(x) -#endif /* TBB_USE_ASSERT */ - -//! Synchronizes dispatch of OpenMP work. -class omp_dispatch_type { - typedef ::rml::job job_type; - omp_client* client; - void* cookie; - omp_client::size_type index; - tbb::atomic<job_type*> job; -#if TBB_USE_ASSERT - omp_connection_v2* server; -#endif /* TBB_USE_ASSERT */ -public: - omp_dispatch_type() {job=NULL;} - void consume(); - void produce( omp_client& c, job_type& j, void* cookie_, omp_client::size_type index_ PRODUCE_ARG( omp_connection_v2& s )) { - __TBB_ASSERT( &j, NULL ); - __TBB_ASSERT( !job, "job already set" ); - client = &c; -#if TBB_USE_ASSERT - server = &s; -#endif /* TBB_USE_ASSERT */ - cookie = cookie_; - index = index_; - // Must be last - job = &j; - } -}; - -//! A reference count. -/** No default constructor, because users of ref_count must be very careful about whether the - initial reference count is 0 or 1. */ -class ref_count: no_copy { - friend class thread_map; - tbb::atomic<int> my_ref_count; -public: - ref_count(int k ) {my_ref_count=k;} - ~ref_count() {__TBB_ASSERT( !my_ref_count, "premature destruction of refcounted object" );} - //! Add one and return new value. - int add_ref() { - int k = ++my_ref_count; - __TBB_ASSERT(k>=1,"reference count underflowed before add_ref"); - return k; - } - //! Subtract one and return new value. - int remove_ref() { - int k = --my_ref_count; - __TBB_ASSERT(k>=0,"reference count underflow"); - return k; - } -}; - -#if RML_USE_WCRM - -#if USE_UMS_THREAD -#define RML_THREAD_KIND UmsThreadDefault -#define RML_THREAD_KIND_STRING "UmsThread" -#else -#define RML_THREAD_KIND ThreadScheduler -#define RML_THREAD_KIND_STRING "WinThread" -#endif - -// Forward declaration -class thread_map; - -static const IExecutionResource* c_remove_prepare = (IExecutionResource*)0; -static const IExecutionResource* c_remove_returned = (IExecutionResource*)1; - -//! Server thread representation -class server_thread_rep : no_copy { - friend class thread_map; - friend class omp_connection_v2; - friend class server_thread; - friend class tbb_server_thread; - friend class omp_server_thread; - template<typename Connection> friend void make_job( Connection& c, typename Connection::server_thread_type& t ); - typedef int thread_state_rep_t; -public: - //! Ctor - server_thread_rep( bool assigned, IScheduler* s, IExecutionResource* r, thread_map& map, rml::client& cl ) : - uid( GetExecutionContextId() ), my_scheduler(s), my_proxy(NULL), - my_thread_map(map), my_client(cl), my_job(NULL) - { - my_state = assigned ? ts_busy : ts_idle; - my_extra_state = ts_none; - terminate = false; - my_execution_resource = r; - } - //! Dtor - ~server_thread_rep() {} - - //! 
Synchronization routine - inline rml::job* wait_for_job() { - if( !my_job ) my_job = &my_job_automaton.wait_for_job(); - return my_job; - } - - // Getters and setters - inline thread_state_t read_state() const { thread_state_rep_t s = my_state; return static_cast<thread_state_t>(s); } - inline void set_state( thread_state_t to ) {my_state = to;} - inline void set_removed() { __TBB_ASSERT( my_extra_state==ts_none, NULL ); my_extra_state = ts_removed; } - inline bool is_removed() const { return my_extra_state==ts_removed; } - inline bool is_lent() const {return my_extra_state==ts_lent;} - inline void set_lent() { my_extra_state=ts_lent; } - inline void set_returned() { my_extra_state=ts_none; } - inline IExecutionResource* get_execution_resource() { return my_execution_resource; } - inline IVirtualProcessorRoot* get_virtual_processor() { return (IVirtualProcessorRoot*)get_execution_resource(); } - - //! Enlist the thread for work - inline bool wakeup( thread_state_t to, thread_state_t from ) { - __TBB_ASSERT( from==ts_asleep && (to==ts_idle||to==ts_busy||to==ts_done), NULL ); - return my_state.compare_and_swap( to, from )==from; - } - - //! Enlist the thread for. - thread_grab_t try_grab_for(); - - //! Destroy the client job associated with the thread - template<typename Connection> bool destroy_job( Connection* c ); - - //! Try to re-use the thread - void revive( IScheduler* s, IExecutionResource* r, rml::client& c ) { - // the variables may not have been set before a thread was told to quit - __TBB_ASSERT( my_scheduler==s, "my_scheduler has been altered?\n" ); - my_scheduler = s; - __TBB_ASSERT( &my_client==&c, "my_client has been altered?\n" ); - if( r ) my_execution_resource = r; - my_client = c; - my_state = ts_idle; - __TBB_ASSERT( my_extra_state==ts_removed, NULL ); - my_extra_state = ts_none; - } - -protected: - const int uid; - IScheduler* my_scheduler; - IThreadProxy* my_proxy; - tbb::atomic<IExecutionResource*> my_execution_resource; /* for non-masters, it is IVirtualProcessorRoot */ - thread_map& my_thread_map; - rml::client& my_client; - job* my_job; - job_automaton my_job_automaton; - tbb::atomic<bool> terminate; - tbb::atomic<thread_state_rep_t> my_state; - tbb::atomic<thread_extra_state_t> my_extra_state; -}; - -//! Class that implements IExecutionContext -class server_thread : public IExecutionContext, public server_thread_rep { - friend class tbb_connection_v2; - friend class omp_connection_v2; - friend class tbb_server_thread; - friend class omp_server_thread; - friend class thread_map; - template<typename Connection> friend void make_job( Connection& c, typename Connection::server_thread_type& t ); -protected: - server_thread( bool is_tbb, bool assigned, IScheduler* s, IExecutionResource* r, thread_map& map, rml::client& cl ) : server_thread_rep(assigned,s,r,map,cl), tbb_thread(is_tbb) {} - ~server_thread() {} - /*override*/ unsigned int GetId() const { return uid; } - /*override*/ IScheduler* GetScheduler() { return my_scheduler; } - /*override*/ IThreadProxy* GetProxy() { return my_proxy; } - /*override*/ void SetProxy( IThreadProxy* thr_proxy ) { my_proxy = thr_proxy; } - -private: - bool tbb_thread; -}; - -// Forward declaration -class tbb_connection_v2; -class omp_connection_v2; - -//! 
TBB server thread -class tbb_server_thread : public server_thread { - friend class tbb_connection_v2; -public: - tbb_server_thread( bool assigned, IScheduler* s, IExecutionResource* r, tbb_connection_v2* con, thread_map& map, rml::client& cl ) : server_thread(true,assigned,s,r,map,cl), my_conn(con) { - activation_count = 0; - } - ~tbb_server_thread() {} - /*override*/ void Dispatch( DispatchState* ); - inline bool initiate_termination(); - bool sleep_perhaps(); - //! Switch out this thread - bool switch_out(); -private: - tbb_connection_v2* my_conn; -public: - tbb::atomic<int> activation_count; -}; - -//! OMP server thread -class omp_server_thread : public server_thread { - friend class omp_connection_v2; -public: - omp_server_thread( bool assigned, IScheduler* s, IExecutionResource* r, omp_connection_v2* con, thread_map& map, rml::client& cl ) : - server_thread(false,assigned,s,r,map,cl), my_conn(con), my_cookie(NULL), my_index(UINT_MAX) {} - ~omp_server_thread() {} - /*override*/ void Dispatch( DispatchState* ); - inline void* get_cookie() {return my_cookie;} - inline ::__kmp::rml::omp_client::size_type get_index() {return my_index;} - - inline IExecutionResource* get_execution_resource() { return get_execution_resource(); } - inline bool initiate_termination() { return destroy_job( (omp_connection_v2*) my_conn ); } - void sleep_perhaps(); -private: - omp_connection_v2* my_conn; - void* my_cookie; - ::__kmp::rml::omp_client::size_type my_index; - omp_dispatch_type omp_data; -}; - -//! Class that implements IScheduler -template<typename Connection> -class scheduler : no_copy, public IScheduler { -public: - /*override*/ unsigned int GetId() const {return uid;} - /*override*/ void Statistics( unsigned int* /*pTaskCompletionRate*/, unsigned int* /*pTaskArrivalRate*/, unsigned int* /*pNumberOfTaskEnqueued*/) {} - /*override*/ SchedulerPolicy GetPolicy() const { __TBB_ASSERT(my_policy,NULL); return *my_policy; } - /*override*/ void AddVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count ) { if( !my_conn.is_closing() ) my_conn.add_virtual_processors( vproots, count); } - /*override*/ void RemoveVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count ); - /*override*/ void NotifyResourcesExternallyIdle( IVirtualProcessorRoot** vproots, unsigned int count ) { __TBB_ASSERT( false, "This call is not allowed for TBB" ); } - /*override*/ void NotifyResourcesExternallyBusy( IVirtualProcessorRoot** vproots, unsigned int count ) { __TBB_ASSERT( false, "This call is not allowed for TBB" ); } -protected: - scheduler( Connection& conn ); - virtual ~scheduler() { __TBB_ASSERT( my_policy, NULL ); delete my_policy; } - -public: - static scheduler* create( Connection& conn ) {return new scheduler( conn );} - -private: - const int uid; - Connection& my_conn; - SchedulerPolicy* my_policy; -}; - - -/* - * --> ts_busy --> ts_done - */ -class thread_scavenger_thread : public IExecutionContext, no_copy { -public: - thread_scavenger_thread( IScheduler* s, IVirtualProcessorRoot* r, thread_map& map ) : - uid( GetExecutionContextId() ), my_scheduler(s), my_virtual_processor_root(r), my_proxy(NULL), my_thread_map(map) - { - my_state = ts_busy; -#if TBB_USE_ASSERT - activation_count = 0; -#endif - } - ~thread_scavenger_thread() {} - /*override*/ unsigned int GetId() const { return uid; } - /*override*/ IScheduler* GetScheduler() { return my_scheduler; } - /*override*/ IThreadProxy* GetProxy() { return my_proxy; } - /*override*/ void SetProxy( IThreadProxy* thr_proxy ) { my_proxy = 
thr_proxy; } - /*override*/ void Dispatch( DispatchState* ); - inline thread_state_t read_state() { return my_state; } - inline void set_state( thread_state_t s ) { my_state = s; } - inline IVirtualProcessorRoot* get_virtual_processor() { return my_virtual_processor_root; } -private: - const int uid; - IScheduler* my_scheduler; - IVirtualProcessorRoot* my_virtual_processor_root; - IThreadProxy* my_proxy; - thread_map& my_thread_map; - tbb::atomic<thread_state_t> my_state; -#if TBB_USE_ASSERT -public: - tbb::atomic<int> activation_count; -#endif -}; - -static const thread_scavenger_thread* c_claimed = reinterpret_cast<thread_scavenger_thread*>(1); - -struct garbage_connection_queue { - tbb::atomic<uintptr_t> head; - tbb::atomic<uintptr_t> tail; - static const uintptr_t empty = 0; // connection scavenger thread empty list - static const uintptr_t plugged = 1; // end of use of the list - static const uintptr_t plugged_acked = 2; // connection scavenger saw the plugged flag, and it freed all connections -}; - -//! Connection scavenger -/** It collects closed connection objects, wait for worker threads belonging to the connection to return to ConcRT RM - * then return the object to the memory manager. - */ -class connection_scavenger_thread { - friend void assist_cleanup_connections(); - /* - * connection_scavenger_thread's state - * ts_busy <----> ts_asleep <-- - */ - tbb::atomic<thread_state_t> state; - - /* We steal two bits from a connection pointer to encode - * whether the connection is for TBB or for OMP. - * - * ---------------------------------- - * | | | | - * ---------------------------------- - * ^ ^ - * / | - * 1 : tbb, 0 : omp | - * if set, terminate - */ - // FIXME: pad these? - thread_monitor monitor; - int default_concurrency; - HANDLE thr_handle; -#if TBB_USE_ASSERT - tbb::atomic<int> n_scavenger_threads; -#endif - -public: - connection_scavenger_thread() : thr_handle(NULL) { - state = ts_asleep; -#if TBB_USE_ASSERT - n_scavenger_threads = 0; -#endif - } - - ~connection_scavenger_thread() {} - - void wakeup() { - if( state.compare_and_swap( ts_busy, ts_asleep )==ts_asleep ) - monitor.notify(); - } - - void sleep_perhaps(); - - void process_requests( uintptr_t conn_ex ); - - static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg ); - - void launch( int dc ) { - default_concurrency = dc; - thread_monitor::launch( connection_scavenger_thread::thread_routine, this, NULL ); - } - - template<typename Server, typename Client> - void add_request( generic_connection<Server,Client>* conn_to_close ); - - template<typename Server, typename Client> - uintptr_t grab_and_prepend( generic_connection<Server,Client>* last_conn_to_close ); -}; - -void free_all_connections( uintptr_t ); - -#endif /* RML_USE_WCRM */ - -#if !RML_USE_WCRM -class server_thread; - -//! thread_map_base; we need to make the iterator type available to server_thread -struct thread_map_base { - //! 
A value in the map - class value_type { - public: - server_thread& thread() { - __TBB_ASSERT( my_thread, "thread_map::value_type::thread() called when !my_thread" ); - return *my_thread; - } - rml::job& job() { - __TBB_ASSERT( my_job, "thread_map::value_type::job() called when !my_job" ); - return *my_job; - } - value_type() : my_thread(NULL), my_job(NULL) {} - server_thread& wait_for_thread() const { - for(;;) { - server_thread* ptr=const_cast<server_thread*volatile&>(my_thread); - if( ptr ) - return *ptr; - __TBB_Yield(); - } - } - /** Shortly after when a connection is established, it is possible for the server - to grab a server_thread that has not yet created a job object for that server. */ - rml::job& wait_for_job() const { - if( !my_job ) { - my_job = &my_automaton.wait_for_job(); - } - return *my_job; - } - private: - server_thread* my_thread; - /** Marked mutable because though it is physically modified, conceptually it is a duplicate of - the job held by job_automaton. */ - mutable rml::job* my_job; - job_automaton my_automaton; - // FIXME - pad out to cache line, because my_automaton is hit hard by thread() - friend class thread_map; - }; - typedef tbb::concurrent_vector<value_type,tbb::zero_allocator<value_type,tbb::cache_aligned_allocator> > array_type; -}; -#endif /* !RML_USE_WCRM */ - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warnings about uninstantiatble class - #pragma warning(push) - #pragma warning(disable:4510 4610) -#endif - -template<typename T> -class padded: public T { - char pad[cache_line_size - sizeof(T)%cache_line_size]; -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif - -// FIXME - should we pad out memory to avoid false sharing of our global variables? -static tbb::atomic<int> the_balance; -static tbb::atomic<int> the_balance_inited; - -#if !RML_USE_WCRM -//! Per thread information -/** ref_count holds number of clients that are using this, - plus 1 if a host thread owns this instance. */ -class server_thread: public ref_count { - friend class thread_map; - template<typename Server, typename Client> friend class generic_connection; - friend class tbb_connection_v2; - friend class omp_connection_v2; - //! Integral type that can hold a thread_state_t - typedef int thread_state_rep_t; - tbb::atomic<thread_state_rep_t> state; -public: - thread_monitor monitor; -private: - bool is_omp_thread; - tbb::atomic<thread_state_rep_t> my_extra_state; - server_thread* link; - thread_map_base::array_type::iterator my_map_pos; - rml::server *my_conn; - rml::job* my_job; - job_automaton* my_ja; - size_t my_index; - tbb::atomic<bool> terminate; - omp_dispatch_type omp_dispatch; - -#if TBB_USE_ASSERT - //! Flag used to check if thread is still using *this. - bool has_active_thread; -#endif /* TBB_USE_ASSERT */ - - //! Volunteer to sleep. - void sleep_perhaps( thread_state_t asleep ); - - //! Destroy job corresponding to given client - /** Return true if thread must quit. */ - template<typename Connection> - bool destroy_job( Connection& c ); - - //! Do terminate the thread - /** Return true if thread must quit. */ - bool do_termination(); - - void loop(); - static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg ); - -public: - server_thread(); - - ~server_thread(); - - //! Read the thread state - thread_state_t read_state() const { - thread_state_rep_t s = state; - __TBB_ASSERT( unsigned(s)<=unsigned(ts_done), "corrupted server thread?" ); - return thread_state_t(s); - } - - //! 
Read the tbb-specific extra thread state - thread_state_t read_extra_state() const { - thread_state_rep_t s = my_extra_state; - return thread_state_t(s); - } - - //! Launch a thread that is bound to *this. - void launch( size_t stack_size ); - - //! Attempt to wakeup a thread - /** The value "to" is the new state for the thread, if it was woken up. - Returns true if thread was woken up, false otherwise. */ - bool wakeup( thread_state_t to, thread_state_t from ); - - //! Attempt to enslave a thread for OpenMP/TBB. - /** Returns true if state is successfully changed. 's' takes either ts_omp_busy or ts_tbb_busy */ - bool try_grab_for( thread_state_t s ); - -#if _WIN32||_WIN64 - //! Send the worker thread to sleep temporarily - void deactivate(); - - //! Wake the worker thread up - void reactivate(); -#endif /* _WIN32||_WIN64 */ -}; - -//! Bag of threads that are private to a client. -class private_thread_bag { - struct list_thread: server_thread { - list_thread* next; - }; - //! Root of atomic linked list of list_thread - /** ABA problem is avoided because items are only atomically pushed, never popped. */ - tbb::atomic<list_thread*> my_root; - tbb::cache_aligned_allocator<padded<list_thread> > my_allocator; -public: - //! Construct empty bag - private_thread_bag() {my_root=NULL;} - - //! Create a fresh server_thread object. - server_thread& add_one_thread() { - list_thread* t = my_allocator.allocate(1); - new( t ) list_thread; - // Atomically add to list - list_thread* old_root; - do { - old_root = my_root; - t->next = old_root; - } while( my_root.compare_and_swap( t, old_root )!=old_root ); - return *t; - } - - //! Destroy the bag and threads in it. - ~private_thread_bag() { - while( my_root ) { - // Unlink thread from list. - list_thread* t = my_root; - my_root = t->next; - // Destroy and deallocate the thread. - t->~list_thread(); - my_allocator.deallocate(static_cast<padded<list_thread>*>(t),1); - } - } -}; - -//! Forward declaration -void wakeup_some_tbb_threads(); - -//! Type-independent part of class generic_connection. -/** One to one map from server threads to jobs, and associated reference counting. */ -class thread_map : public thread_map_base { -public: - typedef rml::client::size_type size_type; - //! ctor - thread_map( wait_counter& fc, ::rml::client& client ) : - all_visited_at_least_once(false), my_min_stack_size(0), my_server_ref_count(1), - my_client_ref_count(1), my_client(client), my_factory_counter(fc) - { my_unrealized_threads = 0; } - //! dtor - ~thread_map() {} - typedef array_type::iterator iterator; - iterator begin() {return my_array.begin();} - iterator end() {return my_array.end();} - void bind(); - void unbind(); - void assist_cleanup( bool assist_null_only ); - - /** Returns number of unrealized threads to create. */ - size_type wakeup_tbb_threads( size_type n ); - bool wakeup_next_thread( iterator i, tbb_connection_v2& conn ); - void release_tbb_threads( server_thread* t ); - void adjust_balance( int delta ); - - //! Add a server_thread object to the map, but do not bind it. - /** Return NULL if out of unrealized threads. 
*/ - value_type* add_one_thread( bool is_omp_thread_ ); - - void bind_one_thread( rml::server& server, value_type& x ); - - void remove_client_ref(); - int add_server_ref() {return my_server_ref_count.add_ref();} - int remove_server_ref() {return my_server_ref_count.remove_ref();} - - ::rml::client& client() const {return my_client;} - - size_type get_unrealized_threads() { return my_unrealized_threads; } - -private: - private_thread_bag my_private_threads; - bool all_visited_at_least_once; - array_type my_array; - size_t my_min_stack_size; - tbb::atomic<size_type> my_unrealized_threads; - - //! Number of threads referencing *this, plus one extra. - /** When it becomes zero, the containing server object can be safely deleted. */ - ref_count my_server_ref_count; - - //! Number of jobs that need cleanup, plus one extra. - /** When it becomes zero, acknowledge_close_connection is called. */ - ref_count my_client_ref_count; - - ::rml::client& my_client; - //! Counter owned by factory that produced this thread_map. - wait_counter& my_factory_counter; -}; - -void thread_map::bind_one_thread( rml::server& server, value_type& x ) { - // Add one to account for the thread referencing this map hereforth. - server_thread& t = x.thread(); - my_server_ref_count.add_ref(); - my_client_ref_count.add_ref(); -#if TBB_USE_ASSERT - __TBB_ASSERT( t.add_ref()==1, NULL ); -#else - t.add_ref(); -#endif - // Have responsibility to start the thread. - t.my_conn = &server; - t.my_ja = &x.my_automaton; - t.launch( my_min_stack_size ); - /* Must wake thread up so it can fill in its "my_job" field in *this. - Otherwise deadlock can occur where wait_for_job spins on thread that is sleeping. */ - __TBB_ASSERT( t.state!=ts_tbb_busy, NULL ); - t.wakeup( ts_idle, ts_asleep ); -} - -thread_map::value_type* thread_map::add_one_thread( bool is_omp_thread_ ) { - size_type u; - do { - u = my_unrealized_threads; - if( !u ) return NULL; - } while( my_unrealized_threads.compare_and_swap(u-1,u)!=u ); - server_thread& t = my_private_threads.add_one_thread(); - t.is_omp_thread = is_omp_thread_; - __TBB_ASSERT( u>=1, NULL ); - t.my_index = u - 1; - __TBB_ASSERT( t.state!=ts_tbb_busy, NULL ); - t.my_extra_state = t.is_omp_thread ? ts_none : ts_created; - - iterator i = t.my_map_pos = my_array.grow_by(1); - value_type& v = *i; - v.my_thread = &t; - return &v; -} - -void thread_map::bind() { - ++my_factory_counter; - my_min_stack_size = my_client.min_stack_size(); - __TBB_ASSERT( my_unrealized_threads==0, "already called bind?" ); - my_unrealized_threads = my_client.max_job_count(); -} - -void thread_map::unbind() { - // Ask each server_thread to cleanup its job for this server. - for( iterator i=begin(); i!=end(); ++i ) { - server_thread& t = i->thread(); - t.terminate = true; - t.wakeup( ts_idle, ts_asleep ); - } - // Remove extra ref to client. - remove_client_ref(); -} - -void thread_map::assist_cleanup( bool assist_null_only ) { - // To avoid deadlock, the current thread *must* help out with cleanups that have not started, - // becausd the thread that created the job may be busy for a long time. - for( iterator i = begin(); i!=end(); ++i ) { - rml::job* j=0; - job_automaton& ja = i->my_automaton; - if( assist_null_only ? ja.try_plug_null() : ja.try_plug(j) ) { - if( j ) { - my_client.cleanup(*j); - } else { - // server thread did not get a chance to create a job. 
- } - remove_client_ref(); - } - } -} - -thread_map::size_type thread_map::wakeup_tbb_threads( size_type n ) { - __TBB_ASSERT(n>0,"must specify positive number of threads to wake up"); - iterator e = end(); - for( iterator k=begin(); k!=e; ++k ) { - // If another thread added *k, there is a tiny timing window where thread() is invalid. - server_thread& t = k->wait_for_thread(); - thread_state_t thr_s = t.read_state(); - if( t.read_extra_state()==ts_created || thr_s==ts_tbb_busy || thr_s==ts_done ) - continue; - if( --the_balance>=0 ) { // try to withdraw a coin from the deposit - while( !t.try_grab_for( ts_tbb_busy ) ) { - thr_s = t.read_state(); - if( thr_s==ts_tbb_busy || thr_s==ts_done ) { - // we lost; move on to the next. - ++the_balance; - goto skip; - } - } - if( --n==0 ) - return 0; - } else { - // overdraft. - ++the_balance; - break; - } -skip: - ; - } - return n<my_unrealized_threads ? n : my_unrealized_threads; -} -#else /* RML_USE_WCRM */ - -class thread_map : no_copy { - friend class omp_connection_v2; - typedef ::std::hash_map<uintptr_t,server_thread*> hash_map_type; - size_t my_min_stack_size; - size_t my_unrealized_threads; - ::rml::client& my_client; - //! Counter owned by factory that produced this thread_map. - wait_counter& my_factory_counter; - //! Ref counters - ref_count my_server_ref_count; - ref_count my_client_ref_count; - // FIXME: pad this? - hash_map_type my_map; - bool shutdown_in_progress; - std::vector<IExecutionResource*> original_exec_resources; - tbb::cache_aligned_allocator<padded<tbb_server_thread> > my_tbb_allocator; - tbb::cache_aligned_allocator<padded<omp_server_thread> > my_omp_allocator; - tbb::cache_aligned_allocator<padded<thread_scavenger_thread> > my_scavenger_allocator; - IResourceManager* my_concrt_resource_manager; - IScheduler* my_scheduler; - ISchedulerProxy* my_scheduler_proxy; - tbb::atomic<thread_scavenger_thread*> my_thread_scavenger_thread; -#if TBB_USE_ASSERT - tbb::atomic<int> n_add_vp_requests; - tbb::atomic<int> n_thread_scavengers_created; -#endif -public: - thread_map( wait_counter& fc, ::rml::client& client ) : - my_min_stack_size(0), my_client(client), my_factory_counter(fc), - my_server_ref_count(1), my_client_ref_count(1), shutdown_in_progress(false), - my_concrt_resource_manager(NULL), my_scheduler(NULL), my_scheduler_proxy(NULL) - { - my_thread_scavenger_thread = NULL; -#if TBB_USE_ASSERT - n_add_vp_requests = 0; - n_thread_scavengers_created; -#endif - } - - ~thread_map() { - __TBB_ASSERT( n_thread_scavengers_created<=1, "too many scavenger thread created" ); - // if thread_scavenger_thread is launched, wait for it to complete - if( my_thread_scavenger_thread ) { - __TBB_ASSERT( my_thread_scavenger_thread!=c_claimed, NULL ); - while( my_thread_scavenger_thread->read_state()==ts_busy ) - __TBB_Yield(); - thread_scavenger_thread* tst = my_thread_scavenger_thread; - my_scavenger_allocator.deallocate(static_cast<padded<thread_scavenger_thread>*>(tst),1); - } - // deallocate thread contexts - for( hash_map_type::const_iterator hi=my_map.begin(); hi!=my_map.end(); ++hi ) { - server_thread* thr = hi->second; - if( thr->tbb_thread ) { - while( ((tbb_server_thread*)thr)->activation_count>1 ) - __TBB_Yield(); - ((tbb_server_thread*)thr)->~tbb_server_thread(); - my_tbb_allocator.deallocate(static_cast<padded<tbb_server_thread>*>(thr),1); - } else { - ((omp_server_thread*)thr)->~omp_server_thread(); - my_omp_allocator.deallocate(static_cast<padded<omp_server_thread>*>(thr),1); - } - } - if( my_scheduler_proxy ) { - 
my_scheduler_proxy->Shutdown(); - my_concrt_resource_manager->Release(); - __TBB_ASSERT( my_scheduler, NULL ); - delete my_scheduler; - } else { - __TBB_ASSERT( !my_scheduler, NULL ); - } - } - typedef hash_map_type::key_type key_type; - typedef hash_map_type::value_type value_type; - typedef hash_map_type::iterator iterator; - iterator begin() {return my_map.begin();} - iterator end() {return my_map.end();} - iterator find( key_type k ) {return my_map.find( k );} - iterator insert( key_type k, server_thread* v ) { - std::pair<iterator,bool> res = my_map.insert( value_type(k,v) ); - return res.first; - } - void bind( IScheduler* s ) { - ++my_factory_counter; - if( s ) { - my_unrealized_threads = s->GetPolicy().GetPolicyValue( MaxConcurrency ); - __TBB_ASSERT( my_unrealized_threads>0, NULL ); - my_scheduler = s; - my_concrt_resource_manager = CreateResourceManager(); // reference count==3 when first created. - my_scheduler_proxy = my_concrt_resource_manager->RegisterScheduler( s, CONCRT_RM_VERSION_1 ); - my_scheduler_proxy->RequestInitialVirtualProcessors( false ); - } - } - bool is_closing() { return shutdown_in_progress; } - void unbind( rml::server& server, ::tbb::spin_mutex& mtx ); - void add_client_ref() { my_server_ref_count.add_ref(); } - void remove_client_ref(); - void add_server_ref() {my_server_ref_count.add_ref();} - int remove_server_ref() {return my_server_ref_count.remove_ref();} - int get_server_ref_count() { int k = my_server_ref_count.my_ref_count; return k; } - void assist_cleanup( bool assist_null_only ); - void adjust_balance( int delta ); - int current_balance() const {int k = the_balance; return k;} - ::rml::client& client() const {return my_client;} - void register_as_master( server::execution_resource_t& v ) const { (IExecutionResource*&)v = my_scheduler_proxy ? my_scheduler_proxy->SubscribeCurrentThread() : NULL; } - // Rremove() should be called from the same thread that subscribed the current h/w thread (i.e., the one that - // called register_as_master() ). - void unregister( server::execution_resource_t v ) const {if( v ) ((IExecutionResource*)v)->Remove( my_scheduler );} - void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count, tbb_connection_v2& conn, ::tbb::spin_mutex& mtx ); - void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ); - void remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ); - void mark_virtual_processors_as_lent( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ); - void create_oversubscribers( unsigned n, std::vector<server_thread*>& thr_vec, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ); - void wakeup_tbb_threads( int c, ::tbb::spin_mutex& mtx ); - void mark_virtual_processors_as_returned( IVirtualProcessorRoot** vprocs, unsigned int count, tbb::spin_mutex& mtx ); - inline void addto_original_exec_resources( IExecutionResource* r, ::tbb::spin_mutex& mtx ) { - ::tbb::spin_mutex::scoped_lock lck(mtx); - __TBB_ASSERT( !is_closing(), "try to regster master while connection is being shutdown?" 
); - original_exec_resources.push_back( r ); - } -#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED - void allocate_thread_scavenger( IExecutionResource* v ); -#endif - inline thread_scavenger_thread* get_thread_scavenger() { return my_thread_scavenger_thread; } -}; - -garbage_connection_queue connections_to_reclaim; -connection_scavenger_thread connection_scavenger; - -#endif /* !RML_USE_WCRM */ - -//------------------------------------------------------------------------ -// generic_connection -//------------------------------------------------------------------------ - -template<typename Server, typename Client> -struct connection_traits {}; - -// head of the active tbb connections -static tbb::atomic<uintptr_t> active_tbb_connections; -static tbb::atomic<int> current_tbb_conn_readers; -static size_t current_tbb_conn_reader_epoch; -static tbb::atomic<size_t> close_tbb_connection_event_count; - -#if RML_USE_WCRM -template<typename Connection> -void make_job( Connection& c, server_thread& t ); -#endif - -template<typename Server, typename Client> -class generic_connection: public Server, no_copy { - /*override*/ version_type version() const {return SERVER_VERSION;} - /*override*/ void yield() {thread_monitor::yield();} - /*override*/ void independent_thread_number_changed( int delta ) { my_thread_map.adjust_balance( -delta ); } - /*override*/ unsigned default_concurrency() const {return hardware_concurrency()-1;} - friend void wakeup_some_tbb_threads(); - friend class connection_scavenger_thread; - -protected: - thread_map my_thread_map; - generic_connection* next_conn; - size_t my_ec; -#if RML_USE_WCRM - // FIXME: pad it? - tbb::spin_mutex map_mtx; - IScheduler* my_scheduler; - void do_open( IScheduler* s ) { - my_scheduler = s; - my_thread_map.bind( s ); - } - bool is_closing() { return my_thread_map.is_closing(); } - void request_close_connection( bool existing ); -#else - void do_open() {my_thread_map.bind();} - void request_close_connection( bool ); -#endif /* RML_USE_WCRM */ - //! 
Make destructor virtual - virtual ~generic_connection() {} -#if !RML_USE_WCRM - generic_connection( wait_counter& fc, Client& c ) : my_thread_map(fc,c), next_conn(NULL), my_ec(0) {} -#else - generic_connection( wait_counter& fc, Client& c ) : - my_thread_map(fc,c), next_conn(NULL), my_ec(0), map_mtx(), my_scheduler(NULL) {} - void add_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count ); - void remove_virtual_processors( IVirtualProcessorRoot** vprocs, unsigned int count ); - void notify_resources_externally_busy( IVirtualProcessorRoot** vprocs, unsigned int count ) { my_thread_map.mark_virtual_processors_as_lent( vprocs, count, map_mtx ); } - void notify_resources_externally_idle( IVirtualProcessorRoot** vprocs, unsigned int count ) { - my_thread_map.mark_virtual_processors_as_returned( vprocs, count, map_mtx ); - } -#endif /* !RML_USE_WCRM */ - -public: - typedef Server server_type; - typedef Client client_type; - Client& client() const {return static_cast<Client&>(my_thread_map.client());} - void set_scratch_ptr( job& j, void* ptr ) { ::rml::server::scratch_ptr(j) = ptr; } -#if RML_USE_WCRM - template<typename Connection> - friend void make_job( Connection& c, server_thread& t ); - void add_server_ref () {my_thread_map.add_server_ref();} - void remove_server_ref() {if( my_thread_map.remove_server_ref()==0 ) delete this;} - void add_client_ref () {my_thread_map.add_client_ref();} - void remove_client_ref() {my_thread_map.remove_client_ref();} -#else /* !RML_USE_WCRM */ - int add_server_ref () {return my_thread_map.add_server_ref();} - void remove_server_ref() {if( my_thread_map.remove_server_ref()==0 ) delete this;} - void remove_client_ref() {my_thread_map.remove_client_ref();} - void make_job( server_thread& t, job_automaton& ja ); -#endif /* RML_USE_WCRM */ - static generic_connection* get_addr( uintptr_t addr_ex ) { - return reinterpret_cast<generic_connection*>( addr_ex&~(uintptr_t)3 ); - } -}; - -//------------------------------------------------------------------------ -// TBB server -//------------------------------------------------------------------------ - -template<> -struct connection_traits<tbb_server,tbb_client> { - static const bool assist_null_only = true; - static const bool is_tbb = true; -}; - -//! Represents a server/client binding. -/** The internal representation uses inheritance for the server part and a pointer for the client part. 
*/ -class tbb_connection_v2: public generic_connection<tbb_server,tbb_client> { - /*override*/ void adjust_job_count_estimate( int delta ); -#if !RML_USE_WCRM -#if _WIN32||_WIN64 - /*override*/ void register_master ( rml::server::execution_resource_t& /*v*/ ) {} - /*override*/ void unregister_master ( rml::server::execution_resource_t /*v*/ ) {} -#endif -#else - /*override*/ void register_master ( rml::server::execution_resource_t& v ) { - my_thread_map.register_as_master(v); - if( v ) ++nesting; - } - /*override*/ void unregister_master ( rml::server::execution_resource_t v ) { - if( v ) { - __TBB_ASSERT( nesting>0, NULL ); - if( --nesting==0 ) { -#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED - my_thread_map.allocate_thread_scavenger( (IExecutionResource*)v ); -#endif - } - } - my_thread_map.unregister(v); - } - IScheduler* create_scheduler() {return( scheduler<tbb_connection_v2>::create( *this ) );} - friend void free_all_connections( uintptr_t ); - friend class scheduler<tbb_connection_v2>; - friend class execution_context; - friend class connection_scavenger_thread; -#endif /* RML_USE_WCRM */ - friend void wakeup_some_tbb_threads(); - //! Estimate on number of jobs without threads working on them. - tbb::atomic<int> my_slack; - friend class dummy_class_to_shut_up_gratuitous_warning_from_gcc_3_2_3; -#if TBB_USE_ASSERT - tbb::atomic<int> my_job_count_estimate; -#endif /* TBB_USE_ASSERT */ - - tbb::atomic<int> n_adjust_job_count_requests; -#if RML_USE_WCRM - tbb::atomic<int> nesting; -#endif - - // dtor - ~tbb_connection_v2(); - -public: -#if RML_USE_WCRM - typedef tbb_server_thread server_thread_type; -#endif - //! True if there is slack that try_process can use. - bool has_slack() const {return my_slack>0;} - -#if RML_USE_WCRM - bool try_process( job& job ) -#else - bool try_process( server_thread& t, job& job ) -#endif - { - bool visited = false; - // No check for my_slack>0 here because caller is expected to do that check. - int k = --my_slack; - if( k>=0 ) { -#if !RML_USE_WCRM - t.my_extra_state = ts_visited; // remember the thread paid a trip to process() at least once -#endif - client().process(job); - visited = true; - } - ++my_slack; - return visited; - } - - tbb_connection_v2( wait_counter& fc, tbb_client& client ) : generic_connection<tbb_server,tbb_client>(fc,client) - { - my_slack = 0; -#if RML_USE_WCRM - nesting = 0; -#endif -#if TBB_USE_ASSERT - my_job_count_estimate = 0; -#endif /* TBB_USE_ASSERT */ - __TBB_ASSERT( !my_slack, NULL ); - -#if RML_USE_WCRM - do_open( client.max_job_count()>0 ? 
create_scheduler() : NULL ); -#else - do_open(); -#endif /* !RML_USE_WCRM */ - n_adjust_job_count_requests = 0; - - // Acquire head of active_tbb_connections & push the connection into the list - uintptr_t conn; - do { - for( ; (conn=active_tbb_connections)&1; ) - __TBB_Yield(); - } while( active_tbb_connections.compare_and_swap( conn|1, conn )!=conn ); - - this->next_conn = generic_connection<tbb_server,tbb_client>::get_addr(conn); - // Update and release head of active_tbb_connections - active_tbb_connections = (uintptr_t) this; // set and release - } - inline void wakeup_tbb_threads( unsigned n ) { - my_thread_map.wakeup_tbb_threads( n -#if RML_USE_WCRM - , map_mtx -#endif - ); - } -#if RML_USE_WCRM - inline int get_nesting_level() { return nesting; } -#else - inline bool wakeup_next_thread( thread_map::iterator i ) {return my_thread_map.wakeup_next_thread( i, *this );} - inline thread_map::size_type get_unrealized_threads () {return my_thread_map.get_unrealized_threads();} -#endif /* !RML_USE_WCRM */ -}; - -//------------------------------------------------------------------------ -// OpenMP server -//------------------------------------------------------------------------ - -template<> -struct connection_traits<omp_server,omp_client> { - static const bool assist_null_only = false; - static const bool is_tbb = false; -}; - -class omp_connection_v2: public generic_connection<omp_server,omp_client> { -#if !RML_USE_WCRM - /*override*/ int current_balance() const {return the_balance;} -#else - friend void free_all_connections( uintptr_t ); - friend class scheduler<omp_connection_v2>; - /*override*/ int current_balance() const {return my_thread_map.current_balance();} -#endif /* !RML_USE_WCRM */ - /*override*/ int try_increase_load( size_type n, bool strict ); - /*override*/ void decrease_load( size_type n ); - /*override*/ void get_threads( size_type request_size, void* cookie, job* array[] ); -#if !RML_USE_WCRM -#if _WIN32||_WIN64 - /*override*/ void register_master ( rml::server::execution_resource_t& /*v*/ ) {} - /*override*/ void unregister_master ( rml::server::execution_resource_t /*v*/ ) {} -#endif -#else - /*override*/ void register_master ( rml::server::execution_resource_t& v ) { - my_thread_map.register_as_master( v ); - my_thread_map.addto_original_exec_resources( (IExecutionResource*)v, map_mtx ); - } - /*override*/ void unregister_master ( rml::server::execution_resource_t v ) { my_thread_map.unregister(v); } -#endif /* !RML_USE_WCRM */ -#if _WIN32||_WIN64 - /*override*/ void deactivate( rml::job* j ); - /*override*/ void reactivate( rml::job* j ); -#endif /* _WIN32||_WIN64 */ -#if RML_USE_WCRM -public: - typedef omp_server_thread server_thread_type; -private: - IScheduler* create_scheduler() {return( scheduler<omp_connection_v2>::create( *this ) );} -#endif /* RML_USE_WCRM */ -public: -#if TBB_USE_ASSERT - //! Net change in delta caused by this connection. 
- /** Should be zero when connection is broken */ - tbb::atomic<int> net_delta; -#endif /* TBB_USE_ASSERT */ - - omp_connection_v2( wait_counter& fc, omp_client& client ) : generic_connection<omp_server,omp_client>(fc,client) { -#if TBB_USE_ASSERT - net_delta = 0; -#endif /* TBB_USE_ASSERT */ -#if RML_USE_WCRM - do_open( create_scheduler() ); -#else - do_open(); -#endif /* RML_USE_WCRM */ - } - ~omp_connection_v2() {__TBB_ASSERT( net_delta==0, "net increase/decrease of load is nonzero" );} -}; - -#if !RML_USE_WCRM -/* to deal with cases where the machine is oversubscribed; we want each thread to trip to try_process() at least once */ -/* this should not involve computing the_balance */ -bool thread_map::wakeup_next_thread( thread_map::iterator this_thr, tbb_connection_v2& conn ) { - if( all_visited_at_least_once ) - return false; - - iterator e = end(); -retry: - bool exist = false; - iterator k=this_thr; - for( ++k; k!=e; ++k ) { - // If another thread added *k, there is a tiny timing window where thread() is invalid. - server_thread& t = k->wait_for_thread(); - if( t.my_extra_state!=ts_visited ) - exist = true; - if( t.read_state()!=ts_tbb_busy && t.my_extra_state==ts_started ) - if( t.try_grab_for( ts_tbb_busy ) ) - return true; - } - for( k=begin(); k!=this_thr; ++k ) { - server_thread& t = k->wait_for_thread(); - if( t.my_extra_state!=ts_visited ) - exist = true; - if( t.read_state()!=ts_tbb_busy && t.my_extra_state==ts_started ) - if( t.try_grab_for( ts_tbb_busy ) ) - return true; - } - - if( exist ) - if( conn.has_slack() ) - goto retry; - else - all_visited_at_least_once = true; - return false; -} - -void thread_map::release_tbb_threads( server_thread* t ) { - for( ; t; t = t->link ) { - while( t->read_state()!=ts_asleep ) - __TBB_Yield(); - t->my_extra_state = ts_started; - } -} -#endif /* !RML_USE_WCRM */ - -void thread_map::adjust_balance( int delta ) { - int new_balance = the_balance += delta; - if( new_balance>0 && 0>=new_balance-delta /*== old the_balance*/ ) - wakeup_some_tbb_threads(); -} - -void thread_map::remove_client_ref() { - int k = my_client_ref_count.remove_ref(); - if( k==0 ) { - // Notify factory that thread has crossed back into RML. - --my_factory_counter; - // Notify client that RML is done with the client object. - my_client.acknowledge_close_connection(); - } -} - -#if RML_USE_WCRM -/** Not a member of generic_connection because we need Connection to be the derived class. */ -template<typename Connection> -void make_job( Connection& c, typename Connection::server_thread_type& t ) { - if( t.my_job_automaton.try_acquire() ) { - rml::job& j = *t.my_client.create_one_job(); - __TBB_ASSERT( &j!=NULL, "client:::create_one_job returned NULL" ); - __TBB_ASSERT( (intptr_t(&j)&1)==0, "client::create_one_job returned misaligned job" ); - t.my_job_automaton.set_and_release( j ); - c.set_scratch_ptr( j, (void*) &t ); - } -} -#endif /* RML_USE_WCRM */ - -#if _MSC_VER && !defined(__INTEL_COMPILER) -// Suppress "conditional expression is constant" warning. 
-#pragma warning( push ) -#pragma warning( disable: 4127 ) -#endif -#if RML_USE_WCRM -template<typename Server, typename Client> -void generic_connection<Server,Client>::request_close_connection( bool exiting ) { - // for TBB connections, exiting should always be false - if( connection_traits<Server,Client>::is_tbb ) - __TBB_ASSERT( !exiting, NULL); -#if TBB_USE_ASSERT - else if( exiting ) - reinterpret_cast<omp_connection_v2*>(this)->net_delta = 0; -#endif - if( exiting ) { - uintptr_t tail = connections_to_reclaim.tail; - while( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::plugged, tail )!=tail ) - __TBB_Yield(); - my_thread_map.unbind( *this, map_mtx ); - my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only ); - // It is assumed that the client waits for all other threads to terminate before - // calling request_close_connection with true. Thus, it is safe to return all - // outstanding connection objects that are reachable. It is possible that there may - // be some unreachable connection objects lying somewhere. - free_all_connections( connection_scavenger.grab_and_prepend( this ) ); - return; - } -#else /* !RML_USE_WCRM */ -template<typename Server, typename Client> -void generic_connection<Server,Client>::request_close_connection( bool ) { -#endif /* RML_USE_WCRM */ - if( connection_traits<Server,Client>::is_tbb ) { - // acquire the head of active tbb connections - uintptr_t conn; - do { - for( ; (conn=active_tbb_connections)&1; ) - __TBB_Yield(); - } while( active_tbb_connections.compare_and_swap( conn|1, conn )!=conn ); - - // Locate the current connection - generic_connection* pred_conn = NULL; - generic_connection* curr_conn = (generic_connection*) conn; - for( ; curr_conn && curr_conn!=this; curr_conn=curr_conn->next_conn ) - pred_conn = curr_conn; - __TBB_ASSERT( curr_conn==this, "the current connection is not in the list?" 
); - - // Remove this from the list - if( pred_conn ) { - pred_conn->next_conn = curr_conn->next_conn; - active_tbb_connections = reinterpret_cast<uintptr_t>(generic_connection<tbb_server,tbb_client>::get_addr(active_tbb_connections)); // release it - } else - active_tbb_connections = (uintptr_t) curr_conn->next_conn; // update & release it - curr_conn->next_conn = NULL; - // Increment the tbb connection close event count - my_ec = ++close_tbb_connection_event_count; - // Wait happens in tbb_connection_v2::~tbb_connection_v2() - } -#if RML_USE_WCRM - my_thread_map.unbind( *this, map_mtx ); - my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only ); - connection_scavenger.add_request( this ); -#else - my_thread_map.unbind(); - my_thread_map.assist_cleanup( connection_traits<Server,Client>::assist_null_only ); - // Remove extra reference - remove_server_ref(); -#endif -} -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif - -#if RML_USE_WCRM - -template<typename Server, typename Client> -void generic_connection<Server,Client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{} - -template<> -void generic_connection<tbb_server,tbb_client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - my_thread_map.add_virtual_processors( vproots, count, (tbb_connection_v2&)*this, map_mtx ); -} -template<> -void generic_connection<omp_server,omp_client>::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - // For OMP, since it uses ScheudlerPolicy of MinThreads==MaxThreads, this is called once when - // RequestInitialVirtualProcessors() is called. - my_thread_map.add_virtual_processors( vproots, count, (omp_connection_v2&)*this, map_mtx ); -} - -template<typename Server, typename Client> -void generic_connection<Server,Client>::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - __TBB_ASSERT( false, "should not be called" ); -} -/* For OMP, RemoveVirtualProcessors() will never be called. */ - -template<> -void generic_connection<tbb_server,tbb_client>::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - my_thread_map.remove_virtual_processors( vproots, count, map_mtx ); -} - -void tbb_connection_v2::adjust_job_count_estimate( int delta ) { -#if TBB_USE_ASSERT - my_job_count_estimate += delta; -#endif /* TBB_USE_ASSERT */ - // Atomically update slack. 
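The list of active TBB connections manipulated above is protected by a small tagged-pointer trick: the head is kept in a uintptr_t whose low bit doubles as a spin lock, and get_addr() masks the tag bits off before the value is used as a pointer. A minimal standalone sketch of that idiom (using std::atomic<uintptr_t> rather than the bundled tbb::atomic, with illustrative names node, head and push) might look like this:

    #include <atomic>
    #include <cstdint>

    struct node { node* next; };

    // Head of a singly linked list. Bit 0 doubles as a spin lock, so nodes
    // must be at least 4-byte aligned (which leaves bits 0..1 available).
    static std::atomic<std::uintptr_t> head(0);

    static node* get_addr(std::uintptr_t v) {
        // Strip the tag bits before using the value as a pointer.
        return reinterpret_cast<node*>(v & ~static_cast<std::uintptr_t>(3));
    }

    // Spin until the lock bit is clear, then set it; returns the old head.
    static std::uintptr_t acquire_head() {
        for (;;) {
            std::uintptr_t h = head.load(std::memory_order_acquire);
            if (h & 1)
                continue;                          // another thread holds the lock
            if (head.compare_exchange_weak(h, h | 1, std::memory_order_acquire))
                return h;
        }
    }

    // Push a new node; storing the untagged new head releases the lock.
    static void push(node* n) {
        std::uintptr_t old_head = acquire_head();
        n->next = get_addr(old_head);
        head.store(reinterpret_cast<std::uintptr_t>(n), std::memory_order_release);
    }

Masking with ~3 also leaves bit 1 free, which the scavenger code later in this file uses (conn_ex&2) to remember whether a queued connection is a TBB or an OpenMP one.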
- int c = my_slack+=delta; - if( c>0 ) { - ++n_adjust_job_count_requests; - my_thread_map.wakeup_tbb_threads( c, map_mtx ); - --n_adjust_job_count_requests; - } -} -#endif /* RML_USE_WCRM */ - -tbb_connection_v2::~tbb_connection_v2() { -#if TBB_USE_ASSERT - if( my_job_count_estimate!=0 ) { - fprintf(stderr, "TBB client tried to disconnect with non-zero net job count estimate of %d\n", int(my_job_count_estimate )); - abort(); - } - __TBB_ASSERT( !my_slack, "attempt to destroy tbb_server with nonzero slack" ); - __TBB_ASSERT( this!=static_cast<tbb_connection_v2*>(generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)), "request_close_connection() must be called" ); -#endif /* TBB_USE_ASSERT */ -#if !RML_USE_WCRM - // If there are other threads ready for work, give them coins - if( the_balance>0 ) - wakeup_some_tbb_threads(); -#endif - // Someone might be accessing my data members - while( current_tbb_conn_readers>0 && (ptrdiff_t)(my_ec-current_tbb_conn_reader_epoch)>0 ) - __TBB_Yield(); -} - -#if !RML_USE_WCRM -template<typename Server, typename Client> -void generic_connection<Server,Client>::make_job( server_thread& t, job_automaton& ja ) { - if( ja.try_acquire() ) { - rml::job& j = *client().create_one_job(); - __TBB_ASSERT( &j!=NULL, "client:::create_one_job returned NULL" ); - __TBB_ASSERT( (intptr_t(&j)&1)==0, "client::create_one_job returned misaligned job" ); - ja.set_and_release( j ); - __TBB_ASSERT( t.my_conn && t.my_ja && t.my_job==NULL, NULL ); - t.my_job = &j; - set_scratch_ptr( j, (void*) &t ); - } -} - -void tbb_connection_v2::adjust_job_count_estimate( int delta ) { -#if TBB_USE_ASSERT - my_job_count_estimate += delta; -#endif /* TBB_USE_ASSERT */ - // Atomically update slack. - int c = my_slack+=delta; - if( c>0 ) { - ++n_adjust_job_count_requests; - // The client has work to do and there are threads available - thread_map::size_type n = my_thread_map.wakeup_tbb_threads(c); - - server_thread* new_threads_anchor = NULL; - thread_map::size_type i; - for( i=0; i<n; ++i ) { - // Obtain unrealized threads - thread_map::value_type* k = my_thread_map.add_one_thread( false ); - if( !k ) - // No unrealized threads left. - break; - // Eagerly start the thread off. - my_thread_map.bind_one_thread( *this, *k ); - server_thread& t = k->thread(); - __TBB_ASSERT( !t.link, NULL ); - t.link = new_threads_anchor; - new_threads_anchor = &t; - } - - thread_map::size_type j=0; - for( ; the_balance>0 && j<i; ++j ) { - if( --the_balance>=0 ) { - // Withdraw a coin from the bank - __TBB_ASSERT( new_threads_anchor, NULL ); - - server_thread* t = new_threads_anchor; - new_threads_anchor = t->link; - while( !t->try_grab_for( ts_tbb_busy ) ) - __TBB_Yield(); - t->my_extra_state = ts_started; - } else { - // Overdraft. return it to the bank - ++the_balance; - break; - } - } - __TBB_ASSERT( i-j!=0||new_threads_anchor==NULL, NULL ); - // Mark the ones that did not get started as eligible for being snatched. - if( new_threads_anchor ) - my_thread_map.release_tbb_threads( new_threads_anchor ); - - --n_adjust_job_count_requests; - } -} -#endif /* RML_USE_WCRM */ - -#if RML_USE_WCRM -int omp_connection_v2::try_increase_load( size_type n, bool strict ) { - __TBB_ASSERT(int(n)>=0,NULL); - if( strict ) { - the_balance -= int(n); - } else { - int avail, old; - do { - avail = the_balance; - if( avail<=0 ) { - // No atomic read-write-modify operation necessary. - return avail; - } - // Don't read the_system_balance; if it changes, compare_and_swap will fail anyway. 
- old = the_balance.compare_and_swap( int(n)<avail ? avail-n : 0, avail ); - } while( old!=avail ); - if( int(n)>avail ) - n=avail; - } -#if TBB_USE_ASSERT - net_delta += n; -#endif /* TBB_USE_ASSERT */ - return n; -} - -void omp_connection_v2::decrease_load( size_type /*n*/ ) {} - -void omp_connection_v2::get_threads( size_type request_size, void* cookie, job* array[] ) { - unsigned index = 0; - std::vector<omp_server_thread*> enlisted(request_size); - std::vector<thread_grab_t> to_activate(request_size); - - if( request_size==0 ) return; - - { - tbb::spin_mutex::scoped_lock lock(map_mtx); - - __TBB_ASSERT( !is_closing(), "try to get threads while connection is being shutdown?" ); - - for( int scan=0; scan<2; ++scan ) { - for( thread_map::iterator i=my_thread_map.begin(); i!=my_thread_map.end(); ++i ) { - omp_server_thread* thr = (omp_server_thread*) (*i).second; - // in the first scan, skip VPs that are lent - if( scan==0 && thr->is_lent() ) continue; - thread_grab_t res = thr->try_grab_for(); - if( res!=wk_failed ) {// && if is not busy by some other scheduler - to_activate[index] = res; - enlisted[index] = thr; - if( ++index==request_size ) - goto activate_threads; - } - } - } - } - -activate_threads: - - for( unsigned i=0; i<index; ++i ) { - omp_server_thread* thr = enlisted[i]; - if( to_activate[i]==wk_from_asleep ) - thr->get_virtual_processor()->Activate( thr ); - job* j = thr->wait_for_job(); - array[i] = j; - thr->omp_data.produce( client(), *j, cookie, i PRODUCE_ARG(*this) ); - } - - if( index==request_size ) - return; - - // If we come to this point, it must be becuase dynamic==false - // Create Oversubscribers.. - - // Note that our policy is such that MinConcurrency==MaxConcurrency. - // RM will deliver MaxConcurrency of VirtualProcessors and no more. - __TBB_ASSERT( request_size>index, NULL ); - unsigned n = request_size - index; - std::vector<server_thread*> thr_vec(n); - typedef std::vector<server_thread*>::iterator iterator_thr; - my_thread_map.create_oversubscribers( n, thr_vec, *this, map_mtx ); - for( iterator_thr ti=thr_vec.begin(); ti!=thr_vec.end(); ++ti ) { - omp_server_thread* thr = (omp_server_thread*) *ti; - __TBB_ASSERT( thr, "thread not created?" ); - // Thread is already grabbed; since it is nrewly created, we need to activate it. - thr->get_virtual_processor()->Activate( thr ); - job* j = thr->wait_for_job(); - array[index] = j; - thr->omp_data.produce( client(), *j, cookie, index PRODUCE_ARG(*this) ); - ++index; - } -} - -#if _WIN32||_WIN64 -void omp_connection_v2::deactivate( rml::job* j ) -{ - my_thread_map.adjust_balance(1); -#if TBB_USE_ASSERT - net_delta -= 1; -#endif - omp_server_thread* thr = (omp_server_thread*) scratch_ptr( *j ); - (thr->get_virtual_processor())->Deactivate( thr ); -} - -void omp_connection_v2::reactivate( rml::job* j ) -{ - // Should not adjust the_balance because OMP client is supposed to - // do try_increase_load() to reserve the threads to use. - omp_server_thread* thr = (omp_server_thread*) scratch_ptr( *j ); - (thr->get_virtual_processor())->Activate( thr ); -} -#endif /* !_WIN32||_WIN64 */ - -#endif /* RML_USE_WCRM */ - -//! Wake up some available tbb threads -void wakeup_some_tbb_threads() -{ - /* First, atomically grab the connection, then increase the server ref count to keep - it from being released prematurely. Second, check if the balance is available for TBB - and the tbb conneciton has slack to exploit. If the answer is true, go ahead and - try to wake some up. 
*/ - if( generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)==0 ) - // the next connection will see the change; return. - return; - -start_it_over: - int n_curr_readers = ++current_tbb_conn_readers; - if( n_curr_readers>1 ) // I lost - return; - // if n_curr_readers==1, i am the first one, so I will take responsibility for waking tbb threads up. - - // update the current epoch - current_tbb_conn_reader_epoch = close_tbb_connection_event_count; - - // read and clear - // Newly added connection will not invalidate the pointer, and it will - // compete with the current one to claim coins. - // One that is about to close the connection increments the event count - // after it removes the connection from the list. But it will keep around - // the connection until all readers including this one catch up. So, reading - // the head and clearing the lock bit should be o.k. - generic_connection<tbb_server,tbb_client>* next_conn_wake_up = generic_connection<tbb_server,tbb_client>::get_addr( active_tbb_connections ); - - for( ; next_conn_wake_up; ) { - /* some threads are creating tbb server threads; they may not see my changes made to the_balance */ - /* When a thread is in adjust_job_count_estimate() to increase the slack - RML tries to activate worker threads on behalf of the requesting thread - by repeatedly drawing a coin from the bank optimistically and grabbing a - thread. If it finds the bank overdrafted, it returns the coin back to - the bank and returns the control to the thread (return from the method). - There lies a tiny timing hole. - - When the overdraft occurs (note that multiple masters may be in - adjust_job_count_estimate() so the_balance can be any negative value) and - a worker returns from the TBB work at that moment, its returning the coin - does not bump up the_balance over 0, so it happily returns from - wakeup_some_tbb_threads() without attempting to give coins to worker threads - that are ready. - */ - while( ((tbb_connection_v2*)next_conn_wake_up)->n_adjust_job_count_requests>0 ) - __TBB_Yield(); - - int bal = the_balance; - n_curr_readers = current_tbb_conn_readers; // get the snapshot - if( bal<=0 ) break; - // if the connection is deleted, the following will immediately return because its slack would be 0 or less. - - tbb_connection_v2* tbb_conn = (tbb_connection_v2*)next_conn_wake_up; - int my_slack = tbb_conn->my_slack; - if( my_slack>0 ) tbb_conn->wakeup_tbb_threads( my_slack ); - next_conn_wake_up = next_conn_wake_up->next_conn; - } - - int delta = current_tbb_conn_readers -= n_curr_readers; - //if delta>0, more threads entered the routine since this one took the snapshot - if( delta>0 ) { - current_tbb_conn_readers = 0; - if( the_balance>0 && generic_connection<tbb_server,tbb_client >::get_addr(active_tbb_connections)!=0 ) - goto start_it_over; - } - - // Signal any connection that is waiting for me to complete my access that I am done. - current_tbb_conn_reader_epoch = close_tbb_connection_event_count; -} - -#if !RML_USE_WCRM -int omp_connection_v2::try_increase_load( size_type n, bool strict ) { - __TBB_ASSERT(int(n)>=0,NULL); - if( strict ) { - the_balance -= int(n); - } else { - int avail, old; - do { - avail = the_balance; - if( avail<=0 ) { - // No atomic read-write-modify operation necessary. - return avail; - } - // don't read the_balance; if it changes, compare_and_swap will fail anyway. - old = the_balance.compare_and_swap( int(n)<avail ? 
avail-n : 0, avail ); - } while( old!=avail ); - if( int(n)>avail ) - n=avail; - } -#if TBB_USE_ASSERT - net_delta += n; -#endif /* TBB_USE_ASSERT */ - return n; -} - -void omp_connection_v2::decrease_load( size_type n ) { - __TBB_ASSERT(int(n)>=0,NULL); - my_thread_map.adjust_balance(int(n)); -#if TBB_USE_ASSERT - net_delta -= n; -#endif /* TBB_USE_ASSERT */ -} - -void omp_connection_v2::get_threads( size_type request_size, void* cookie, job* array[] ) { - - if( !request_size ) - return; - - unsigned index = 0; - for(;;) { // don't return until all request_size threads are grabbed. - // Need to grab some threads - thread_map::iterator k_end=my_thread_map.end(); - for( thread_map::iterator k=my_thread_map.begin(); k!=k_end; ++k ) { - // If another thread added *k, there is a tiny timing window where thread() is invalid. - server_thread& t = k->wait_for_thread(); - if( t.try_grab_for( ts_omp_busy ) ) { - // The preincrement instead of post-increment of index is deliberate. - job& j = k->wait_for_job(); - array[index] = &j; - t.omp_dispatch.produce( client(), j, cookie, index PRODUCE_ARG(*this) ); - if( ++index==request_size ) - return; - } - } - // Need to allocate more threads - for( unsigned i=index; i<request_size; ++i ) { - __TBB_ASSERT( index<request_size, NULL ); - thread_map::value_type* k = my_thread_map.add_one_thread( true ); -#if TBB_USE_ASSERT - if( !k ) { - // Client erred - __TBB_ASSERT(false, "server::get_threads: exceeded job_count\n"); - } -#endif - my_thread_map.bind_one_thread( *this, *k ); - server_thread& t = k->thread(); - if( t.try_grab_for( ts_omp_busy ) ) { - job& j = k->wait_for_job(); - array[index] = &j; - // The preincrement instead of post-increment of index is deliberate. - t.omp_dispatch.produce( client(), j, cookie, index PRODUCE_ARG(*this) ); - if( ++index==request_size ) - return; - } // else someone else snatched it. - } - } -} -#endif /* !RML_USE_WCRM */ - -//------------------------------------------------------------------------ -// Methods of omp_dispatch_type -//------------------------------------------------------------------------ -void omp_dispatch_type::consume() { - job_type* j = job; - // Wait for short window between when master sets state of this thread to ts_omp_busy - // and master thread calls produce. - if( !j ) { - tbb::internal::atomic_backoff bo; - do { - bo.pause(); - j = job; - } while( !j ); - } - job = static_cast<job_type*>(NULL); - client->process(*j,cookie,index); -#if TBB_USE_ASSERT - // Return of method process implies "decrease_load" from client's viewpoint, even though - // the actual adjustment of the_balance only happens when this thread really goes to sleep. - --server->net_delta; -#endif /* TBB_USE_ASSERT */ -} - -#if !RML_USE_WCRM -#if _WIN32||_WIN64 -void omp_connection_v2::deactivate( rml::job* j ) -{ -#if TBB_USE_ASSERT - net_delta -= 1; -#endif - __TBB_ASSERT( j, NULL ); - server_thread* thr = (server_thread*) scratch_ptr( *j ); - thr->deactivate(); -} - -void omp_connection_v2::reactivate( rml::job* j ) -{ - // Should not adjust the_balance because OMP client is supposed to - // do try_increase_load() to reserve the threads to use. 
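The non-strict branch of try_increase_load() above claims as many units from the_balance as it can without driving the balance negative itself, retrying the compare_and_swap whenever another thread wins the race. A compact sketch of that pattern, with std::atomic standing in for the bundled tbb::atomic and an illustrative try_claim() name, could read:

    #include <algorithm>
    #include <atomic>

    // Spare capacity shared by all connections; other code paths may drive
    // it negative, but this caller refuses to do so itself.
    static std::atomic<int> balance(0);

    // Claim up to n units, returning how many were actually obtained
    // (0 when nothing is available). Mirrors the retry loop of the
    // non-strict try_increase_load() path above.
    static int try_claim(int n) {
        int avail = balance.load();
        for (;;) {
            if (avail <= 0)
                return 0;                 // nothing to take, no RMW needed
            int take = std::min(n, avail);
            // On CAS failure 'avail' is refreshed with the current value and
            // the loop recomputes how much can still be taken.
            if (balance.compare_exchange_weak(avail, avail - take))
                return take;
        }
    }

The strict path, by contrast, simply subtracts n and lets the balance go negative, which is why the other readers of the_balance in this file tolerate negative values.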
- __TBB_ASSERT( j, NULL ); - server_thread* thr = (server_thread*) scratch_ptr( *j ); - thr->reactivate(); -} -#endif /* _WIN32||_WIN64 */ - -//------------------------------------------------------------------------ -// Methods of server_thread -//------------------------------------------------------------------------ - -server_thread::server_thread() : - ref_count(0), - link(NULL), - my_map_pos(), - my_conn(NULL), my_job(NULL), my_ja(NULL) -{ - state = ts_idle; - terminate = false; -#if TBB_USE_ASSERT - has_active_thread = false; -#endif /* TBB_USE_ASSERT */ -} - -server_thread::~server_thread() { - __TBB_ASSERT( !has_active_thread, NULL ); -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warnings about an initialized variable 'sink_for_alloca' not referenced - #pragma warning(push) - #pragma warning(disable:4189) -#endif -__RML_DECL_THREAD_ROUTINE server_thread::thread_routine( void* arg ) { - server_thread* self = static_cast<server_thread*>(arg); - AVOID_64K_ALIASING( self->my_index ); -#if TBB_USE_ASSERT - __TBB_ASSERT( !self->has_active_thread, NULL ); - self->has_active_thread = true; -#endif /* TBB_USE_ASSERT */ - self->loop(); - return 0; -} -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif - -void server_thread::launch( size_t stack_size ) { - thread_monitor::launch( thread_routine, this, stack_size ); -} - -void server_thread::sleep_perhaps( thread_state_t asleep ) { - if( terminate ) return; - __TBB_ASSERT( asleep==ts_asleep, NULL ); - thread_monitor::cookie c; - monitor.prepare_wait(c); - if( state.compare_and_swap( asleep, ts_idle )==ts_idle ) { - if( !terminate ) { - monitor.commit_wait(c); - // Someone else woke me up. The compare_and_swap further below deals with spurious wakeups. - } else { - monitor.cancel_wait(); - } - thread_state_t s = read_state(); - if( s==ts_asleep ) { - state.compare_and_swap( ts_idle, ts_asleep ); - // I woke myself up, either because I cancelled the wait or suffered a spurious wakeup. - } else { - // Someone else woke me up; there the_balance is decremented by 1. -- tbb only - if( !is_omp_thread ) { - __TBB_ASSERT( s==ts_tbb_busy||s==ts_idle, NULL ); - } - } - } else { - // someone else made it busy ; see try_grab_for when state==ts_idle. - __TBB_ASSERT( state==ts_omp_busy||state==ts_tbb_busy, NULL ); - monitor.cancel_wait(); - } - __TBB_ASSERT( read_state()!=asleep, "a thread can only put itself to sleep" ); -} - -bool server_thread::wakeup( thread_state_t to, thread_state_t from ) { - bool success = false; - __TBB_ASSERT( from==ts_asleep && (to==ts_idle||to==ts_omp_busy||to==ts_tbb_busy), NULL ); - if( state.compare_and_swap( to, from )==from ) { - if( !is_omp_thread ) __TBB_ASSERT( to==ts_idle||to==ts_tbb_busy, NULL ); - // There is a small timing window that permits balance to become negative, - // but such occurrences are probably rare enough to not worry about, since - // at worst the result is slight temporary oversubscription. - monitor.notify(); - success = true; - } - return success; -} - -//! Attempt to change a thread's state to ts_omp_busy, and waking it up if necessary. -bool server_thread::try_grab_for( thread_state_t target_state ) { - bool success = false; - switch( read_state() ) { - case ts_asleep: - success = wakeup( target_state, ts_asleep ); - break; - case ts_idle: - success = state.compare_and_swap( target_state, ts_idle )==ts_idle; - break; - default: - // Thread is not available to be part of an OpenMP thread team. 
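try_grab_for() above reduces "enlist this worker" to a single compare-and-swap on the thread's state word, so two masters can never both succeed in claiming the same thread. A stripped-down sketch of the same state-machine idea (std::atomic, an illustrative worker type, and with the monitor wake-up for sleeping threads deliberately left out):

    #include <atomic>

    enum thread_state_t { ts_idle, ts_asleep, ts_tbb_busy, ts_omp_busy };

    // One atomic state word per worker; every transition is a single CAS,
    // so two concurrent "grab" attempts can never both succeed.
    struct worker {
        std::atomic<thread_state_t> state{ts_idle};

        // Claim an idle or sleeping worker for 'target' (ts_tbb_busy or
        // ts_omp_busy). Returns false if the worker is already busy or if
        // someone else changed the state first.
        bool try_grab_for(thread_state_t target) {
            thread_state_t s = state.load();
            if (s != ts_idle && s != ts_asleep)
                return false;             // already working for a client
            // The real code additionally notifies the sleeping thread's
            // monitor when s == ts_asleep; that wake-up is omitted here.
            return state.compare_exchange_strong(s, target);
        }
    };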
- break; - } - return success; -} - -#if _WIN32||_WIN64 -void server_thread::deactivate() { - thread_state_t es = (thread_state_t) my_extra_state.fetch_and_store( ts_deactivated ); - __TBB_ASSERT( my_extra_state==ts_deactivated, "someone else tampered with my_extra_state?" ); - if( es==ts_none ) - state = ts_idle; - else - __TBB_ASSERT( es==ts_reactivated, "Cannot call deactivate() while in ts_deactivated" ); - // only the thread can transition itself from ts_deactivted to ts_none - __TBB_ASSERT( my_extra_state==ts_deactivated, "someone else tampered with my_extra_state?" ); - my_extra_state = ts_none; // release the critical section - int bal = ++the_balance; - if( bal>0 ) - wakeup_some_tbb_threads(); - if( es==ts_none ) - sleep_perhaps( ts_asleep ); -} - -void server_thread::reactivate() { - thread_state_t es; - do { - while( (es=read_extra_state())==ts_deactivated ) - __TBB_Yield(); - if( es==ts_reactivated ) { - __TBB_ASSERT( false, "two Reactivate() calls in a row. Should not happen" ); - return; - } - __TBB_ASSERT( es==ts_none, NULL ); - } while( (thread_state_t)my_extra_state.compare_and_swap( ts_reactivated, ts_none )!=ts_none ); - if( state!=ts_omp_busy ) { - my_extra_state = ts_none; - while( !try_grab_for( ts_omp_busy ) ) - __TBB_Yield(); - } -} -#endif /* _WIN32||_WIN64 */ - - -template<typename Connection> -bool server_thread::destroy_job( Connection& c ) { - __TBB_ASSERT( !is_omp_thread||(state==ts_idle||state==ts_omp_busy), NULL ); - __TBB_ASSERT( is_omp_thread||(state==ts_idle||state==ts_tbb_busy), NULL ); - if( !is_omp_thread ) { - __TBB_ASSERT( state==ts_idle||state==ts_tbb_busy, NULL ); - if( state==ts_idle ) - state.compare_and_swap( ts_done, ts_idle ); - // 'state' may be set to ts_tbb_busy by another thread. - - if( state==ts_tbb_busy ) { // return the coin to the deposit - // need to deposit first to let the next connection see the change - ++the_balance; - state = ts_done; // no other thread changes the state when it is ts_*_busy - } - } - if( job_automaton* ja = my_ja ) { - rml::job* j; - if( ja->try_plug(j) ) { - __TBB_ASSERT( j, NULL ); - c.client().cleanup(*j); - c.remove_client_ref(); - } else { - // Some other thread took responsibility for cleaning up the job. - } - } - // Must do remove client reference first, because execution of - // c.remove_ref() can cause *this to be destroyed. - int k = remove_ref(); - __TBB_ASSERT_EX( k==0, "more than one references?" ); -#if TBB_USE_ASSERT - has_active_thread = false; -#endif /* TBB_USE_ASSERT */ - c.remove_server_ref(); - return true; -} - -bool server_thread::do_termination() { - if( is_omp_thread ) - return destroy_job( *static_cast<omp_connection_v2*>(my_conn) ); - else - return destroy_job( *static_cast<tbb_connection_v2*>(my_conn) ); -} - -//! Loop that each thread executes -void server_thread::loop() { - if( is_omp_thread ) - static_cast<omp_connection_v2*>(my_conn)->make_job( *this, *my_ja ); - else - static_cast<tbb_connection_v2*>(my_conn)->make_job( *this, *my_ja ); - for(;;) { - __TBB_Yield(); - if( state==ts_idle ) - sleep_perhaps( ts_asleep ); - - // Check whether I should quit. - if( terminate ) - if( do_termination() ) - return; - - // read the state - thread_state_t s = read_state(); - __TBB_ASSERT( s==ts_idle||s==ts_omp_busy||s==ts_tbb_busy, NULL ); - - if( s==ts_omp_busy ) { - // Enslaved by OpenMP team. - omp_dispatch.consume(); - /* here wake tbb threads up if feasible */ - if( ++the_balance>0 ) - wakeup_some_tbb_threads(); - state = ts_idle; - } else if( s==ts_tbb_busy ) { - // do some TBB work. 
- __TBB_ASSERT( my_conn && my_job, NULL ); - tbb_connection_v2& conn = *static_cast<tbb_connection_v2*>(my_conn); - // give openmp higher priority - bool has_coin = true; - if( conn.has_slack() ) { - // it has the coin, it should trip to the scheduler at least once as long as its slack is positive - do { - if( conn.try_process( *this, *my_job ) ) - if( conn.has_slack() && the_balance>=0 ) - has_coin = !conn.wakeup_next_thread( my_map_pos ); - } while( has_coin && conn.has_slack() && the_balance>=0 ); - } - state = ts_idle; - if( has_coin ) { - ++the_balance; // return the coin back to the deposit - if( conn.has_slack() ) { // a new adjust_job_request_estimate() is in progress - // it may have missed my changes to state and/or the_balance - if( --the_balance>=0 ) { // try to grab the coin back - // I got the coin - if( state.compare_and_swap( ts_tbb_busy, ts_idle )!=ts_idle ) - ++the_balance; // someone else enlisted me. - } else { - // overdraft. return the coin - ++the_balance; - } - } // else the new request will see my changes to state & the_balance. - } - /* here wake tbb threads up if feasible */ - if( the_balance>0 ) - wakeup_some_tbb_threads(); - } - } -} -#endif /* !RML_USE_WCRM */ - -#if RML_USE_WCRM - -class tbb_connection_v2; -class omp_connection_v2; - -#define CREATE_SCHEDULER_POLICY(policy,min_thrs,max_thrs,stack_size) \ - try { \ - policy = new SchedulerPolicy (7, \ - SchedulerKind, RML_THREAD_KIND, /*defined in _rml_serer_msrt.h*/ \ - MinConcurrency, min_thrs, \ - MaxConcurrency, max_thrs, \ - TargetOversubscriptionFactor, 1, \ - ContextStackSize, stack_size/1000, /*ConcRT:kB, iRML:bytes*/ \ - ContextPriority, THREAD_PRIORITY_NORMAL, \ - DynamicProgressFeedback, ProgressFeedbackDisabled ); \ - } catch ( invalid_scheduler_policy_key & ) { \ - __TBB_ASSERT( false, "invalid scheduler policy key exception caught" );\ - } catch ( invalid_scheduler_policy_value & ) { \ - __TBB_ASSERT( false, "invalid scheduler policy value exception caught" );\ - } - -static unsigned int core_count; -static tbb::atomic<int> core_count_inited; - - -static unsigned int get_processor_count() -{ - if( core_count_inited!=2 ) { - if( core_count_inited.compare_and_swap( 1, 0 )==0 ) { - core_count = GetProcessorCount(); - core_count_inited = 2; - } else { - tbb::internal::spin_wait_until_eq( core_count_inited, 2 ); - } - } - return core_count; -} - -template<typename Connection> -scheduler<Connection>::scheduler( Connection& conn ) : uid(GetSchedulerId()), my_conn(conn) {} - -template<> -scheduler<tbb_connection_v2>::scheduler( tbb_connection_v2& conn ) : uid(GetSchedulerId()), my_conn(conn) -{ - rml::client& cl = my_conn.client(); - unsigned max_job_count = cl.max_job_count(); - unsigned count = get_processor_count(); - __TBB_ASSERT( max_job_count>0, "max job count must be positive" ); - __TBB_ASSERT( count>1, "The processor count must be greater than 1" ); - if( max_job_count>count-1) max_job_count = count-1; - CREATE_SCHEDULER_POLICY( my_policy, 0, max_job_count, cl.min_stack_size() ); -} - -#if __RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED -template<> -void scheduler<tbb_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot**, unsigned int) -{ -} -#else -template<> -void scheduler<tbb_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot** vproots, unsigned int count ) -{ - if( !my_conn.is_closing() ) - my_conn.remove_virtual_processors( vproots, count ); -} -#endif - -template<> -void scheduler<tbb_connection_v2>::NotifyResourcesExternallyIdle( IVirtualProcessorRoot** /*vproots*/, 
unsigned int /*count*/) -{ - __TBB_ASSERT( false, "NotifyResourcesExternallyIdle() is not allowed for TBB" ); -} - -template<> -void scheduler<tbb_connection_v2>::NotifyResourcesExternallyBusy( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/ ) -{ - __TBB_ASSERT( false, "NotifyResourcesExternallyBusy() is not allowed for TBB" ); -} - -template<> -scheduler<omp_connection_v2>::scheduler( omp_connection_v2& conn ) : uid(GetSchedulerId()), my_conn(conn) -{ - unsigned count = get_processor_count(); - rml::client& cl = my_conn.client(); - __TBB_ASSERT( count>1, "The processor count must be greater than 1" ); - CREATE_SCHEDULER_POLICY( my_policy, count-1, count-1, cl.min_stack_size() ); -} - -template<> -void scheduler<omp_connection_v2>::RemoveVirtualProcessors( IVirtualProcessorRoot** /*vproots*/, unsigned int /*count*/ ) { - __TBB_ASSERT( false, "RemoveVirtualProcessors() is not allowed for OMP" ); -} - -template<> -void scheduler<omp_connection_v2>::NotifyResourcesExternallyIdle( IVirtualProcessorRoot** vproots, unsigned int count ){ - if( !my_conn.is_closing() ) - my_conn.notify_resources_externally_idle( vproots, count ); -} - -template<> -void scheduler<omp_connection_v2>::NotifyResourcesExternallyBusy( IVirtualProcessorRoot** vproots, unsigned int count ){ - if( !my_conn.is_closing() ) - my_conn.notify_resources_externally_busy( vproots, count ); -} - -/* ts_idle, ts_asleep, ts_busy */ -void tbb_server_thread::Dispatch( DispatchState* ) { - // Activate() will resume a thread right after Deactivate() as if it returns from the call - tbb_connection_v2* tbb_conn = static_cast<tbb_connection_v2*>(my_conn); - make_job( *tbb_conn, *this ); - - for( ;; ) { - // Try to wake some tbb threads if the balance is positive. - // When a thread is added by ConcRT and enter here for the first time, - // the thread may wake itself up (i.e., atomically change its state to ts_busy. - if( the_balance>0 ) - wakeup_some_tbb_threads(); - if( read_state()!=ts_busy ) - if( sleep_perhaps() ) - return; - if( terminate ) - if( initiate_termination() ) - return; - if( read_state()==ts_busy ) { - // this thread has a coin (i.e., state=ts_busy; it should trip to the scheduler at least once - if ( tbb_conn->has_slack() ) { - do { - tbb_conn->try_process( *wait_for_job() ); - } while( tbb_conn->has_slack() && the_balance>=0 && !is_removed() ); - } - __TBB_ASSERT( read_state()==ts_busy, "thread is not in busy state after returning from process()" ); - // see remove_virtual_processors() - if( my_state.compare_and_swap( ts_idle, ts_busy )==ts_busy ) { - int bal = ++the_balance; - if( tbb_conn->has_slack() ) { - // slack is positive, volunteer to help - bal = --the_balance; // try to grab the coin back - if( bal>=0 ) { // got the coin back - if( my_state.compare_and_swap( ts_busy, ts_idle )!=ts_idle ) - ++the_balance; // someone else enlisted me. - // else my_state is ts_busy, I will come back to tbb_conn->try_process(). - } else { - // overdraft. return the coin - ++the_balance; - } - } // else the new request will see my changes to state & the_balance. 
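The dispatch loops above treat the_balance as a bank of coins: a worker deposits its coin when it goes idle, then immediately tries to withdraw it again while its connection still has slack, and returns it on overdraft; adjust_balance() only wakes sleepers when a deposit moves the balance from non-positive to positive. Separated from the thread states, that accounting is roughly the following sketch (std::atomic instead of tbb::atomic; wakeup_some_workers() is an illustrative stub):

    #include <atomic>

    // Illustrative stand-in for wakeup_some_tbb_threads(); the real routine
    // walks the active connections and grabs sleeping workers.
    static void wakeup_some_workers() {}

    // The "bank": spare coins, i.e. how many more workers the clients may run.
    // It can go temporarily negative while several threads bid for coins.
    static std::atomic<int> the_balance(0);

    // Return 'delta' coins; wake sleepers only when the balance crosses from
    // non-positive to positive, which is the check thread_map::adjust_balance()
    // performs.
    static void deposit(int delta) {
        int old_balance = the_balance.fetch_add(delta);
        if (old_balance <= 0 && old_balance + delta > 0)
            wakeup_some_workers();
    }

    // Optimistically withdraw one coin; on overdraft put it straight back,
    // mirroring the "--the_balance>=0 ... ++the_balance" pattern above.
    static bool try_withdraw() {
        if (the_balance.fetch_sub(1) > 0)
            return true;              // the coin was really there
        the_balance.fetch_add(1);     // overdraft: undo the withdrawal
        return false;
    }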
- } else { - __TBB_ASSERT( false, "someone tampered with my state" ); - } - } // someone else might set the state to somthing other than ts_idle - } -} - -void omp_server_thread::Dispatch( DispatchState* ) { - // Activate() will resume a thread right after Deactivate() as if it returns from the call - make_job( *static_cast<omp_connection_v2*>(my_conn), *this ); - - for( ;; ) { - if( read_state()!=ts_busy ) - sleep_perhaps(); - if( terminate ) { - if( initiate_termination() ) - return; - } - if( read_state()==ts_busy ) { - omp_data.consume(); - __TBB_ASSERT( read_state()==ts_busy, "thread is not in busy state after returning from process()" ); - my_thread_map.adjust_balance( 1 ); - set_state( ts_idle ); - } - // someone else might set the state to somthing other than ts_idle - } -} - -//! Attempt to change a thread's state to ts_omp_busy, and waking it up if necessary. -thread_grab_t server_thread_rep::try_grab_for() { - thread_grab_t res = wk_failed; - thread_state_t s = read_state(); - switch( s ) { - case ts_asleep: - if( wakeup( ts_busy, ts_asleep ) ) - res = wk_from_asleep; - __TBB_ASSERT( res==wk_failed||read_state()==ts_busy, NULL ); - break; - case ts_idle: - if( my_state.compare_and_swap( ts_busy, ts_idle )==ts_idle ) - res = wk_from_idle; - // At this point a thread is grabbed (i.e., its state has changed to ts_busy. - // It is possible that the thread 1) processes the job, returns from process() and - // sets its state ts_idle again. In some cases, it even sets its state to ts_asleep. - break; - default: - break; - } - return res; -} - -bool tbb_server_thread::switch_out() { - thread_state_t s = read_state(); - __TBB_ASSERT( s==ts_asleep||s==ts_busy, NULL ); - // This thread comes back from the TBB scheduler, and changed its state to ts_asleep successfully. - // The master enlisted it and woke it up by Activate()'ing it; now it is emerging from Deactivated(). - // ConcRT requested for removal of the vp associated with the thread, and RML marks it removed. - // Now, it has ts_busy, and removed. -- we should remove it. - IExecutionResource* old_vp = my_execution_resource; - if( s==ts_busy ) { - ++the_balance; - my_state = ts_asleep; - } - IThreadProxy* proxy = my_proxy; - __TBB_ASSERT( proxy, NULL ); - my_execution_resource = (IExecutionResource*) c_remove_prepare; - old_vp->Remove( my_scheduler ); - my_execution_resource = (IExecutionResource*) c_remove_returned; - int cnt = --activation_count; - __TBB_ASSERT_EX( cnt==0||cnt==1, "too many activations?" ); - proxy->SwitchOut(); - if( terminate ) { - bool activated = activation_count==1; -#if TBB_USE_ASSERT - /* In a rare sequence of events, a thread comes out of SwitchOut with activation_count==1. - * 1) The thread is SwitchOut'ed. - * 2) AddVirtualProcessors() arrived and the thread is Activated. - * 3) The thread is coming out of SwitchOut(). - * 4) request_close_connection arrives and inform the thread that it is time to terminate. - * 5) The thread hits the check and falls into the path with 'activated==true'. - * In that case, do the clean-up but do not switch to the thread scavenger; rather simply return to RM. - */ - if( activated ) { - // thread is 'revived' in add_virtual_processors after being Activated(). - // so, if the thread extra state is still marked 'removed', it will shortly change to 'none' - // i.e., !is_remove(). The thread state is changed to ts_idle before the extra state, so - // the thread's state should be either ts_idle or ts_done. 
- while( is_removed() ) - __TBB_Yield(); - thread_state_t s = read_state(); - __TBB_ASSERT( s==ts_idle || s==ts_done, NULL ); - } -#endif - __TBB_ASSERT( my_state==ts_asleep||my_state==ts_idle, NULL ); - // it is possible that in make_job() the thread may not have a chance to create a job. - // my_job may not be set if the thread did not get a chance to process client's job (i.e., call try_process()) - rml::job* j; - if( my_job_automaton.try_plug(j) ) { - __TBB_ASSERT( j, NULL ); - my_client.cleanup(*j); - my_conn->remove_client_ref(); - } - // Must do remove client reference first, because execution of - // c.remove_ref() can cause *this to be destroyed. - if( !activated ) - proxy->SwitchTo( my_thread_map.get_thread_scavenger(), Idle ); - my_conn->remove_server_ref(); - return true; - } - // We revive a thread in add_virtual_processors() after we Activate the thread on a new virtual processor. - // So briefly wait until the thread's my_execution_resource gets set. - while( get_virtual_processor()==c_remove_returned ) - __TBB_Yield(); - return false; -} - -bool tbb_server_thread::sleep_perhaps () { - if( terminate ) return false; - thread_state_t s = read_state(); - if( s==ts_idle ) { - if( my_state.compare_and_swap( ts_asleep, ts_idle )==ts_idle ) { - // If a thread is between read_state() and compare_and_swap(), and the master tries to terminate, - // the master's compare_and_swap() will fail because the thread's state is ts_idle. - // We need to check if terminate is true or not before letting the thread go to sleep oetherwise - // we will miss the terminate signal. - if( !terminate ) { - if( !is_removed() ) { - --activation_count; - get_virtual_processor()->Deactivate( this ); - } - if( is_removed() ) { - if( switch_out() ) - return true; - __TBB_ASSERT( my_execution_resource>c_remove_returned, NULL ); - } - // in add_virtual_processors(), when we revive a thread, we change its state after Activate the thread - // in that case the state may be ts_asleep for a short period - while( read_state()==ts_asleep ) - __TBB_Yield(); - } else { - if( my_state.compare_and_swap( ts_done, ts_asleep )!=ts_asleep ) { - --activation_count; - // unbind() changed my state. It will call Activate(). So issue a matching Deactivate() - get_virtual_processor()->Deactivate( this ); - } - } - } - } else { - __TBB_ASSERT( s==ts_busy, NULL ); - } - return false; -} - -void omp_server_thread::sleep_perhaps () { - if( terminate ) return; - thread_state_t s = read_state(); - if( s==ts_idle ) { - if( my_state.compare_and_swap( ts_asleep, ts_idle )==ts_idle ) { - // If a thread is between read_state() and compare_and_swap(), and the master tries to terminate, - // the master's compare_and_swap() will fail because the thread's state is ts_idle. - // We need to check if terminate is true or not before letting the thread go to sleep oetherwise - // we will miss the terminate signal. - if( !terminate ) { - get_virtual_processor()->Deactivate( this ); - __TBB_ASSERT( !is_removed(), "OMP threads should not be deprived of a virtual processor" ); - __TBB_ASSERT( read_state()!=ts_asleep, NULL ); - } else { - if( my_state.compare_and_swap( ts_done, ts_asleep )!=ts_asleep ) - // unbind() changed my state. It will call Activate(). 
So issue a matching Deactivate() - get_virtual_processor()->Deactivate( this ); - } - } - } else { - __TBB_ASSERT( s==ts_busy, NULL ); - } -} - -bool tbb_server_thread::initiate_termination() { - if( read_state()==ts_busy ) { - int bal = ++the_balance; - if( bal>0 ) wakeup_some_tbb_threads(); - } - return destroy_job( (tbb_connection_v2*) my_conn ); -} - -template<typename Connection> -bool server_thread_rep::destroy_job( Connection* c ) { - __TBB_ASSERT( my_state!=ts_asleep, NULL ); - rml::job* j; - if( my_job_automaton.try_plug(j) ) { - __TBB_ASSERT( j, NULL ); - my_client.cleanup(*j); - c->remove_client_ref(); - } - // Must do remove client reference first, because execution of - // c.remove_ref() can cause *this to be destroyed. - c->remove_server_ref(); - return true; -} - -void thread_map::assist_cleanup( bool assist_null_only ) { - // To avoid deadlock, the current thread *must* help out with cleanups that have not started, - // becausd the thread that created the job may be busy for a long time. - for( iterator i = begin(); i!=end(); ++i ) { - rml::job* j=0; - server_thread* thr = (*i).second; - job_automaton& ja = thr->my_job_automaton; - if( assist_null_only ? ja.try_plug_null() : ja.try_plug(j) ) { - if( j ) { - my_client.cleanup(*j); - } else { - // server thread did not get a chance to create a job. - } - remove_client_ref(); - } - } -} - -void thread_map::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count, tbb_connection_v2& conn, ::tbb::spin_mutex& mtx ) -{ -#if TBB_USE_ASSERT - int req_cnt = ++n_add_vp_requests; - __TBB_ASSERT( req_cnt==1, NULL ); -#endif - std::vector<thread_map::iterator> vec(count); - std::vector<tbb_server_thread*> tvec(count); - iterator end; - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - __TBB_ASSERT( my_map.size()==0||count==1, NULL ); - end = my_map.end(); //remember 'end' at the time of 'find' - // find entries in the map for those VPs that were previosly added and then removed. - for( size_t i=0; i<count; ++i ) { - vec[i] = my_map.find( (key_type) vproots[i] ); -#if TBB_USE_DEBUG - if( vec[i]!=end ) { - tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second; - IVirtualProcessorRoot* v = t->get_virtual_processor(); - __TBB_ASSERT( v==c_remove_prepare||v==c_remove_returned, NULL ); - } -#endif - } - - iterator nxt = my_map.begin(); - for( size_t i=0; i<count; ++i ) { - if( vec[i]!=end ) { -#if TBB_USE_ASSERT - tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second; - __TBB_ASSERT( t->read_state()==ts_asleep, NULL ); - IVirtualProcessorRoot* r = t->get_virtual_processor(); - __TBB_ASSERT( r==c_remove_prepare||r==c_remove_returned, NULL ); -#endif - continue; - } - - if( my_unrealized_threads>0 ) { - --my_unrealized_threads; - } else { - __TBB_ASSERT( nxt!=end, "nxt should not be thread_map::iterator::end" ); - // find a removed thread context for i - for( ; nxt!=end; ++nxt ) { - tbb_server_thread* t = (tbb_server_thread*) (*nxt).second; - if( t->is_removed() && t->read_state()==ts_asleep && t->get_virtual_processor()==c_remove_returned ) { - vec[i] = nxt++; - break; - } - } - // break target - if( vec[i]==end ) // ignore excessive VP. 
- vproots[i] = NULL; - } - } - } - - for( size_t i=0; i<count; ++i ) { - __TBB_ASSERT( !tvec[i], NULL ); - if( vec[i]==end ) { - if( vproots[i] ) { - tvec[i] = my_tbb_allocator.allocate(1); - new ( tvec[i] ) tbb_server_thread( false, my_scheduler, (IExecutionResource*)vproots[i], &conn, *this, my_client ); - } -#if TBB_USE_ASSERT - } else { - tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second; - __TBB_ASSERT( t->GetProxy(), "Proxy is cleared?" ); -#endif - } - } - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - - bool closing = is_closing(); - - for( size_t i=0; i<count; ++i ) { - if( vec[i]==end ) { - if( vproots[i] ) { - thread_map::key_type key = (thread_map::key_type) vproots[i]; - vec[i] = insert( key, (server_thread*) tvec[i] ); - my_client_ref_count.add_ref(); - my_server_ref_count.add_ref(); - } - } else if( !closing ) { - tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second; - - if( (*vec[i]).first!=(thread_map::key_type)vproots[i] ) { - my_map.erase( vec[i] ); - thread_map::key_type key = (thread_map::key_type) vproots[i]; - __TBB_ASSERT( key, NULL ); - vec[i] = insert( key, t ); - } - __TBB_ASSERT( t->read_state()==ts_asleep, NULL ); - // We did not decrement server/client ref count when a thread is removed. - // So, don't increment server/client ref count here. - } - } - - // we could check is_closing() earlier. That requires marking the newly allocated server_thread objects - // that are not inserted into the thread_map, and deallocate them. Doing so seems more cumbersome - // than simply adding these to the thread_map and let thread_map's destructor take care of reclamation. - __TBB_ASSERT( closing==is_closing(), NULL ); - if( closing ) return; - } - - for( size_t i=0; i<count; ++i ) { - if( vproots[i] ) { - tbb_server_thread* t = (tbb_server_thread*) (*vec[i]).second; - __TBB_ASSERT( tvec[i]!=NULL||t->GetProxy(), "Proxy is cleared?" ); - if( t->is_removed() ) - __TBB_ASSERT( t->get_virtual_processor()==c_remove_returned, NULL ); - int cnt = ++t->activation_count; - __TBB_ASSERT_EX( cnt==0||cnt==1, NULL ); - vproots[i]->Activate( t ); - if( t->is_removed() ) - t->revive( my_scheduler, vproots[i], my_client ); - } - } -#if TBB_USE_ASSERT - req_cnt = --n_add_vp_requests; - __TBB_ASSERT( req_cnt==0, NULL ); -#endif -} - -void thread_map::remove_virtual_processors( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ) { - if( my_map.size()==0 ) - return; - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - - for( unsigned int c=0; c<count; ++c ) { - iterator i = my_map.find( (key_type) vproots[c] ); - if( i==my_map.end() ) { - thread_scavenger_thread* tst = my_thread_scavenger_thread; - if( !tst ) { - // Remove unknown vp from my scheduler; - vproots[c]->Remove( my_scheduler ); - } else { - while( (tst=my_thread_scavenger_thread)==c_claimed ) - __TBB_Yield(); - if( vproots[c]!=tst->get_virtual_processor() ) - vproots[c]->Remove( my_scheduler ); - } - continue; - } - tbb_server_thread* thr = (tbb_server_thread*) (*i).second; - __TBB_ASSERT( thr->tbb_thread, "incorrect type of server_thread" ); - thr->set_removed(); - if( thr->read_state()==ts_asleep ) { - while( thr->activation_count>0 ) { - if( thr->get_virtual_processor()<=c_remove_returned ) - break; - __TBB_Yield(); - } - if( thr->get_virtual_processor()>c_remove_returned ) { - // the thread is in Deactivated state - ++thr->activation_count; - // wake the thread up so that it Switches Out itself. 
- thr->get_virtual_processor()->Activate( thr ); - } // else, it is Switched Out - } // else the thread will see that it is removed and proceed to switch itself out without Deactivation - } -} - -void thread_map::add_virtual_processors( IVirtualProcessorRoot** vproots, unsigned int count, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ) -{ - std::vector<thread_map::iterator> vec(count); - std::vector<server_thread*> tvec(count); - iterator end; - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - // read the map - end = my_map.end(); //remember 'end' at the time of 'find' - for( size_t i=0; i<count; ++i ) - vec[i] = my_map.find( (key_type) vproots[i] ); - } - - for( size_t i=0; i<count; ++i ) { - __TBB_ASSERT( !tvec[i], NULL ); - if( vec[i]==end ) { - tvec[i] = my_omp_allocator.allocate(1); - new ( tvec[i] ) omp_server_thread( false, my_scheduler, (IExecutionResource*)vproots[i], &conn, *this, my_client ); - } - } - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - - for( size_t i=0; i<count; ++i ) { - if( vec[i]==my_map.end() ) { - thread_map::key_type key = (thread_map::key_type) vproots[i]; - vec[i] = insert( key, tvec[i] ); - my_client_ref_count.add_ref(); - my_server_ref_count.add_ref(); - } - } - - // we could check is_closing() earlier. That requires marking the newly allocated server_thread objects - // that are not inserted into the thread_map, and deallocate them. Doing so seems more cumbersome - // than simply adding these to the thread_map and let thread_map's destructor take care of reclamation. - if( is_closing() ) return; - } - - for( size_t i=0; i<count; ++i ) - vproots[i]->Activate( (*vec[i]).second ); - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - for( size_t i=0; i<count; ++i ) - original_exec_resources.push_back( vproots[i] ); - } -} - -void thread_map::mark_virtual_processors_as_lent( IVirtualProcessorRoot** vproots, unsigned count, ::tbb::spin_mutex& mtx ) { - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - - iterator end = my_map.end(); - for( unsigned int c=0; c<count; ++c ) { - iterator i = my_map.find( (key_type) vproots[c] ); - if( i==end ) { - // The vproc has not been added to the map in create_oversubscribers() - my_map.insert( hash_map_type::value_type( (key_type) vproots[c], (server_thread*)1 ) ); - } else { - server_thread* thr = (*i).second; - if( ((uintptr_t)thr)&~(uintptr_t)1 ) { - __TBB_ASSERT( !thr->is_removed(), "incorrectly removed" ); - ((omp_server_thread*)thr)->set_lent(); - } - } - } -} - -void thread_map::create_oversubscribers( unsigned n, std::vector<server_thread*>& thr_vec, omp_connection_v2& conn, ::tbb::spin_mutex& mtx ) { - std::vector<IExecutionResource*> curr_exec_rsc; - { - tbb::spin_mutex::scoped_lock lck( mtx ); - curr_exec_rsc = original_exec_resources; // copy construct - } - typedef std::vector<IExecutionResource*>::iterator iterator_er; - typedef ::std::vector<std::pair<hash_map_type::key_type, hash_map_type::mapped_type> > hash_val_vector_t; - hash_val_vector_t v_vec(n); - iterator_er begin = curr_exec_rsc.begin(); - iterator_er end = curr_exec_rsc.end(); - iterator_er i = begin; - for( unsigned c=0; c<n; ++c ) { - IVirtualProcessorRoot* vpr = my_scheduler_proxy->CreateOversubscriber( *i ); - omp_server_thread* t = new ( my_omp_allocator.allocate(1) ) omp_server_thread( true, my_scheduler, (IExecutionResource*)vpr, &conn, *this, my_client ); - thr_vec[c] = t; - v_vec[c] = hash_map_type::value_type( (key_type) vpr, t ); - if( ++i==end ) i = begin; - } - - { - tbb::spin_mutex::scoped_lock lck( mtx ); - 
- if( is_closing() ) return; - - iterator end = my_map.end(); - unsigned c = 0; - for( hash_val_vector_t::iterator vi=v_vec.begin(); vi!=v_vec.end(); ++vi, ++c ) { - iterator i = my_map.find( (key_type) (*vi).first ); - if( i==end ) { - my_map.insert( *vi ); - } else { - // the vproc has not been added to the map in mark_virtual_processors_as_returned(); - unsigned lent = (unsigned) (*i).second; - __TBB_ASSERT( lent<=1, "vproc map entry added incorrectly?"); - (*i).second = thr_vec[c]; - if( lent ) - ((omp_server_thread*)thr_vec[c])->set_lent(); - else - ((omp_server_thread*)thr_vec[c])->set_returned(); - } - my_client_ref_count.add_ref(); - my_server_ref_count.add_ref(); - } - } -} - -void thread_map::wakeup_tbb_threads( int c, ::tbb::spin_mutex& mtx ) { - std::vector<tbb_server_thread*> vec(c); - - size_t idx = 0; - { - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - // only one RML thread is in here to wake worker threads up. - - int bal = the_balance; - int cnt = c<bal ? c : bal; - - if( cnt<=0 ) { return; } - - for( iterator i=begin(); i!=end(); ++i ) { - tbb_server_thread* thr = (tbb_server_thread*) (*i).second; - // ConcRT RM should take threads away from TBB scheduler instead of lending them to another scheduler - if( thr->is_removed() ) - continue; - - if( --the_balance>=0 ) { - thread_grab_t res; - while( (res=thr->try_grab_for())!=wk_from_idle ) { - if( res==wk_from_asleep ) { - vec[idx++] = thr; - break; - } else { - thread_state_t s = thr->read_state(); - if( s==ts_busy ) {// failed because already assigned. move on. - ++the_balance; - goto skip; - } - } - } - thread_state_t s = thr->read_state(); - __TBB_ASSERT_EX( s==ts_busy, "should have set the state to ts_busy" ); - if( --cnt==0 ) - break; - } else { - // overdraft - ++the_balance; - break; - } -skip: - ; - } - } - - for( size_t i=0; i<idx; ++i ) { - tbb_server_thread* thr = vec[i]; - __TBB_ASSERT( thr, NULL ); - thread_state_t s = thr->read_state(); - __TBB_ASSERT_EX( s==ts_busy, "should have set the state to ts_busy" ); - ++thr->activation_count; - thr->get_virtual_processor()->Activate( thr ); - } - -} - -void thread_map::mark_virtual_processors_as_returned( IVirtualProcessorRoot** vprocs, unsigned int count, tbb::spin_mutex& mtx ) { - { - tbb::spin_mutex::scoped_lock lck( mtx ); - - if( is_closing() ) return; - - iterator end = my_map.end(); - for(unsigned c=0; c<count; ++c ) { - iterator i = my_map.find( (key_type) vprocs[c] ); - if( i==end ) { - // the vproc has not been added to the map in create_oversubscribers() - my_map.insert( hash_map_type::value_type( (key_type) vprocs[c], static_cast<server_thread*>(0) ) ); - } else { - omp_server_thread* thr = (omp_server_thread*) (*i).second; - if( ((uintptr_t)thr)&~(uintptr_t)1 ) { - __TBB_ASSERT( !thr->is_removed(), "incorrectly removed" ); - // we shoud not make any assumption on the initial state of an added vproc. - thr->set_returned(); - } - } - } - } -} - - -void thread_map::unbind( rml::server& /*server*/, tbb::spin_mutex& mtx ) { - { - tbb::spin_mutex::scoped_lock lck( mtx ); - shutdown_in_progress = true; // ignore any callbacks from ConcRT RM - - // Ask each server_thread to cleanup its job for this server. 
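unbind() and destroy_job()/assist_cleanup() rely on job_automaton::try_plug() so that exactly one of the racing parties, the worker that owns the job or the thread driving shutdown, ends up responsible for calling client.cleanup(). The job_automaton class itself is defined elsewhere in the bundled sources and is not part of this hunk; a rough, assumed analogue of that handshake built on a single atomic word could look like:

    #include <atomic>
    #include <cstdint>

    struct job {};   // stand-in for rml::job

    // Assumed analogue only: the real job_automaton is not shown in this hunk.
    // A single atomic word is either 0 (no job published yet), a suitably
    // aligned job pointer, or 1 ("plugged": cleanup responsibility settled).
    class job_slot {
        std::atomic<std::uintptr_t> word;
    public:
        job_slot() : word(0) {}

        // Worker publishes the job it created. If this fails, the slot was
        // plugged first and the worker keeps cleanup responsibility itself.
        bool try_publish(job* j) {
            std::uintptr_t expected = 0;
            return word.compare_exchange_strong(
                expected, reinterpret_cast<std::uintptr_t>(j));
        }

        // Shutdown path tries to take over cleanup. At most one caller gets
        // 'true' back; j may still be null if no job was ever published.
        bool try_plug(job*& j) {
            std::uintptr_t v = word.exchange(1);
            if (v == 1)
                return false;         // someone else already plugged the slot
            j = reinterpret_cast<job*>(v);
            return true;
        }
    };

Encoding a flag in the low bit of the stored pointer is also why the asserts earlier in this file insist that client::create_one_job() returns a job whose address has its low bit clear.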
- for( iterator i = begin(); i!=end(); ++i ) {
- server_thread* t = (*i).second;
- t->terminate = true;
- if( t->is_removed() ) {
- // This is for TBB only as ConcRT RM does not request OMP schedulers to remove virtual processors
- if( t->read_state()==ts_asleep ) {
- __TBB_ASSERT( my_thread_scavenger_thread, "this is TBB connection; thread_scavenger_thread must be allocated" );
- // thread is on its way to switch_out; see remove_virtual_processors() where
- // the thread is Activated() to bring it back from 'Deactivated' in sleep_perhaps()
- // now assume that the thread will go to SwitchOut()
-#if TBB_USE_ASSERT
- while( t->get_virtual_processor()>c_remove_returned )
- __TBB_Yield();
-#endif
- // A removed thread is supposed to proceed to SwitchOut.
- // There, we remove client&server references.
- }
- } else {
- if( t->wakeup( ts_done, ts_asleep ) ) {
- if( t->tbb_thread )
- ++((tbb_server_thread*)t)->activation_count;
- t->get_virtual_processor()->Activate( t );
- // We mark this in the thread_map so that once the termination sequence has started,
- // we ignore all notifications from ConcRT RM.
- }
- }
- }
- }
- // Remove extra ref to client.
- remove_client_ref();
-
- if( my_thread_scavenger_thread ) {
- thread_scavenger_thread* tst;
- while( (tst=my_thread_scavenger_thread)==c_claimed )
- __TBB_Yield();
-#if TBB_USE_ASSERT
- ++my_thread_scavenger_thread->activation_count;
-#endif
- tst->get_virtual_processor()->Activate( tst );
- }
-}
-
-#if !__RML_REMOVE_VIRTUAL_PROCESSORS_DISABLED
-void thread_map::allocate_thread_scavenger( IExecutionResource* v )
-{
- if( my_thread_scavenger_thread>c_claimed ) return;
- thread_scavenger_thread* c = my_thread_scavenger_thread.fetch_and_store((thread_scavenger_thread*)c_claimed);
- if( c==NULL ) { // successfully claimed
- add_server_ref();
-#if TBB_USE_ASSERT
- ++n_thread_scavengers_created;
-#endif
- __TBB_ASSERT( v, NULL );
- IVirtualProcessorRoot* vpr = my_scheduler_proxy->CreateOversubscriber( v );
- my_thread_scavenger_thread = c = new ( my_scavenger_allocator.allocate(1) ) thread_scavenger_thread( my_scheduler, vpr, *this );
-#if TBB_USE_ASSERT
- ++c->activation_count;
-#endif
- vpr->Activate( c );
- } else if( c>c_claimed ) {
- my_thread_scavenger_thread = c;
- }
-}
-#endif
-
-void thread_scavenger_thread::Dispatch( DispatchState* )
-{
- __TBB_ASSERT( my_proxy, NULL );
-#if TBB_USE_ASSERT
- --activation_count;
-#endif
- get_virtual_processor()->Deactivate( this );
- for( thread_map::iterator i=my_thread_map.begin(); i!=my_thread_map.end(); ++i ) {
- tbb_server_thread* t = (tbb_server_thread*) (*i).second;
- if( t->read_state()==ts_asleep && t->is_removed() ) {
- while( t->get_execution_resource()!=c_remove_returned )
- __TBB_Yield();
- my_proxy->SwitchTo( t, Blocking );
- }
- }
- get_virtual_processor()->Remove( my_scheduler );
- my_thread_map.remove_server_ref();
- // Signal to the connection scavenger that I am done with the map.
- __TBB_ASSERT( activation_count==1, NULL );
- set_state( ts_done );
-}
-
-//! Windows "DllMain" that handles startup and shutdown of dynamic library.
-extern "C" bool WINAPI DllMain( HINSTANCE /*hinstDLL*/, DWORD fwdReason, LPVOID lpvReserved ) { - void assist_cleanup_connections(); - if( fwdReason==DLL_PROCESS_DETACH ) { - // dll is being unloaded - if( !lpvReserved ) // if FreeLibrary has been called - assist_cleanup_connections(); - } - return true; -} - -void free_all_connections( uintptr_t conn_ex ) { - while( conn_ex ) { - bool is_tbb = (conn_ex&2)>0; - //clear extra bits - uintptr_t curr_conn = conn_ex & ~(uintptr_t)3; - __TBB_ASSERT( curr_conn, NULL ); - - // Wait for worker threads to return - if( is_tbb ) { - tbb_connection_v2* tbb_conn = reinterpret_cast<tbb_connection_v2*>(curr_conn); - conn_ex = reinterpret_cast<uintptr_t>(tbb_conn->next_conn); - while( tbb_conn->my_thread_map.remove_server_ref()>0 ) - __TBB_Yield(); - delete tbb_conn; - } else { - omp_connection_v2* omp_conn = reinterpret_cast<omp_connection_v2*>(curr_conn); - conn_ex = reinterpret_cast<uintptr_t>(omp_conn->next_conn); - while( omp_conn->my_thread_map.remove_server_ref()>0 ) - __TBB_Yield(); - delete omp_conn; - } - } -} - -void assist_cleanup_connections() -{ - //signal to connection_scavenger_thread to terminate - uintptr_t tail = connections_to_reclaim.tail; - while( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::plugged, tail )!=tail ) { - __TBB_Yield(); - tail = connections_to_reclaim.tail; - } - - __TBB_ASSERT( connection_scavenger.state==ts_busy || connection_scavenger.state==ts_asleep, NULL ); - // Scavenger thread may be busy freeing connections - DWORD thr_exit_code = STILL_ACTIVE; - while( connection_scavenger.state==ts_busy ) { - if( GetExitCodeThread( connection_scavenger.thr_handle, &thr_exit_code )>0 ) - if( thr_exit_code!=STILL_ACTIVE ) - break; - __TBB_Yield(); - thr_exit_code = STILL_ACTIVE; - } - if( connection_scavenger.state==ts_asleep && thr_exit_code==STILL_ACTIVE ) - connection_scavenger.wakeup(); // wake the connection scavenger thread up - - // it is possible that the connection scavenger thread already exited. Take over its responsibility. - if( tail && connections_to_reclaim.tail!=garbage_connection_queue::plugged_acked ) { - // atomically claim the head of the list. - uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty ); - if( head==garbage_connection_queue::empty ) - head = tail; - connection_scavenger.process_requests( head ); - } - __TBB_ASSERT( connections_to_reclaim.tail==garbage_connection_queue::plugged||connections_to_reclaim.tail==garbage_connection_queue::plugged_acked, "someone else added a request after termination has initiated" ); - __TBB_ASSERT( the_balance==connection_scavenger.default_concurrency, NULL ); -} - -void connection_scavenger_thread::sleep_perhaps() { - uintptr_t tail = connections_to_reclaim.tail; - // connections_to_reclaim.tail==garbage_connection_queue::plugged --> terminate, - // connections_to_reclaim.tail>garbage_connection_queue::plugged : we got work to do - if( tail>=garbage_connection_queue::plugged ) return; - __TBB_ASSERT( !tail, NULL ); - thread_monitor::cookie c; - monitor.prepare_wait(c); - if( state.compare_and_swap( ts_asleep, ts_busy )==ts_busy ) { - if( connections_to_reclaim.tail!=garbage_connection_queue::plugged ) { - monitor.commit_wait(c); - // Someone else woke me up. The compare_and_swap further below deals with spurious wakeups. - } else { - monitor.cancel_wait(); - } - thread_state_t s = state; - if( s==ts_asleep ) // if spurious wakeup. 
- state.compare_and_swap( ts_busy, ts_asleep ); - // I woke myself up, either because I cancelled the wait or suffered a spurious wakeup. - } else { - __TBB_ASSERT( false, "someone else tampered with my state" ); - } - __TBB_ASSERT( state==ts_busy, "a thread can only put itself to sleep" ); -} - -void connection_scavenger_thread::process_requests( uintptr_t conn_ex ) -{ - __TBB_ASSERT( conn_ex>1, NULL ); - __TBB_ASSERT( n_scavenger_threads==1||connections_to_reclaim.tail==garbage_connection_queue::plugged, "more than one connection_scavenger_thread being active?" ); - - bool done = false; - while( !done ) { - bool is_tbb = (conn_ex&2)>0; - //clear extra bits - uintptr_t curr_conn = conn_ex & ~(uintptr_t)3; - - // no contention. there is only one connection_scavenger_thread!! - uintptr_t next_conn; - tbb_connection_v2* tbb_conn = NULL; - omp_connection_v2* omp_conn = NULL; - // Wait for worker threads to return - if( is_tbb ) { - tbb_conn = reinterpret_cast<tbb_connection_v2*>(curr_conn); - next_conn = reinterpret_cast<uintptr_t>(tbb_conn->next_conn); - while( tbb_conn->my_thread_map.get_server_ref_count()>1 ) - __TBB_Yield(); - } else { - omp_conn = reinterpret_cast<omp_connection_v2*>(curr_conn); - next_conn = reinterpret_cast<uintptr_t>(omp_conn->next_conn); - while( omp_conn->my_thread_map.get_server_ref_count()>1 ) - __TBB_Yield(); - } - - //someone else may try to write into this connection object. - //So access next_conn field first before remove the extra server ref count. - - if( next_conn==0 ) { - uintptr_t tail = connections_to_reclaim.tail; - if( tail==garbage_connection_queue::plugged ) { - tail = garbage_connection_queue::plugged_acked; // connection scavenger saw the flag, and it freed all connections. - done = true; - } else if( tail==conn_ex ) { - if( connections_to_reclaim.tail.compare_and_swap( garbage_connection_queue::empty, tail )==tail ) { - __TBB_ASSERT( !connections_to_reclaim.head, NULL ); - done = true; - } - } - - if( !done ) { - // A new connection to close is added to connections_to_reclaim.tail; - // Wait for curr_conn->next_conn to be set. 
- if( is_tbb ) {
- while( !tbb_conn->next_conn )
- __TBB_Yield();
- conn_ex = reinterpret_cast<uintptr_t>(tbb_conn->next_conn);
- } else {
- while( !omp_conn->next_conn )
- __TBB_Yield();
- conn_ex = reinterpret_cast<uintptr_t>(omp_conn->next_conn);
- }
- } else {
- conn_ex = next_conn;
- }
- __TBB_ASSERT( conn_ex, NULL );
- if( is_tbb )
- // remove extra server ref count; this will trigger Shutdown/Release of ConcRT RM
- tbb_conn->remove_server_ref();
- else
- // remove extra server ref count; this will trigger Shutdown/Release of ConcRT RM
- omp_conn->remove_server_ref();
- }
-}
-
-__RML_DECL_THREAD_ROUTINE connection_scavenger_thread::thread_routine( void* arg ) {
- connection_scavenger_thread* thr = (connection_scavenger_thread*) arg;
- thr->state = ts_busy;
- thr->thr_handle = GetCurrentThread();
-#if TBB_USE_ASSERT
- ++thr->n_scavenger_threads;
-#endif
- for(;;) {
- __TBB_Yield();
- thr->sleep_perhaps();
- if( connections_to_reclaim.tail==garbage_connection_queue::plugged || connections_to_reclaim.tail==garbage_connection_queue::plugged_acked ) {
- thr->state = ts_asleep;
- return 0;
- }
-
- __TBB_ASSERT( connections_to_reclaim.tail!=garbage_connection_queue::plugged_acked, NULL );
- __TBB_ASSERT( connections_to_reclaim.tail>garbage_connection_queue::plugged && (connections_to_reclaim.tail&garbage_connection_queue::plugged)==0 , NULL );
- while( connections_to_reclaim.head==garbage_connection_queue::empty )
- __TBB_Yield();
- uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty );
- thr->process_requests( head );
- wakeup_some_tbb_threads();
- }
-}
-
-template<typename Server, typename Client>
-void connection_scavenger_thread::add_request( generic_connection<Server,Client>* conn_to_close )
-{
- uintptr_t conn_ex = (uintptr_t)conn_to_close | (connection_traits<Server,Client>::is_tbb<<1);
- __TBB_ASSERT( !conn_to_close->next_conn, NULL );
- uintptr_t old_tail_ex = connections_to_reclaim.tail;
- __TBB_ASSERT( old_tail_ex==0||old_tail_ex>garbage_connection_queue::plugged_acked, "Unloading DLL called while this connection is being closed?"
); - tbb::internal::atomic_backoff backoff; - while( connections_to_reclaim.tail.compare_and_swap( conn_ex, old_tail_ex )!=old_tail_ex ) { - backoff.pause(); - old_tail_ex = connections_to_reclaim.tail; - } - - if( old_tail_ex==garbage_connection_queue::empty ) - connections_to_reclaim.head = conn_ex; - else { - bool is_tbb = (old_tail_ex&2)>0; - uintptr_t old_tail = old_tail_ex & ~(uintptr_t)3; - if( is_tbb ) - reinterpret_cast<tbb_connection_v2*>(old_tail)->next_conn = reinterpret_cast<tbb_connection_v2*>(conn_ex); - else - reinterpret_cast<omp_connection_v2*>(old_tail)->next_conn = reinterpret_cast<omp_connection_v2*>(conn_ex); - } - - if( state==ts_asleep ) - wakeup(); -} - -template<> -uintptr_t connection_scavenger_thread::grab_and_prepend( generic_connection<tbb_server,tbb_client>* /*last_conn_to_close*/ ) { return 0;} - -template<> -uintptr_t connection_scavenger_thread::grab_and_prepend( generic_connection<omp_server,omp_client>* last_conn_to_close ) -{ - uintptr_t conn_ex = (uintptr_t)last_conn_to_close; - uintptr_t head = connections_to_reclaim.head.fetch_and_store( garbage_connection_queue::empty ); - reinterpret_cast<omp_connection_v2*>(last_conn_to_close)->next_conn = reinterpret_cast<omp_connection_v2*>(head); - return conn_ex; -} - -extern "C" ULONGLONG NTAPI VerSetConditionMask( ULONGLONG, DWORD, BYTE); - -bool is_windows7_or_later () -{ - try { - return GetOSVersion()>=IResourceManager::Win7OrLater; - } catch( ... ) { - return false; - } -} - -#endif /* RML_USE_WCRM */ - -template<typename Connection, typename Server, typename Client> -static factory::status_type connect( factory& f, Server*& server, Client& client ) { - server = new Connection(*static_cast<wait_counter*>(f.scratch_ptr),client); - return factory::st_success; -} - -extern "C" factory::status_type __RML_open_factory( factory& f, version_type& server_version, version_type client_version ) { - // Hack to keep this library from being closed by causing the first client's dlopen to not have a corresponding dlclose. - // This code will be removed once we figure out how to do shutdown of the RML perfectly. 
- static tbb::atomic<bool> one_time_flag; - if( one_time_flag.compare_and_swap(true,false)==false) { - __TBB_ASSERT( (size_t)f.library_handle!=factory::c_dont_unload, NULL ); -#if _WIN32||_WIN64 - f.library_handle = reinterpret_cast<HMODULE>(factory::c_dont_unload); -#else - f.library_handle = reinterpret_cast<void*>(factory::c_dont_unload); -#endif - } - // End of hack - - // Initialize the_balance only once - if( the_balance_inited!=2 ) { - if( the_balance_inited.compare_and_swap( 1, 0 )==0 ) { - the_balance = hardware_concurrency()-1; - the_balance_inited = 2; -#if RML_USE_WCRM - connection_scavenger.launch( the_balance ); -#endif - } else { - tbb::internal::spin_wait_until_eq( the_balance_inited, 2 ); - } - } - - server_version = SERVER_VERSION; - f.scratch_ptr = 0; - if( client_version==0 ) { - return factory::st_incompatible; -#if RML_USE_WCRM - } else if ( !is_windows7_or_later() ) { -#if TBB_USE_DEBUG - fprintf(stderr, "This version of the RML library requires Windows 7 to run on.\nConnection request denied.\n"); -#endif - return factory::st_incompatible; -#endif - } else { -#if TBB_USE_DEBUG - if( client_version<EARLIEST_COMPATIBLE_CLIENT_VERSION ) - fprintf(stderr, "This client library is too old for the current RML server.\nThe connection request is granted but oversubscription/undersubscription may occur.\n"); -#endif - f.scratch_ptr = new wait_counter; - return factory::st_success; - } -} - -extern "C" void __RML_close_factory( factory& f ) { - if( wait_counter* fc = static_cast<wait_counter*>(f.scratch_ptr) ) { - f.scratch_ptr = 0; - fc->wait(); - size_t bal = the_balance; - f.scratch_ptr = (void*)bal; - delete fc; - } -} - -void call_with_build_date_str( ::rml::server_info_callback_t cb, void* arg ); - -}} // rml::internal - -namespace tbb { -namespace internal { -namespace rml { - -extern "C" tbb_factory::status_type __TBB_make_rml_server( tbb_factory& f, tbb_server*& server, tbb_client& client ) { - return ::rml::internal::connect< ::rml::internal::tbb_connection_v2>(f,server,client); -} - -extern "C" void __TBB_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ) { - return ::rml::internal::call_with_build_date_str( cb, arg ); -} - -}}} - -namespace __kmp { -namespace rml { - -extern "C" omp_factory::status_type __KMP_make_rml_server( omp_factory& f, omp_server*& server, omp_client& client ) { - return ::rml::internal::connect< ::rml::internal::omp_connection_v2>(f,server,client); -} - -extern "C" void __KMP_call_with_my_server_info( ::rml::server_info_callback_t cb, void* arg ) { - return ::rml::internal::call_with_build_date_str( cb, arg ); -} - -}} - -/* - * RML server info - */ -#include "version_string.tmp" - -#ifndef __TBB_VERSION_STRINGS -#pragma message("Warning: version_string.tmp isn't generated properly by version_info.sh script!") -#endif - -// We use the build time as the RML server info. TBB is required to build RML, so we make it the same as the TBB build time. 
-#ifndef __TBB_DATETIME -#define __TBB_DATETIME __DATE__ " " __TIME__ -#endif - -#if !RML_USE_WCRM -#define RML_SERVER_BUILD_TIME "Intel(R) RML library built: " __TBB_DATETIME -#define RML_SERVER_VERSION_ST "Intel(R) RML library version: v" TOSTRING(SERVER_VERSION) -#else -#define RML_SERVER_BUILD_TIME "Intel(R) RML library built: " __TBB_DATETIME -#define RML_SERVER_VERSION_ST "Intel(R) RML library version: v" TOSTRING(SERVER_VERSION) " on ConcRT RM with " RML_THREAD_KIND_STRING -#endif - -namespace rml { -namespace internal { - -void call_with_build_date_str( ::rml::server_info_callback_t cb, void* arg ) -{ - (*cb)( arg, RML_SERVER_BUILD_TIME ); - (*cb)( arg, RML_SERVER_VERSION_ST ); -} -}} // rml::internal diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/thread_monitor.h b/deal.II/bundled/tbb30_104oss/src/rml/server/thread_monitor.h deleted file mode 100644 index 59e1a1c398..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/thread_monitor.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// All platform-specific threading support is encapsulated here. */ - -#ifndef __RML_thread_monitor_H -#define __RML_thread_monitor_H - -#if USE_WINTHREAD -#include <windows.h> -#include <process.h> -#include <malloc.h> //_alloca -#elif USE_PTHREAD -#include <pthread.h> -#include <string.h> -#include <stdlib.h> -#else -#error Unsupported platform -#endif -#include <stdio.h> -#include "tbb/itt_notify.h" - -// All platform-specific threading support is in this header. - -#if (_WIN32||_WIN64)&&!__TBB_ipf -// Deal with 64K aliasing. The formula for "offset" is a Fibonacci hash function, -// which has the desirable feature of spreading out the offsets fairly evenly -// without knowing the total number of offsets, and furthermore unlikely to -// accidentally cancel out other 64K aliasing schemes that Microsoft might implement later. -// See Knuth Vol 3. "Theorem S" for details on Fibonacci hashing. -// The second statement is really does need "volatile", otherwise the compiler might remove the _alloca. 
-#define AVOID_64K_ALIASING(idx) \ - size_t offset = (idx+1) * 40503U % (1U<<16); \ - void* volatile sink_for_alloca = _alloca(offset); \ - __TBB_ASSERT_EX(sink_for_alloca, "_alloca failed"); -#else -// Linux thread allocators avoid 64K aliasing. -#define AVOID_64K_ALIASING(idx) -#endif /* _WIN32||_WIN64 */ - -namespace rml { - -namespace internal { - -#if DO_ITT_NOTIFY -static const ::tbb::tchar *SyncType_RML = _T("%Constant"); -static const ::tbb::tchar *SyncObj_ThreadMonitorLock = _T("RML Lock"), - *SyncObj_ThreadMonitor = _T("RML Thr Monitor"); -#endif /* DO_ITT_NOTIFY */ - -//! Monitor with limited two-phase commit form of wait. -/** At most one thread should wait on an instance at a time. */ -class thread_monitor { -public: - class cookie { - friend class thread_monitor; - unsigned long long my_version; - }; - thread_monitor(); - ~thread_monitor(); - - //! If a thread is waiting or started a two-phase wait, notify it. - /** Can be called by any thread. */ - void notify(); - - //! Begin two-phase wait. - /** Should only be called by thread that owns the monitor. - The caller must either complete the wait or cancel it. */ - void prepare_wait( cookie& c ); - - //! Complete a two-phase wait and wait until notification occurs after the earlier prepare_wait. - void commit_wait( cookie& c ); - - //! Cancel a two-phase wait. - void cancel_wait(); - -#if USE_WINTHREAD -#define __RML_DECL_THREAD_ROUTINE unsigned WINAPI - typedef unsigned (WINAPI *thread_routine_type)(void*); -#endif /* USE_WINTHREAD */ - -#if USE_PTHREAD -#define __RML_DECL_THREAD_ROUTINE void* - typedef void*(*thread_routine_type)(void*); -#endif /* USE_PTHREAD */ - - //! Launch a thread - static void launch( thread_routine_type thread_routine, void* arg, size_t stack_size ); - static void yield(); - -private: - cookie my_cookie; -#if USE_WINTHREAD - CRITICAL_SECTION critical_section; - HANDLE event; -#endif /* USE_WINTHREAD */ -#if USE_PTHREAD - pthread_mutex_t my_mutex; - pthread_cond_t my_cond; - static void check( int error_code, const char* routine ); -#endif /* USE_PTHREAD */ -}; - -#if USE_WINTHREAD -#ifndef STACK_SIZE_PARAM_IS_A_RESERVATION -#define STACK_SIZE_PARAM_IS_A_RESERVATION 0x00010000 -#endif -inline void thread_monitor::launch( thread_routine_type thread_routine, void* arg, size_t stack_size ) { - unsigned thread_id; - uintptr_t status = _beginthreadex( NULL, unsigned(stack_size), thread_routine, arg, STACK_SIZE_PARAM_IS_A_RESERVATION, &thread_id ); - if( status==0 ) { - fprintf(stderr,"thread_monitor::launch: _beginthreadex failed\n"); - exit(1); - } else { - CloseHandle((HANDLE)status); - } -} - -inline void thread_monitor::yield() { - SwitchToThread(); -} - -inline thread_monitor::thread_monitor() { - event = CreateEvent( NULL, /*manualReset=*/true, /*initialState=*/false, NULL ); - InitializeCriticalSection( &critical_section ); - ITT_SYNC_CREATE(&event, SyncType_RML, SyncObj_ThreadMonitor); - ITT_SYNC_CREATE(&critical_section, SyncType_RML, SyncObj_ThreadMonitorLock); - my_cookie.my_version = 0; -} - -inline thread_monitor::~thread_monitor() { - // Fake prepare/acquired pair for Intel(R) Parallel Amplifier to correctly attribute the operations below - ITT_NOTIFY( sync_prepare, &event ); - CloseHandle( event ); - DeleteCriticalSection( &critical_section ); - ITT_NOTIFY( sync_acquired, &event ); -} - -inline void thread_monitor::notify() { - EnterCriticalSection( &critical_section ); - ++my_cookie.my_version; - SetEvent( event ); - LeaveCriticalSection( &critical_section ); -} - -inline void 
thread_monitor::prepare_wait( cookie& c ) { - EnterCriticalSection( &critical_section ); - c = my_cookie; -} - -inline void thread_monitor::commit_wait( cookie& c ) { - ResetEvent( event ); - LeaveCriticalSection( &critical_section ); - while( my_cookie.my_version==c.my_version ) { - WaitForSingleObject( event, INFINITE ); - ResetEvent( event ); - } -} - -inline void thread_monitor::cancel_wait() { - LeaveCriticalSection( &critical_section ); -} -#endif /* USE_WINTHREAD */ - -#if USE_PTHREAD -inline void thread_monitor::check( int error_code, const char* routine ) { - if( error_code ) { - fprintf(stderr,"thread_monitor %s\n", strerror(error_code) ); - exit(1); - } -} - -inline void thread_monitor::launch( void* (*thread_routine)(void*), void* arg, size_t stack_size ) { - // FIXME - consider more graceful recovery than just exiting if a thread cannot be launched. - // Note that there are some tricky situations to deal with, such that the thread is already - // grabbed as part of an OpenMP team. - pthread_attr_t s; - check(pthread_attr_init( &s ), "pthread_attr_init"); - if( stack_size>0 ) { - check(pthread_attr_setstacksize( &s, stack_size ),"pthread_attr_setstack_size"); - } - pthread_t handle; - check( pthread_create( &handle, &s, thread_routine, arg ), "pthread_create" ); - check( pthread_detach( handle ), "pthread_detach" ); -} - -inline void thread_monitor::yield() { - sched_yield(); -} - -inline thread_monitor::thread_monitor() { - check( pthread_cond_init(&my_cond,NULL), "pthread_cond_init" ); - check( pthread_mutex_init(&my_mutex,NULL), "pthread_mutex_init" ); - ITT_SYNC_CREATE(&my_cond, SyncType_RML, SyncObj_ThreadMonitor); - ITT_SYNC_CREATE(&my_mutex, SyncType_RML, SyncObj_ThreadMonitorLock); - my_cookie.my_version = 0; -} - -inline thread_monitor::~thread_monitor() { - pthread_cond_destroy(&my_cond); - pthread_mutex_destroy(&my_mutex); -} - -inline void thread_monitor::notify() { - check( pthread_mutex_lock( &my_mutex ), "pthread_mutex_lock" ); - ++my_cookie.my_version; - check( pthread_mutex_unlock( &my_mutex ), "pthread_mutex_unlock" ); - check( pthread_cond_signal(&my_cond), "pthread_cond_signal" ); -} - -inline void thread_monitor::prepare_wait( cookie& c ) { - check( pthread_mutex_lock( &my_mutex ), "pthread_mutex_lock" ); - c = my_cookie; -} - -inline void thread_monitor::commit_wait( cookie& c ) { - while( my_cookie.my_version==c.my_version ) { - pthread_cond_wait( &my_cond, &my_mutex ); - } - check( pthread_mutex_unlock( &my_mutex ), "pthread_mutex_unlock" ); -} - -inline void thread_monitor::cancel_wait() { - check( pthread_mutex_unlock( &my_mutex ), "pthread_mutex_unlock" ); -} -#endif /* USE_PTHREAD */ - -} // namespace internal -} // namespace rml - -#endif /* __RML_thread_monitor_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/wait_counter.h b/deal.II/bundled/tbb30_104oss/src/rml/server/wait_counter.h deleted file mode 100644 index 4018d1b4c6..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/wait_counter.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __RML_wait_counter_H -#define __RML_wait_counter_H - -#include "thread_monitor.h" -#include "tbb/atomic.h" - -namespace rml { -namespace internal { - -class wait_counter { - thread_monitor my_monitor; - tbb::atomic<int> my_count; - tbb::atomic<int> n_transients; -public: - wait_counter() { - // The "1" here is subtracted by the call to "wait". - my_count=1; - n_transients=0; - } - - //! Wait for number of operator-- invocations to match number of operator++ invocations. - /** Exactly one thread should call this method. */ - void wait() { - int k = --my_count; - __TBB_ASSERT( k>=0, "counter underflow" ); - if( k>0 ) { - thread_monitor::cookie c; - my_monitor.prepare_wait(c); - if( my_count ) - my_monitor.commit_wait(c); - else - my_monitor.cancel_wait(); - } - while( n_transients>0 ) - __TBB_Yield(); - } - void operator++() { - ++my_count; - } - void operator--() { - ++n_transients; - int k = --my_count; - __TBB_ASSERT( k>=0, "counter underflow" ); - if( k==0 ) - my_monitor.notify(); - --n_transients; - } -}; - -} // namespace internal -} // namespace rml - -#endif /* __RML_wait_counter_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/win32-rml-export.def b/deal.II/bundled/tbb30_104oss/src/rml/server/win32-rml-export.def deleted file mode 100644 index 7902330ba1..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/win32-rml-export.def +++ /dev/null @@ -1,35 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. 
Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -EXPORTS - -__RML_open_factory -__RML_close_factory -__TBB_make_rml_server -__KMP_make_rml_server -__TBB_call_with_my_server_info -__KMP_call_with_my_server_info - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/server/win64-rml-export.def b/deal.II/bundled/tbb30_104oss/src/rml/server/win64-rml-export.def deleted file mode 100644 index 7902330ba1..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/server/win64-rml-export.def +++ /dev/null @@ -1,35 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -EXPORTS - -__RML_open_factory -__RML_close_factory -__TBB_make_rml_server -__KMP_make_rml_server -__TBB_call_with_my_server_info -__KMP_call_with_my_server_info - diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/rml_omp_stub.cpp b/deal.II/bundled/tbb30_104oss/src/rml/test/rml_omp_stub.cpp deleted file mode 100644 index f0a9587b06..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/rml_omp_stub.cpp +++ /dev/null @@ -1,71 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// This file is compiled with C++, but linked with a program written in C. -// The intent is to find dependencies on the C++ run-time. - -#include <stdlib.h> -#define RML_PURE_VIRTUAL_HANDLER abort - -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) -// VS2008/VC9 seems to have an issue; -#pragma warning( push ) -#pragma warning( disable: 4100 ) -#endif -#include "rml_omp.h" -#if _MSC_VER==1500 && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif - -rml::versioned_object::version_type Version; - -class MyClient: public __kmp::rml::omp_client { -public: - /*override*/rml::versioned_object::version_type version() const {return 0;} - /*override*/size_type max_job_count() const {return 1024;} - /*override*/size_t min_stack_size() const {return 1<<20;} - /*override*/rml::job* create_one_job() {return NULL;} - /*override*/void acknowledge_close_connection() {} - /*override*/void cleanup(job&) {} - /*override*/policy_type policy() const {return throughput;} - /*override*/void process( job&, void*, __kmp::rml::omp_client::size_type ) {} - -}; - -//! Never actually set, because point of test is to find linkage issues. -__kmp::rml::omp_server* MyServerPtr; - -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#define HARNESS_CUSTOM_MAIN 1 -#include "harness.h" - -extern "C" void Cplusplus() { - MyClient client; - Version = client.version(); - REPORT("done\n"); -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/test_job_automaton.cpp b/deal.II/bundled/tbb30_104oss/src/rml/test/test_job_automaton.cpp deleted file mode 100644 index 0aae6367d1..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/test_job_automaton.cpp +++ /dev/null @@ -1,153 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "job_automaton.h" -#define HARNESS_NO_PARSE_COMMAND_LINE 1 -#include "harness.h" -#include "harness_barrier.h" - -class State { - Harness::SpinBarrier barrier; - rml::internal::job_automaton ja; - rml::job job; - tbb::atomic<int> job_created; - tbb::atomic<int> job_destroyed; - tbb::atomic<bool> job_received; -public: - State() : barrier(2) { - job_created = 0; - job_destroyed = 0; - job_received = false; - } - void exercise( bool is_owner ); - ~State() { - ASSERT( job_created==job_destroyed, "accounting error" ); - ASSERT( job_destroyed<=1, "destroyed job twice" ); - } -}; - -int DelayMask; -const int N = 14; -tbb::atomic<int> Coverage[N]; - -//! Mark kth interval as covered and insert delay if kth bit of DelayMask is set. -/** An interval is the code between two operations on the job_automaton that we are testing. */ -void Cover( int k ) { - ASSERT( k<N, NULL ); - ++Coverage[k]; - if( DelayMask>>k&1 ) { - // Introduce delay (and possibly a thread context switch) - __TBB_Yield(); - } -} - -void State::exercise( bool is_owner ) { - barrier.wait(); - if( is_owner ) { - Cover(0); - if( ja.try_acquire() ) { - Cover(1); - ++job_created; - ja.set_and_release(job); - Cover(2); - if( ja.try_acquire() ) { - Cover(3); - ja.release(); - Cover(4); - if( ja.try_acquire() ) { - Cover(5); - ja.release(); - } - } - Cover(6); - } else { - Cover(7); - } - if( DelayMask&1<<N ) { - while( !job_received ) - __TBB_Yield(); - } - } else { - // Using extra bit of DelayMask for choosing whether to run wait_for_job or not. - if( DelayMask&1<<N ) { - rml::job* j= &ja.wait_for_job(); - if( j!=&job ) REPORT("%p\n",j); - ASSERT( j==&job, NULL ); - job_received = true; - } - Cover(8); - } - rml::job* j; - if( ja.try_plug(j) ) { - ASSERT( j==&job || !j, NULL ); - if( j ) { - Cover(9+is_owner); - ++job_destroyed; - } else { - __TBB_ASSERT( !is_owner, "owner failed to create job but plugged self" ); - Cover(11); - } - } else { - Cover(12+is_owner); - } -} - -class Loop: NoAssign { - State& s; -public: - Loop(State& s_) : s(s_) {} - void operator()( int i ) const {s.exercise(i==0);} -}; - -/** Return true if coverage is acceptable. - If report==true, issue message if it is unacceptable. 
*/ -bool CheckCoverage( bool report ) { - bool okay = true; - for( int i=0; i<N; ++i ) { - const int min_coverage = 4; - if( Coverage[i]<min_coverage ) { - okay = false; - if( report ) - REPORT("Warning: Coverage[%d]=%d is less than acceptable minimum of %d\n", i, int(Coverage[i]),min_coverage); - } - } - return okay; -} - -int TestMain () { - for( DelayMask=0; DelayMask<8<<N; ++DelayMask ) { - State s; - NativeParallelFor( 2, Loop(s) ); - if( CheckCoverage(false) ) { - // Reached acceptable code coverage level - break; - } - } - CheckCoverage(true); - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_mixed.cpp b/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_mixed.cpp deleted file mode 100644 index 32c2f25c93..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_mixed.cpp +++ /dev/null @@ -1,253 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "rml_tbb.h" -#include "rml_omp.h" -#include "tbb/atomic.h" -#include "tbb/tick_count.h" - -#define HARNESS_DEFAULT_MIN_THREADS 4 -#include "harness.h" - -const int OMP_ParallelRegionSize = 16; -int TBB_MaxThread = 4; // Includes master -int OMP_MaxThread = int(~0u>>1); // Includes master - -template<typename Client> -class ClientBase: public Client { -protected: - typedef typename Client::version_type version_type; - typedef typename Client::job job; - typedef typename Client::policy_type policy_type; - -private: - /*override*/version_type version() const { - return 0; - } - /*override*/size_t min_stack_size() const { - return 1<<20; - } - /*override*/job* create_one_job() { - return new rml::job; - } - /*override*/policy_type policy() const { - return Client::turnaround; - } - /*override*/void acknowledge_close_connection() { - delete this; - } - /*override*/void cleanup( job& j ) {delete &j;} -}; - -//! Represents a TBB or OpenMP run-time that uses RML. -template<typename Factory, typename Client> -class RunTime { -public: - //! Factory that run-time uses to make servers. - Factory factory; - Client* client; - typename Factory::server_type* server; -#if _WIN32||_WIN64 - ::rml::server::execution_resource_t me; -#endif - RunTime() { - factory.open(); - } - ~RunTime() { - factory.close(); - } - //! 
Create server for this run-time - void create_connection(); - - //! Destroy server for this run-time - void destroy_connection(); -}; - -class ThreadLevelRecorder { - tbb::atomic<int> level; - struct record { - tbb::tick_count time; - int nthread; - }; - tbb::atomic<unsigned> next; - /** Must be power of two */ - static const unsigned max_record_count = 1<<20; - record array[max_record_count]; -public: - void change_level( int delta ); - void dump(); -}; - -void ThreadLevelRecorder::change_level( int delta ) { - int x = level+=delta; - tbb::tick_count t = tbb::tick_count::now(); - unsigned k = next++; - if( k<max_record_count ) { - record& r = array[k]; - r.time = t; - r.nthread = x; - } -} - -void ThreadLevelRecorder::dump() { - FILE* f = fopen("time.txt","w"); - if( !f ) { - perror("fopen(time.txt)\n"); - exit(1); - } - unsigned limit = next; - if( limit>max_record_count ) { - // Clip - limit = next; - } - for( unsigned i=0; i<limit; ++i ) { - fprintf(f,"%f\t%d\n",(array[i].time-array[0].time).seconds(),array[i].nthread); - } - fclose(f); -} - -ThreadLevelRecorder TotalThreadLevel; - -class TBB_Client: public ClientBase<tbb::internal::rml::tbb_client> { - /*override*/void process( job& j ); - /*override*/size_type max_job_count() const { - return TBB_MaxThread-1; - } -}; - -class OMP_Client: public ClientBase<__kmp::rml::omp_client> { - /*override*/void process( job&, void* cookie, omp_client::size_type ); - /*override*/size_type max_job_count() const { - return OMP_MaxThread-1; - } -}; - -RunTime<tbb::internal::rml::tbb_factory, TBB_Client> TBB_RunTime; -RunTime<__kmp::rml::omp_factory, OMP_Client> OMP_RunTime; - -template<typename Factory, typename Client> -void RunTime<Factory,Client>::create_connection() { - client = new Client; - typename Factory::status_type status = factory.make_server( server, *client ); - ASSERT( status==Factory::st_success, NULL ); -#if _WIN32||_WIN64 - server->register_master( me ); -#endif /* _WIN32||_WIN64 */ -} - -template<typename Factory, typename Client> -void RunTime<Factory,Client>::destroy_connection() { -#if _WIN32||_WIN64 - server->unregister_master( me ); -#endif /* _WIN32||_WIN64 */ - server->request_close_connection(); - server = NULL; -} - -class OMP_Team { -public: - OMP_Team( __kmp::rml::omp_server& ) {} - tbb::atomic<unsigned> barrier; -}; - -tbb::atomic<int> AvailWork; -tbb::atomic<int> CompletionCount; - -void OMPWork() { - tbb::atomic<int> x; - for( x=0; x<2000000; ++x ) { - continue; - } -} - -void TBBWork() { - if( AvailWork>=0 ) { - int k = --AvailWork; - if( k==-1 ) { - TBB_RunTime.server->adjust_job_count_estimate(-(TBB_MaxThread-1)); - ++CompletionCount; - } else if( k>=0 ) { - for( int k=0; k<4; ++k ) { - OMP_Team team( *OMP_RunTime.server ); - int n = OMP_RunTime.server->try_increase_load( OMP_ParallelRegionSize-1, /*strict=*/false ); - team.barrier = 0; - ::rml::job* array[OMP_ParallelRegionSize-1]; - if( n>0) - OMP_RunTime.server->get_threads( n, &team, array ); - // Master does work inside parallel region too. 
- OMPWork(); - // Master waits for workers to finish - if( n>0 ) - while( team.barrier!=unsigned(n) ) { - __TBB_Yield(); - } - } - ++CompletionCount; - } - } -} - -/*override*/void TBB_Client::process( job& ) { - TotalThreadLevel.change_level(1); - TBBWork(); - TotalThreadLevel.change_level(-1); -} - -/*override*/void OMP_Client::process( job& /* j */, void* cookie, omp_client::size_type ) { - TotalThreadLevel.change_level(1); - ASSERT( OMP_RunTime.server, NULL ); - OMPWork(); - ASSERT( OMP_RunTime.server, NULL ); - static_cast<OMP_Team*>(cookie)->barrier+=1; - TotalThreadLevel.change_level(-1); -} - -void TBBOutSideOpenMPInside() { - TotalThreadLevel.change_level(1); - CompletionCount = 0; - int tbbtasks = 32; - AvailWork = tbbtasks; - TBB_RunTime.server->adjust_job_count_estimate(TBB_MaxThread-1); - while( CompletionCount!=tbbtasks+1 ) { - TBBWork(); - } - TotalThreadLevel.change_level(-1); -} - -int TestMain () { - for( int TBB_MaxThread=MinThread; TBB_MaxThread<=MaxThread; ++TBB_MaxThread ) { - REMARK("Testing with TBB_MaxThread=%d\n", TBB_MaxThread); - TBB_RunTime.create_connection(); - OMP_RunTime.create_connection(); - TBBOutSideOpenMPInside(); - OMP_RunTime.destroy_connection(); - TBB_RunTime.destroy_connection(); - } - TotalThreadLevel.dump(); - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp.cpp b/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp.cpp deleted file mode 100644 index 99000d18df..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp.cpp +++ /dev/null @@ -1,196 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "rml_omp.h" - -typedef __kmp::rml::omp_server MyServer; -typedef __kmp::rml::omp_factory MyFactory; - -// Forward declaration for the function used in test_server.h -void DoClientSpecificVerification( MyServer& , int ); - -#define HARNESS_DEFAULT_MIN_THREADS 0 -#include "test_server.h" -#include "tbb/tbb_misc.h" - -static bool StrictTeam; - -class MyTeam { - MyTeam& operator=( const MyTeam& ) ; -public: - struct info_type { - rml::job* job; - bool ran; - info_type() : job(NULL), ran(false) {} - }; - MyTeam( MyServer& /* server */, size_t max_thread_ ) : - max_thread(max_thread_) - { - self_ptr = this; - info = new info_type[max_thread]; - } - ~MyTeam() { - delete[] info; - } - const size_t max_thread; - size_t n_thread; - tbb::atomic<int> barrier; - /** Indexed with 1-origin index */ - info_type* info; - int iteration; - MyTeam* self_ptr; -}; - -class MyClient: public ClientBase<__kmp::rml::omp_client> { -public: - MyServer* server; - /*override*/void process( job& j, void* cookie, size_type index ) { - MyTeam& t = *static_cast<MyTeam*>(cookie); - ASSERT( t.self_ptr==&t, "trashed cookie" ); - ASSERT( index<t.max_thread, NULL ); - ASSERT( !t.info[index].ran, "duplicate index?" ); - t.info[index].job = &j; - t.info[index].ran = true; - do_process(j); - if( index==1 && nesting.level<nesting.limit ) { - DoOneConnection<MyFactory,MyClient> doc(MaxThread,Nesting(nesting.level+1,nesting.limit),0,false); - doc(0); - } -#if _WIN32||_WIN64 - // test activate/deactivate - if( t.n_thread>1 && t.n_thread%2==0 ) { - if( nesting.level==0 ) { - if( index&1 ) { - size_type target = index-1; - ASSERT( target<t.max_thread, NULL ); - // wait until t.info[target].job is defined - tbb::internal::spin_wait_until_eq( t.info[target].ran, true ); - server->try_increase_load( 1, true ); - server->reactivate( t.info[target].job ); - } else { - server->deactivate( &j ); - } - } - } -#endif /* _WIN32||_WIN64 */ - ++t.barrier; - } - static const bool is_omp = true; - bool is_strict() const {return StrictTeam;} -}; - -void FireUpJobs( MyServer& server, MyClient& client, int max_thread, int n_extra, Checker* checker ) { - ASSERT( max_thread>=0, NULL ); -#if _WIN32||_WIN64 - ::rml::server::execution_resource_t me; - server.register_master( me ); -#endif /* _WIN32||_WIN64 */ - client.server = &server; - MyTeam team(server,size_t(max_thread)); - MyServer::size_type n_thread = 0; - for( int iteration=0; iteration<4; ++iteration ) { - for( size_t i=0; i<team.max_thread; ++i ) - team.info[i].ran = false; - switch( iteration ) { - default: - n_thread = int(max_thread); - break; - case 1: - // No change in number of threads - break; - case 2: - // Decrease number of threads. - n_thread = int(max_thread)/2; - break; - // Case 3 is same code as the default, but has effect of increasing the number of threads. - } - team.barrier = 0; - REMARK("client %d: server.run with n_thread=%d\n", client.client_id(), int(n_thread) ); - server.independent_thread_number_changed( n_extra ); - if( checker ) { - // Give RML time to respond to change in number of threads. 
- MilliSleep(1); - } - int n_delivered = server.try_increase_load( n_thread, StrictTeam ); - ASSERT( !StrictTeam || n_delivered==int(n_thread), "server failed to satisfy strict request" ); - if( n_delivered<0 ) { - REMARK( "client %d: oversubscription occurred (by %d)\n", client.client_id(), -n_delivered ); - server.independent_thread_number_changed( -n_extra ); - n_delivered = 0; - } else { - team.n_thread = n_delivered; - ::rml::job* job_array[JobArraySize]; - job_array[n_delivered] = (::rml::job*)intptr_t(-1); - server.get_threads( n_delivered, &team, job_array ); - __TBB_ASSERT( job_array[n_delivered]== (::rml::job*)intptr_t(-1), NULL ); - for( int i=0; i<n_delivered; ++i ) { - MyJob* j = static_cast<MyJob*>(job_array[i]); - int s = j->state; - ASSERT( s==MyJob::idle||s==MyJob::busy, NULL ); - } - server.independent_thread_number_changed( -n_extra ); - REMARK("client %d: team size is %d\n", client.client_id(), n_delivered); - if( checker ) { - checker->check_number_of_threads_delivered( n_delivered, n_thread, n_extra ); - } - // Protocol requires that master wait until workers have called "done_processing" - while( team.barrier!=n_delivered ) { - ASSERT( team.barrier>=0, NULL ); - ASSERT( team.barrier<=n_delivered, NULL ); - __TBB_Yield(); - } - REMARK("client %d: team completed\n", client.client_id() ); - for( int i=0; i<n_delivered; ++i ) { - ASSERT( team.info[i].ran, "thread on team allegedly delivered, but did not run?" ); - } - } - for( MyServer::size_type i=n_delivered; i<MyServer::size_type(max_thread); ++i ) { - ASSERT( !team.info[i].ran, "thread on team ran with illegal index" ); - } - } -#if _WIN32||_WIN64 - server.unregister_master( me ); -#endif -} - -void DoClientSpecificVerification( MyServer& server, int /*n_thread*/ ) -{ - ASSERT( server.current_balance()==int(tbb::internal::DetectNumberOfWorkers())-1, NULL ); -} - -int TestMain () { - StrictTeam = true; - VerifyInitialization<MyFactory,MyClient>( MaxThread ); - SimpleTest<MyFactory,MyClient>(); - - StrictTeam = false; - VerifyInitialization<MyFactory,MyClient>( MaxThread ); - SimpleTest<MyFactory,MyClient>(); - - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp_c_linkage.c b/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp_c_linkage.c deleted file mode 100644 index cb984bd77c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_omp_c_linkage.c +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -void Cplusplus(); - -int main() { - Cplusplus(); - return 0; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_tbb.cpp b/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_tbb.cpp deleted file mode 100644 index 0116fcb3c9..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/test_rml_tbb.cpp +++ /dev/null @@ -1,201 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "rml_tbb.h" - -typedef tbb::internal::rml::tbb_server MyServer; -typedef tbb::internal::rml::tbb_factory MyFactory; - -// Forward declaration of the function used in test_server.h -void DoClientSpecificVerification( MyServer&, int ); - -#define HARNESS_DEFAULT_MIN_THREADS 0 -#include "test_server.h" - -tbb::atomic<int> n_available_hw_threads; - -class MyClient: public ClientBase<tbb::internal::rml::tbb_client> { - tbb::atomic<int> counter; - tbb::atomic<int> gate; - /*override*/void process( job& j ) { - do_process(j); - //wait until the gate is open. 
- while( gate==0 ) - MilliSleep(1); - - __TBB_ASSERT( nesting.limit<=2, NULL ); - if( nesting.level>=nesting.limit ) - return; - - size_type max_outstanding_connections = max_job_count(); // if nesting.level==0 - if( nesting.level==1 ) - max_outstanding_connections *= (1+max_outstanding_connections); - - if( default_concurrency()<=max_outstanding_connections+2 ) - // i.e., if it is not guaranteed that at least two connections may be made without depleting the_balance - return; - - // at this point, ( nesting.level<nesting.limit ) && ( my_server->default_concurrency()-max_outstanding_connections>2 ) - for( ;; ) { - while( n_available_hw_threads<=1 ) - MilliSleep(1); - - int n = --n_available_hw_threads; - if( n>0 ) break; - // else I lost - ++n_available_hw_threads; - } - - DoOneConnection<MyFactory,MyClient> doc(max_job_count(),Nesting(nesting.level+1,nesting.limit),0,false); - doc(0); - - ++n_available_hw_threads; - } -public: - MyClient() {counter=1;} - static const bool is_omp = false; - bool is_strict() const {return false;} - void open_the_gate() { gate = 1; } - void close_the_gate() { gate = 0; } -}; - -void FireUpJobs( MyServer& server, MyClient& client, int n_thread, int n_extra, Checker* checker ) { - REMARK("client %d: calling adjust_job_count_estimate(%d)\n", client.client_id(),n_thread); - // Exercise independent_thread_number_changed, even for zero values. - server.independent_thread_number_changed( n_extra ); -#if _WIN32||_WIN64 - ::rml::server::execution_resource_t me; - server.register_master( me ); -#endif /* _WIN32||_WIN64 */ - // Experiments indicate that when oversubscribing, the main thread should wait a little - // while for the RML worker threads to do some work. - if( checker ) { - // Give RML time to respond to change in number of threads. - MilliSleep(1); - for( int k=0; k<n_thread; ++k ) - client.job_array[k].processing_count = 0; - } - //close the gate to keep worker threads from returning to RML until a snapshot is taken - client.close_the_gate(); - server.adjust_job_count_estimate( n_thread ); - int n_used = 0; - if( checker ) { - MilliSleep(100); - for( int k=0; k<n_thread; ++k ) - if( client.job_array[k].processing_count ) - ++n_used; - } - // open the gate - client.open_the_gate(); - // Logic further below presumes that jobs never starve, so undo previous call - // to independent_thread_number_changed before waiting on those jobs. - server.independent_thread_number_changed( -n_extra ); - REMARK("client %d: wait for each job to be processed at least once\n",client.client_id()); - // Calculate the number of jobs that are expected to get threads. - int expected = n_thread; - // Wait for expected number of jobs to be processed. 
-#if RML_USE_WCRM - int default_concurrency = server.default_concurrency(); - if( N_TestConnections>0 ) { - if( default_concurrency+1>=8 && n_thread<=3 && N_TestConnections<=3 && (default_concurrency/int(N_TestConnections)-1)>=n_thread ) { -#endif /* RML_USE_WCRM */ - for(;;) { - int n = 0; - for( int k=0; k<n_thread; ++k ) - if( client.job_array[k].processing_count!=0 ) - ++n; - if( n>=expected ) break; - server.yield(); - } -#if RML_USE_WCRM - } else if( n_thread>0 ) { - for( int m=0; m<20; ++m ) { - int n = 0; - for( int k=0; k<n_thread; ++k ) - if( client.job_array[k].processing_count!=0 ) - ++n; - if( n>=expected ) break; - MilliSleep(1); - } - } - } -#endif /* RML_USE_WCRM */ - server.adjust_job_count_estimate(-n_thread); -#if _WIN32||_WIN64 - server.unregister_master( me ); -#endif - // Give RML some time to respond - if( checker ) { - MilliSleep(1); - checker->check_number_of_threads_delivered( n_used, n_thread, n_extra ); - } -} - -void DoClientSpecificVerification( MyServer&, int n_thread ) -{ - MyClient* client = new MyClient; - client->initialize( n_thread, Nesting(), ClientStackSize[0] ); - MyFactory factory; - memset( &factory, 0, sizeof(factory) ); - MyFactory::status_type status = factory.open(); - ASSERT( status!=MyFactory::st_not_found, "could not find RML library" ); - ASSERT( status!=MyFactory::st_incompatible, NULL ); - ASSERT( status==MyFactory::st_success, NULL ); - MyFactory::server_type* server; - status = factory.make_server( server, *client ); - ASSERT( status==MyFactory::st_success, NULL ); - client->set_server( server ); - client->expect_close_connection = true; - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -} - -void Initialize() -{ - MyClient* client = new MyClient; - client->initialize( 1, Nesting(), ClientStackSize[0] ); - MyFactory factory; - memset( &factory, 0, sizeof(factory) ); - factory.open(); - MyFactory::server_type* server; - factory.make_server( server, *client ); - client->set_server( server ); - n_available_hw_threads = server->default_concurrency(); - client->expect_close_connection = true; - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -} - -int TestMain () { - VerifyInitialization<MyFactory,MyClient>( MaxThread ); - Initialize(); - SimpleTest<MyFactory,MyClient>(); - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/test_server.h b/deal.II/bundled/tbb30_104oss/src/rml/test/test_server.h deleted file mode 100644 index b23074ded5..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/test_server.h +++ /dev/null @@ -1,452 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/* This header contains code shared by test_omp_server.cpp and test_tbb_server.cpp - There is no ifndef guard - test is supposed to include this file exactly once. - The test is also exected to have #include of rml_omp.h or rml_tbb.h before - including this header. - - This header should not use any parts of TBB that require linking in the TBB run-time. - It uses a few instances of tbb::atomic<T>, all of which are completely inlined. */ - -#include "tbb/atomic.h" -#include "tbb/tbb_thread.h" -#include "harness.h" -#include "harness_memory.h" -#include "harness_concurrency_tracker.h" - -//! Define TRIVIAL as 1 to test only a single client, no nesting, no extra threads. -#define TRIVIAL 0 - -//! Maximum number of clients -#if TRIVIAL -const size_t MaxClient = 1; -#else -const size_t MaxClient = 4; -#endif - -const size_t ClientStackSize[MaxClient] = { - 1000000 -#if !TRIVIAL - ,2000000 - ,1000000 - ,4000000 -#endif /* TRIVIAL */ -}; - -const size_t OverheadStackSize = 500000; - -const size_t JobArraySize = 1000; - -static bool TestSingleConnection; - -static size_t N_TestConnections; - -#if _WIN32||_WIN64 -#include <Windows.h> /* Need Sleep */ -#else -#include <unistd.h> /* Need usleep */ -#endif - -void MilliSleep( unsigned milliseconds ) { -#if _WIN32||_WIN64 - Sleep( milliseconds ); -#else - usleep( milliseconds*1000 ); -#endif /* _WIN32||_WIN64 */ -} - -class MyJob: public ::rml::job { -public: - //! Enumeration for tracking states of a job. - enum state_t { - //! Job has not yet been allocated. - unallocated, - //! Is idle. - idle, - //! Has a thread working on it. - busy, - //! After call to client::cleanup - clean - }; - tbb::atomic<int> state; - tbb::atomic<int> processing_count; - void update( state_t new_state, state_t old_state ) { - int o = state.compare_and_swap(new_state,old_state); - ASSERT( o==old_state, "illegal transition" ); - } - void update_from_either( state_t new_state, state_t old_state1, state_t old_state2 ) { - int snapshot; - do { - snapshot = state; - ASSERT( snapshot==old_state1||snapshot==old_state2, "illegal transition" ); - } while( state.compare_and_swap(new_state,snapshot)!=snapshot ); - } - MyJob() { - state=unallocated; - processing_count=0; - } - ~MyJob() { - // Overwrite so that accidental use after destruction can be detected. 
- memset(this,-1,sizeof(*this)); - } -}; - -static tbb::atomic<int> ClientConstructions; -static tbb::atomic<int> ClientDestructions; - -struct Nesting { - int level; - int limit; - Nesting() : level(0), limit(0) {} - Nesting( int level_, int limit_ ) : level(level_), limit(limit_) {} -}; - -template<typename Client> -class ClientBase: public Client { -protected: - typedef typename Client::size_type size_type; - typedef typename Client::version_type version_type; - typedef typename Client::policy_type policy_type; - typedef typename Client::job job; -private: - size_type my_max_job_count; - size_t my_stack_size; - tbb::atomic<size_t> next_job_index; - int my_client_id; - rml::server* my_server; - -public: - enum state_t { - //! Treat *this as constructed. - live=0x1, - //! Treat *this as destroyed. - destroyed=0xDEAD - }; - - tbb::atomic<int> state; - void update( state_t new_state, state_t old_state ) { - int o = state.compare_and_swap(new_state,old_state); - ASSERT( o==old_state, NULL ); - } - - tbb::atomic<bool> expect_close_connection; - - MyJob *job_array; - - /*override*/version_type version() const { - ASSERT( state==live, NULL ); - return 1; - } - - /*override*/size_type max_job_count() const { - ASSERT( state==live, NULL ); - return my_max_job_count; - } - - /*override*/size_t min_stack_size() const { - ASSERT( state==live, NULL ); - return my_stack_size; - } - - /*override*/policy_type policy() const {return Client::throughput;} - - /*override*/void acknowledge_close_connection() { - ASSERT( expect_close_connection, NULL ); - for( size_t k=next_job_index; k>0; ) { - --k; - ASSERT( job_array[k].state==MyJob::clean, NULL ); - } - delete[] job_array; - job_array = NULL; - ASSERT( my_server, NULL ); - update( destroyed, live ); - delete this; - } - - /*override*/void cleanup( job& j_ ) { - REMARK("client %d: cleanup(%p) called\n",client_id(),&j_); - ASSERT( state==live, NULL ); - MyJob& j = static_cast<MyJob&>(j_); - while( j.state==MyJob::busy ) - my_server->yield(); - j.update(MyJob::clean,MyJob::idle); - REMARK("client %d: cleanup(%p) returns\n",client_id(),&j_); - } - - job* create_one_job(); - -protected: - void do_process( job& j_ ) { - ASSERT( state==live, NULL ); - MyJob& j = static_cast<MyJob&>(j_); - ASSERT( &j, NULL ); - j.update(MyJob::busy,MyJob::idle); - // use of the plain addition (not the atomic increment) is intentonial - j.processing_count = j.processing_count + 1; - ASSERT( my_stack_size>OverheadStackSize, NULL ); -#ifdef __ia64__ - // Half of the stack is reserved for RSE, so test only remaining half. 
- UseStackSpace( (my_stack_size-OverheadStackSize)/2 ); -#else - UseStackSpace( my_stack_size-OverheadStackSize ); -#endif - j.update(MyJob::idle,MyJob::busy); - my_server->yield(); - } -public: - ClientBase() : my_server(NULL) { - my_client_id = ClientConstructions++; - next_job_index = 0; - } - int client_id() const {return my_client_id;} - - Nesting nesting; - - void initialize( size_type max_job_count, Nesting nesting_, size_t stack_size ) { - ASSERT( stack_size>0, NULL ); - my_max_job_count = max_job_count; - nesting = nesting_; - my_stack_size = stack_size; - job_array = new MyJob[JobArraySize]; - expect_close_connection = false; - state = live; - } - - void set_server( rml::server* s ) {my_server=s;} - - unsigned default_concurrency() const { ASSERT( my_server, NULL); return my_server->default_concurrency(); } - - virtual ~ClientBase() { - ASSERT( state==destroyed, NULL ); - ++ClientDestructions; - } -}; - -template<typename Client> -typename Client::job* ClientBase<Client>::create_one_job() { - REMARK("client %d: create_one_job() called\n",client_id()); - size_t k = next_job_index++; - ASSERT( state==live, NULL ); - // Following assertion depends on assumption that implementation does not destroy jobs until - // the connection is closed. If the implementation is changed to destroy jobs sooner, the - // test logic in this header will have to be reworked. - ASSERT( k<my_max_job_count, "RML allocated more than max_job_count jobs simultaneously" ); - ASSERT( k<JobArraySize, "JobArraySize not big enough (problem is in test, not RML)" ); - MyJob& j = job_array[k]; - j.update(MyJob::idle,MyJob::unallocated); - REMARK("client %d: create_one_job() for k=%d returns %p\n",client_id(),int(k),&j); - return &j; -} - -struct warning_tracker { - tbb::atomic<int> n_more_than_available; - tbb::atomic<int> n_too_many_threads; - tbb::atomic<int> n_system_overload; - warning_tracker() { - n_more_than_available = 0; - n_too_many_threads = 0; - n_system_overload = 0; - } - bool all_set() { return n_more_than_available>0 && n_too_many_threads>0 && n_system_overload>0; } -} tracker; - -class Checker { -public: - int default_concurrency; - void check_number_of_threads_delivered( int n_delivered, int n_requested, int n_extra ) const; - Checker( rml::server& server ) : default_concurrency(int(server.default_concurrency())) {} -}; - -void Checker::check_number_of_threads_delivered( int n_delivered, int n_requested, int n_extra ) const { - ASSERT( default_concurrency>=0, NULL ); - if( tracker.all_set() ) return; - // Check that number of threads delivered is reasonable. 
- int n_avail = default_concurrency; - if( n_extra>0 ) - n_avail-=n_extra; - if( n_avail<0 ) - n_avail=0; - if( n_requested>default_concurrency ) - n_avail += n_requested-default_concurrency; - int n_expected = n_requested; - if( n_expected>n_avail ) - n_expected=n_avail; - const char* msg = NULL; - if( n_delivered>n_avail ) { - if( ++tracker.n_more_than_available>1 ) - return; - msg = "server delivered more threads than were theoretically available"; - } else if( n_delivered>n_expected ) { - if( ++tracker.n_too_many_threads>1 ) - return; - msg = "server delivered more threads than expected"; - } else if( n_delivered<n_expected ) { - if( ++tracker.n_system_overload>1 ) - return; - msg = "server delivered fewer threads than ideal; or, the system is overloaded?"; - } - if( msg ) { - REPORT("Warning: %s (n_delivered=%d n_avail=%d n_requested=%d n_extra=%d default_concurrency=%d)\n", - msg, n_delivered, n_avail, n_requested, n_extra, default_concurrency ); - } -} - -template<typename Factory,typename Client> -class DoOneConnection: NoAssign { - //! Number of threads to request - const int n_thread; - //! Nesting - const Nesting nesting; - //! Number of extra threads to pretend having outside the RML - const int n_extra; - //! If true, check number of threads actually delivered. - const bool check_delivered; -public: - DoOneConnection( int n_thread_, Nesting nesting_, int n_extra_, bool check_delivered_ ) : - n_thread(n_thread_), - nesting(nesting_), - n_extra(n_extra_), - check_delivered(check_delivered_) - { - } - - //! Test ith connection - void operator()( size_t i ) const; -}; - -template<typename Factory,typename Client> -void DoOneConnection<Factory,Client>::operator()( size_t i ) const { - ASSERT( i<MaxClient, NULL ); - Client* client = new Client; - client->initialize( Client::is_omp ? JobArraySize : n_thread, nesting, ClientStackSize[i] ); - Factory factory; - memset( &factory, 0, sizeof(factory) ); - typename Factory::status_type status = factory.open(); - ASSERT( status==Factory::st_success, NULL ); - - typename Factory::server_type* server; - status = factory.make_server( server, *client ); - ASSERT( status==Factory::st_success, NULL ); - Harness::ConcurrencyTracker ct; - REMARK("client %d: opened server n_thread=%d nesting=(%d,%d)\n", - client->client_id(), n_thread, nesting.level, nesting.limit); - client->set_server( server ); - Checker checker( *server ); - - FireUpJobs( *server, *client, n_thread, n_extra, check_delivered && !client->is_strict() ? &checker : NULL ); - - // Close the connection - client->expect_close_connection = true; - REMARK("client %d: calling request_close_connection\n", client->client_id()); -#if !RML_USE_WCRM - int default_concurrency = server->default_concurrency(); -#endif - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -#if !RML_USE_WCRM - if( TestSingleConnection ) - __TBB_ASSERT_EX( uintptr_t(factory.scratch_ptr)==uintptr_t(default_concurrency), "under/over subscription?" ); -#endif -} - -//! Test with n_threads threads and n_client clients. 
-template<typename Factory, typename Client> -void SimpleTest() { - Harness::ConcurrencyTracker::Reset(); - TestSingleConnection = true; - N_TestConnections = 1; - for( int n_thread=MinThread; n_thread<=MaxThread; ++n_thread ) { - // Test a single connection, no nesting, no extra threads - DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,0),0,false); - doc(0); - } -#if !TRIVIAL - TestSingleConnection = false; - for( int n_thread=MinThread; n_thread<=MaxThread; ++n_thread ) { - // Test parallel connections - for( int n_client=1; n_client<=int(MaxClient); ++n_client ) { - N_TestConnections = n_client; - REMARK("SimpleTest: n_thread=%d n_client=%d\n",n_thread,n_client); - NativeParallelFor( n_client, DoOneConnection<Factory,Client>(n_thread,Nesting(0,0),0,false) ); - } - // Test server::independent_thread_number_changed - N_TestConnections = 1; - for( int n_extra=-4; n_extra<=32; n_extra=n_extra+1+n_extra/5 ) { - DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,0),n_extra,true); - doc(0); - } -#if !RML_USE_WCRM - // Test nested connections - DoOneConnection<Factory,Client> doc(n_thread,Nesting(0,2),0,false); - doc(0); -#endif - } - ASSERT( Harness::ConcurrencyTracker::PeakParallelism()>1, "No multiple connections exercised?" ); -#endif /* !TRIVIAL */ - // Let RML catch up. - while( ClientConstructions!=ClientDestructions ) - MilliSleep(1); -} - -static void check_server_info( void* arg, const char* server_info ) -{ - ASSERT( strstr(server_info, (char*)arg), NULL ); -} - -template<typename Factory, typename Client> -void VerifyInitialization( int n_thread ) { - Client* client = new Client; - client->initialize( Client::is_omp ? JobArraySize : n_thread, Nesting(), ClientStackSize[0] ); - Factory factory; - memset( &factory, 0, sizeof(factory) ); - typename Factory::status_type status = factory.open(); - ASSERT( status!=Factory::st_not_found, "could not find RML library" ); - ASSERT( status!=Factory::st_incompatible, NULL ); - ASSERT( status==Factory::st_success, NULL ); - factory.call_with_server_info( check_server_info, (void*)"Intel(R) RML library" ); - typename Factory::server_type* server; - status = factory.make_server( server, *client ); - ASSERT( status!=Factory::st_incompatible, NULL ); - ASSERT( status!=Factory::st_not_found, NULL ); - ASSERT( status==Factory::st_success, NULL ); - REMARK("client %d: opened server n_thread=%d nesting=(%d,%d)\n", - client->client_id(), n_thread, 0, 0); - ASSERT( server, NULL ); - client->set_server( server ); - - DoClientSpecificVerification( *server, n_thread ); - - // Close the connection - client->expect_close_connection = true; - REMARK("client %d: calling request_close_connection\n", client->client_id()); - server->request_close_connection(); - // Client deletes itself when it sees call to acknowledge_close_connection from server. - factory.close(); -} diff --git a/deal.II/bundled/tbb30_104oss/src/rml/test/test_thread_monitor.cpp b/deal.II/bundled/tbb30_104oss/src/rml/test/test_thread_monitor.cpp deleted file mode 100644 index 9ba0345f0f..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/rml/test/test_thread_monitor.cpp +++ /dev/null @@ -1,118 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "thread_monitor.h" -#include "harness.h" -#include "harness_memory.h" - -class ThreadState { - void loop(); -public: - static __RML_DECL_THREAD_ROUTINE routine( void* arg ) { - static_cast<ThreadState*>(arg)->loop(); - return 0; - } - typedef rml::internal::thread_monitor thread_monitor; - thread_monitor monitor; - volatile int request; - volatile int ack; - volatile unsigned clock; - volatile unsigned stamp; - ThreadState() : request(-1), ack(-1), clock(0) {} -}; - -void ThreadState::loop() { - for(;;) { - ++clock; - if( ack==request ) { - thread_monitor::cookie c; - monitor.prepare_wait(c); - if( ack==request ) { - REMARK("%p: request=%d ack=%d\n", this, request, ack ); - monitor.commit_wait(c); - } else - monitor.cancel_wait(); - } else { - // Throw in delay occasionally - switch( request%8 ) { - case 0: - case 1: - case 5: - rml::internal::thread_monitor::yield(); - } - int r = request; - ack = request; - if( !r ) return; - } - } -} - -// Linux on Itanium seems to require at least 1<<18 bytes per stack. -const size_t MinStackSize = 1<<18; -const size_t MaxStackSize = 1<<22; - -int TestMain () { - for( int p=MinThread; p<=MaxThread; ++p ) { - ThreadState* t = new ThreadState[p]; - for( size_t stack_size = MinStackSize; stack_size<=MaxStackSize; stack_size*=2 ) { - REMARK("launching %d threads\n",p); - for( int i=0; i<p; ++i ) - rml::internal::thread_monitor::launch( ThreadState::routine, t+i, stack_size ); - for( int k=1000; k>=0; --k ) { - if( k%8==0 ) { - // Wait for threads to wait. - for( int i=0; i<p; ++i ) { - unsigned count = 0; - do { - t[i].stamp = t[i].clock; - rml::internal::thread_monitor::yield(); - if( ++count>=1000 ) { - REPORT("Warning: thread %d not waiting\n",i); - break; - } - } while( t[i].stamp!=t[i].clock ); - } - } - REMARK("notifying threads\n"); - for( int i=0; i<p; ++i ) { - // Change state visible to launched thread - t[i].request = k; - t[i].monitor.notify(); - } - REMARK("waiting for threads to respond\n"); - for( int i=0; i<p; ++i ) - // Wait for thread to respond - while( t[i].ack!=k ) - rml::internal::thread_monitor::yield(); - } - } - delete[] t; - } - - return Harness::Done; -} diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/arena.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/arena.cpp deleted file mode 100644 index e645cbed38..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/arena.cpp +++ /dev/null @@ -1,442 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. 
- - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "arena.h" -#include "governor.h" -#include "scheduler.h" -#include "itt_notify.h" - -#if __TBB_STATISTICS_STDOUT -#include <cstdio> -#endif - -namespace tbb { -namespace internal { - -#if !__TBB_ARENA_PER_MASTER -//------------------------------------------------------------------------ -// UnpaddedArenaPrefix -//------------------------------------------------------------------------ -inline arena& UnpaddedArenaPrefix::Arena() { - return *static_cast<tbb::internal::arena*>(static_cast<void*>( static_cast<ArenaPrefix*>(this)+1 )); -} - -void UnpaddedArenaPrefix::process( job& j ) { - generic_scheduler& s = static_cast<generic_scheduler&>(j); - __TBB_ASSERT( governor::is_set(&s), NULL ); - __TBB_ASSERT( !s.innermost_running_task, NULL ); - // Try to steal a task. - // Passing reference count is technically unnecessary in this context, - // but omitting it here would add checks inside the function. - task* t = s.receive_or_steal_task( s.dummy_task->prefix().ref_count, /*return_if_no_work=*/true ); - if (t) { - // A side effect of receive_or_steal_task is that innermost_running_task can be set. - // But for the outermost dispatch loop of a worker it has to be NULL. 
- s.innermost_running_task = NULL; - s.local_wait_for_all(*s.dummy_task,t); - } - __TBB_ASSERT( s.inbox.is_idle_state(true), NULL ); - __TBB_ASSERT( !s.innermost_running_task, NULL ); -} - -void UnpaddedArenaPrefix::cleanup( job& j ) { - generic_scheduler& s = static_cast<generic_scheduler&>(j); - if( !governor::is_set( &s ) ) { - bool is_master = governor::is_set( NULL ); - governor::assume_scheduler( &s ); - generic_scheduler::cleanup_worker( &s, !is_master ); - governor::assume_scheduler( NULL ); - } else { - generic_scheduler::cleanup_worker( &s, true ); - } -} - -void UnpaddedArenaPrefix::acknowledge_close_connection() { - Arena().free_arena(); -} - -::rml::job* UnpaddedArenaPrefix::create_one_job() { - generic_scheduler* s = generic_scheduler::create_worker( Arena(), next_job_index++ ); - governor::sign_on(s); - return s; -} -#endif /* !__TBB_ARENA_PER_MASTER */ - -//------------------------------------------------------------------------ -// arena -//------------------------------------------------------------------------ - -#if __TBB_ARENA_PER_MASTER - -void arena::process( generic_scheduler& s ) { - __TBB_ASSERT( is_alive(my_guard), NULL ); - __TBB_ASSERT( governor::is_set(&s), NULL ); - __TBB_ASSERT( !s.innermost_running_task, NULL ); - - __TBB_ASSERT( my_num_slots != 1, NULL ); - // Start search for an empty slot from the one we occupied the last time - unsigned index = s.arena_index < my_num_slots ? s.arena_index : s.random.get() % (my_num_slots - 1) + 1, - end = index; - __TBB_ASSERT( index != 0, "A worker cannot occupy slot 0" ); - __TBB_ASSERT( index < my_num_slots, NULL ); - - // Find a vacant slot - for ( ;; ) { - if ( !slot[index].my_scheduler && __TBB_CompareAndSwapW( &slot[index].my_scheduler, (intptr_t)&s, 0 ) == 0 ) - break; - if ( ++index == my_num_slots ) - index = 1; - if ( index == end ) { - // Likely this arena is already saturated - if ( --my_num_threads_active == 0 ) - close_arena(); - return; - } - } - ITT_NOTIFY(sync_acquired, &slot[index]); - s.my_arena = this; - s.arena_index = index; - s.attach_mailbox( affinity_id(index+1) ); - - slot[index].hint_for_push = index ^ unsigned(&s-(generic_scheduler*)NULL)>>16; // randomizer seed - slot[index].hint_for_pop = index; // initial value for round-robin - - unsigned new_limit = index + 1; - unsigned old_limit = my_limit; - while ( new_limit > old_limit ) { - if ( my_limit.compare_and_swap(new_limit, old_limit) == old_limit ) - break; - old_limit = my_limit; - } - - for ( ;; ) { - // Try to steal a task. - // Passing reference count is technically unnecessary in this context, - // but omitting it here would add checks inside the function. - __TBB_ASSERT( is_alive(my_guard), NULL ); - task* t = s.receive_or_steal_task( s.dummy_task->prefix().ref_count, /*return_if_no_work=*/true ); - if (t) { - // A side effect of receive_or_steal_task is that innermost_running_task can be set. - // But for the outermost dispatch loop of a worker it has to be NULL. 
- s.innermost_running_task = NULL; - s.local_wait_for_all(*s.dummy_task,t); - } - ++my_num_threads_leaving; - __TBB_ASSERT ( slot[index].head == slot[index].tail, "Worker cannot leave arena while its task pool is not empty" ); - __TBB_ASSERT( slot[index].task_pool == EmptyTaskPool, "Empty task pool is not marked appropriately" ); - // Revalidate quitting condition - // This check prevents relinquishing more than necessary workers because - // of the non-atomicity of the decision making procedure - if ( num_workers_active() >= my_num_workers_allotted || !my_num_workers_requested ) - break; - --my_num_threads_leaving; - __TBB_ASSERT( !slot[0].my_scheduler || my_num_threads_active > 0, "Who requested more workers after the last one left the dispatch loop and the master's gone?" ); - } -#if __TBB_STATISTICS - ++s.my_counters.arena_roundtrips; - *slot[index].my_counters += s.my_counters; - s.my_counters.reset(); -#endif /* __TBB_STATISTICS */ - __TBB_store_with_release( slot[index].my_scheduler, (generic_scheduler*)NULL ); - s.inbox.detach(); - __TBB_ASSERT( s.inbox.is_idle_state(true), NULL ); - __TBB_ASSERT( !s.innermost_running_task, NULL ); - __TBB_ASSERT( is_alive(my_guard), NULL ); - // Decrementing my_num_threads_active first prevents extra workers from leaving - // this arena prematurely, but can result in some workers returning back just - // to repeat the escape attempt. If instead my_num_threads_leaving is decremented - // first, the result is the opposite - premature leaving is allowed and gratuitous - // return is prevented. Since such a race has any likelihood only when multiple - // workers are in the stealing loop, and consequently there is a lack of parallel - // work in this arena, we'd rather let them go out and try get employment in - // other arenas (before returning into this one again). - --my_num_threads_leaving; - if ( !--my_num_threads_active ) - close_arena(); -} - -arena::arena ( market& m, unsigned max_num_workers ) { - __TBB_ASSERT( !my_guard, "improperly allocated arena?" ); - __TBB_ASSERT( sizeof(slot[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" ); - __TBB_ASSERT( (uintptr_t)this % NFS_GetLineSize()==0, "arena misaligned" ); - my_market = &m; - my_limit = 1; - // Two slots are mandatory: for the master, and for 1 worker (required to support starvation resistant tasks). - my_num_slots = max(2u, max_num_workers + 1); - my_max_num_workers = max_num_workers; - my_num_threads_active = 1; // accounts for the master - __TBB_ASSERT ( my_max_num_workers < my_num_slots, NULL ); - // Construct mailboxes. Mark internal synchronization elements for the tools. 
- for( unsigned i = 0; i < my_num_slots; ++i ) { - __TBB_ASSERT( !slot[i].my_scheduler && !slot[i].task_pool, NULL ); - ITT_SYNC_CREATE(slot + i, SyncType_Scheduler, SyncObj_WorkerTaskPool); - mailbox(i+1).construct(); - ITT_SYNC_CREATE(&mailbox(i+1), SyncType_Scheduler, SyncObj_Mailbox); -#if __TBB_STATISTICS - slot[i].my_counters = new ( NFS_Allocate(sizeof(statistics_counters), 1, NULL) ) statistics_counters; -#endif /* __TBB_STATISTICS */ - } - my_task_stream.initialize(my_num_slots); - ITT_SYNC_CREATE(&my_task_stream, SyncType_Scheduler, SyncObj_TaskStream); - my_mandatory_concurrency = false; -#if __TBB_TASK_GROUP_CONTEXT - my_master_default_ctx = NULL; -#endif -} - -arena& arena::allocate_arena( market& m, unsigned max_num_workers ) { - __TBB_ASSERT( sizeof(base_type) + sizeof(arena_slot) == sizeof(arena), "All arena data fields must go to arena_base" ); - __TBB_ASSERT( sizeof(base_type) % NFS_GetLineSize() == 0, "arena slots area misaligned: wrong padding" ); - __TBB_ASSERT( sizeof(mail_outbox) == NFS_MaxLineSize, "Mailbox padding is wrong" ); - - unsigned num_slots = max(2u, max_num_workers + 1); - size_t n = sizeof(base_type) + num_slots * (sizeof(mail_outbox) + sizeof(arena_slot)); - - unsigned char* storage = (unsigned char*)NFS_Allocate( n, 1, NULL ); - // Zero all slots to indicate that they are empty - memset( storage, 0, n ); - return *new( storage + num_slots * sizeof(mail_outbox) ) arena(m, max_num_workers); -} - -void arena::free_arena () { - __TBB_ASSERT( !my_num_threads_active, "There are threads in the dying arena" ); - poison_value( my_guard ); - intptr_t drained = 0; - for ( unsigned i = 1; i <= my_num_slots; ++i ) - drained += mailbox(i).drain(); - __TBB_ASSERT(my_task_stream.empty() && my_task_stream.drain()==0, "Not all enqueued tasks were executed"); -#if __TBB_COUNT_TASK_NODES - my_market->update_task_node_count( -drained ); -#endif /* __TBB_COUNT_TASK_NODES */ - my_market->release(); -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( my_master_default_ctx, "Master thread never entered the arena?" 
); - my_master_default_ctx->~task_group_context(); - NFS_Free(my_master_default_ctx); -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if __TBB_STATISTICS - for( unsigned i = 0; i < my_num_slots; ++i ) - NFS_Free( slot[i].my_counters ); -#endif /* __TBB_STATISTICS */ - void* storage = &mailbox(my_num_slots); - this->~arena(); - NFS_Free( storage ); -} - -void arena::close_arena () { -#if !__TBB_STATISTICS_EARLY_DUMP - GATHER_STATISTIC( dump_arena_statistics() ); -#endif - my_market->detach_arena( *this ); - free_arena(); -} - -#if __TBB_STATISTICS -void arena::dump_arena_statistics () { - statistics_counters total; - for( unsigned i = 0; i < my_num_slots; ++i ) { -#if __TBB_STATISTICS_EARLY_DUMP - generic_scheduler* s = slot[i].my_scheduler; - if ( s ) - *slot[i].my_counters += s->my_counters; -#else - __TBB_ASSERT( !slot[i].my_scheduler, NULL ); -#endif - if ( i != 0 ) { - total += *slot[i].my_counters; - dump_statistics( *slot[i].my_counters, i ); - } - } - dump_statistics( *slot[0].my_counters, 0 ); -#if __TBB_STATISTICS_STDOUT - printf( "----------------------------------------------\n" ); - dump_statistics( total, workers_counters_total ); - total += *slot[0].my_counters; - dump_statistics( total, arena_counters_total ); - printf( "==============================================\n" ); -#endif /* __TBB_STATISTICS_STDOUT */ -} -#endif /* __TBB_STATISTICS */ - -#else /* !__TBB_ARENA_PER_MASTER */ - -arena* arena::allocate_arena( unsigned number_of_slots, unsigned number_of_workers, stack_size_type stack_size ) { - __TBB_ASSERT( sizeof(ArenaPrefix) % NFS_GetLineSize()==0, "ArenaPrefix not multiple of cache line size" ); - __TBB_ASSERT( sizeof(mail_outbox)==NFS_MaxLineSize, NULL ); - __TBB_ASSERT( stack_size>0, NULL ); - - size_t n = sizeof(ArenaPrefix) + number_of_slots*(sizeof(mail_outbox)+sizeof(arena_slot)); - - unsigned char* storage = (unsigned char*)NFS_Allocate( n, 1, NULL ); - // Zero all slots to indicate that they are empty - memset( storage, 0, n ); - arena* a = (arena*)(storage + sizeof(ArenaPrefix)+ number_of_slots*(sizeof(mail_outbox))); - __TBB_ASSERT( sizeof(a->slot[0]) % NFS_GetLineSize()==0, "arena::slot size not multiple of cache line size" ); - __TBB_ASSERT( (uintptr_t)a % NFS_GetLineSize()==0, NULL ); - new( &a->prefix() ) ArenaPrefix( number_of_slots, number_of_workers ); - - // Allocate the worker_list - WorkerDescriptor * w = new WorkerDescriptor[number_of_workers]; - memset( w, 0, sizeof(WorkerDescriptor)*(number_of_workers)); - a->prefix().worker_list = w; - - // Construct mailboxes. - for( unsigned j=1; j<=number_of_slots; ++j ) - a->mailbox(j).construct(); - - a->prefix().stack_size = stack_size; - size_t k; - // Mark each internal sync element for the tools - for( k=0; k<number_of_workers; ++k ) { - ITT_SYNC_CREATE(a->slot + k, SyncType_Scheduler, SyncObj_WorkerTaskPool); - ITT_SYNC_CREATE(&w[k].scheduler, SyncType_Scheduler, SyncObj_WorkerLifeCycleMgmt); - ITT_SYNC_CREATE(&a->mailbox(k+1), SyncType_Scheduler, SyncObj_Mailbox); - } - for( ; k<number_of_slots; ++k ) { - ITT_SYNC_CREATE(a->slot + k, SyncType_Scheduler, SyncObj_MasterTaskPool); - ITT_SYNC_CREATE(&a->mailbox(k+1), SyncType_Scheduler, SyncObj_Mailbox); - } - - return a; -} - -void arena::free_arena () { - // Drain mailboxes - // TODO: each scheduler should plug-and-drain its own mailbox when it terminates. 
- intptr_t drain_count = 0; - for( unsigned i=1; i<=prefix().number_of_slots; ++i ) - drain_count += mailbox(i).drain(); -#if __TBB_COUNT_TASK_NODES - prefix().task_node_count -= drain_count; - if( prefix().task_node_count ) { - runtime_warning( "Leaked %ld task objects\n", long(prefix().task_node_count) ); - } -#endif /* __TBB_COUNT_TASK_NODES */ - void* storage = &mailbox(prefix().number_of_slots); - delete[] prefix().worker_list; - prefix().~ArenaPrefix(); - NFS_Free( storage ); -} - -void arena::close_arena () { - for(;;) { - pool_state_t snapshot = prefix().pool_state; - if( snapshot==SNAPSHOT_SERVER_GOING_AWAY ) - break; - if( prefix().pool_state.compare_and_swap( SNAPSHOT_SERVER_GOING_AWAY, snapshot )==snapshot ) { - if( snapshot!=SNAPSHOT_EMPTY ) - prefix().server->adjust_job_count_estimate( -int(prefix().number_of_workers) ); - break; - } - } - prefix().server->request_close_connection(); -} - -#endif /* !__TBB_ARENA_PER_MASTER */ - -bool arena::is_out_of_work() { - // TODO: rework it to return at least a hint about where a task was found; better if the task itself. - for(;;) { - pool_state_t snapshot = prefix().pool_state; - switch( snapshot ) { - case SNAPSHOT_EMPTY: -#if !__TBB_ARENA_PER_MASTER - case SNAPSHOT_SERVER_GOING_AWAY: -#endif /* !__TBB_ARENA_PER_MASTER */ - return true; - case SNAPSHOT_FULL: { - // Use unique id for "busy" in order to avoid ABA problems. - const pool_state_t busy = pool_state_t(this); - // Request permission to take snapshot - if( prefix().pool_state.compare_and_swap( busy, SNAPSHOT_FULL )==SNAPSHOT_FULL ) { - // Got permission. Take the snapshot. -#if __TBB_ARENA_PER_MASTER - size_t n = my_limit; -#else /* !__TBB_ARENA_PER_MASTER */ - size_t n = prefix().limit; -#endif /* !__TBB_ARENA_PER_MASTER */ - size_t k; - for( k=0; k<n; ++k ) - if( slot[k].task_pool != EmptyTaskPool && slot[k].head < slot[k].tail ) - break; - bool work_absent = k>=n; -#if __TBB_ARENA_PER_MASTER - work_absent = work_absent && my_task_stream.empty(); -#endif /* __TBB_ARENA_PER_MASTER */ - // Test and test-and-set. - if( prefix().pool_state==busy ) { - if( work_absent ) { -#if __TBB_ARENA_PER_MASTER - // save current demand value before setting SNAPSHOT_EMPTY, - // to avoid race with advertise_new_work. - int current_demand = (int)my_max_num_workers; -#endif - if( prefix().pool_state.compare_and_swap( SNAPSHOT_EMPTY, busy )==busy ) { - // This thread transitioned pool to empty state, and thus is responsible for - // telling RML that there is no other work to do. -#if __TBB_ARENA_PER_MASTER - my_market->adjust_demand( *this, -current_demand ); -#else /* !__TBB_ARENA_PER_MASTER */ - prefix().server->adjust_job_count_estimate( -int(prefix().number_of_workers) ); -#endif /* !__TBB_ARENA_PER_MASTER */ - return true; - } - } else { - // Undo previous transition SNAPSHOT_FULL-->busy, unless another thread undid it. - prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, busy ); - } - } - } - return false; - } - default: - // Another thread is taking a snapshot. 
- return false; - } - } -} - -#if __TBB_COUNT_TASK_NODES -intptr_t arena::workers_task_node_count() { - intptr_t result = 0; -#if __TBB_ARENA_PER_MASTER - for( unsigned i = 1; i < my_num_slots; ++i ) { - generic_scheduler* s = slot[i].my_scheduler; -#else /* !__TBB_ARENA_PER_MASTER */ - for( unsigned i=0; i<prefix().number_of_workers; ++i ) { - generic_scheduler* s = prefix().worker_list[i].scheduler; -#endif /* !__TBB_ARENA_PER_MASTER */ - if( s ) - result += s->task_node_count; - } - return result; -} -#endif /* __TBB_COUNT_TASK_NODES */ - -} // namespace internal -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/arena.h b/deal.II/bundled/tbb30_104oss/src/tbb/arena.h deleted file mode 100644 index 495ddc0375..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/arena.h +++ /dev/null @@ -1,504 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_arena_H -#define _TBB_arena_H - -#include "tbb/tbb_stddef.h" -#include "tbb/atomic.h" - -#if __TBB_ARENA_PER_MASTER -#include "scheduler_common.h" -#include "market.h" -#include "intrusive_list.h" -#include "task_stream.h" -#else /* !__TBB_ARENA_PER_MASTER */ -#include "../rml/include/rml_tbb.h" -#endif /* !__TBB_ARENA_PER_MASTER */ - -#include "mailbox.h" - -namespace tbb { - -#if __TBB_ARENA_PER_MASTER -class task_group_context; -class allocate_root_with_context_proxy; -#endif /* __TBB_ARENA_PER_MASTER */ - -namespace internal { - -class governor; -class arena; -class generic_scheduler; -template<typename SchedulerTraits> class custom_scheduler; - -#if !__TBB_ARENA_PER_MASTER -//------------------------------------------------------------------------ -// UnpaddedArenaPrefix -//------------------------------------------------------------------------ - -struct WorkerDescriptor { - //! NULL until worker is published. -1 if worker should not be published. - generic_scheduler* scheduler; -}; - -//! The useful contents of an ArenaPrefix -class UnpaddedArenaPrefix: no_copy, rml::tbb_client { - friend class generic_scheduler; - template<typename SchedulerTraits> friend class custom_scheduler; - friend class arena; - friend class governor; - friend struct WorkerDescriptor; - - //! Arena slot to try to acquire first for the next new master. 
- unsigned limit; - - //! Number of masters that own this arena. - /** This may be smaller than the number of masters who have entered the arena. */ - unsigned number_of_masters; - - //! Total number of slots in the arena - const unsigned number_of_slots; - - //! Number of workers that belong to this arena - const unsigned number_of_workers; - - //! Pointer to the RML server object that services requests for this arena. - rml::tbb_server* server; - - //! Counter used to allocate job indices - tbb::atomic<size_t> next_job_index; - - //! Stack size of worker threads - size_t stack_size; - - //! Array of workers. - WorkerDescriptor* worker_list; - -#if __TBB_COUNT_TASK_NODES - //! Net number of nodes that have been allocated from heap. - /** Updated each time a scheduler is destroyed. */ - atomic<intptr_t> task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - - //! Estimate of number of available tasks. - /** The estimate is either 0 (SNAPSHOT_EMPTY), infinity (SNAPSHOT_FULL), or a special value. - The implementation of arena::is_busy_or_empty requires that pool_state_t be unsigned. */ - typedef uintptr_t pool_state_t; - - //! Current estimate of number of available tasks. - tbb::atomic<pool_state_t> pool_state; - -protected: - UnpaddedArenaPrefix( unsigned number_of_slots_, unsigned number_of_workers_ ) : - number_of_masters(1), - number_of_slots(number_of_slots_), - number_of_workers(number_of_workers_) - { -#if __TBB_COUNT_TASK_NODES - task_node_count = 0; -#endif /* __TBB_COUNT_TASK_NODES */ - limit = number_of_workers_; - server = NULL; - stack_size = 0; - next_job_index = 0; - } - -private: - //! Return reference to corresponding arena. - arena& Arena(); - - /*override*/ version_type version() const { - return 0; - } - - /*override*/ unsigned max_job_count() const { - return number_of_workers; - } - - /*override*/ size_t min_stack_size() const { - return stack_size; - } - - /*override*/ policy_type policy() const { - return throughput; - } - - /*override*/ job* create_one_job(); - - /*override*/ void cleanup( job& j ); - - /*override*/ void acknowledge_close_connection(); - - /*override*/ void process( job& j ); -}; // class UnpaddedArenaPrefix - -//------------------------------------------------------------------------ -// ArenaPrefix -//------------------------------------------------------------------------ - -//! The prefix to arena with padding. -class ArenaPrefix: public UnpaddedArenaPrefix { - //! Padding to fill out to multiple of cache line size. - char pad[(sizeof(UnpaddedArenaPrefix)/NFS_MaxLineSize+1)*NFS_MaxLineSize-sizeof(UnpaddedArenaPrefix)]; - -public: - ArenaPrefix( unsigned number_of_slots_, unsigned number_of_workers_ ) : - UnpaddedArenaPrefix(number_of_slots_,number_of_workers_) - { - } -}; // class ArenaPrefix - -#endif /* !__TBB_ARENA_PER_MASTER */ - -//------------------------------------------------------------------------ -// arena_slot -//------------------------------------------------------------------------ - -struct arena_slot { -#if __TBB_ARENA_PER_MASTER - //! Scheduler of the thread attached to the slot - /** Marks the slot as busy, and is used to iterate through the schedulers belonging to this arena **/ - generic_scheduler* my_scheduler; -#endif /* __TBB_ARENA_PER_MASTER */ - - // Task pool (the deque of task pointers) of the scheduler that owns this slot - /** Also is used to specify if the slot is empty or locked: - 0 - empty - -1 - locked **/ - task** task_pool; - - //! Index of the first ready task in the deque. 
- /** Modified by thieves, and by the owner during compaction/reallocation **/ - size_t head; - - //! Padding to avoid false sharing caused by the thieves accessing this slot - char pad1[NFS_MaxLineSize - sizeof(size_t) - sizeof(task**) -#if __TBB_ARENA_PER_MASTER - - sizeof(generic_scheduler*) -#endif /* __TBB_ARENA_PER_MASTER */ - ]; - - //! Index of the element following the last ready task in the deque. - /** Modified by the owner thread. **/ - size_t tail; - -#if __TBB_ARENA_PER_MASTER - //! Hints provided for operations with the container of starvation-resistant tasks. - /** Modified by the owner thread (during these operations). **/ - unsigned hint_for_push, hint_for_pop; - -#endif /* __TBB_ARENA_PER_MASTER */ - -#if __TBB_STATISTICS - //! Set of counters to accumulate internal statistics related to this arena - statistics_counters *my_counters; -#endif /* __TBB_STATISTICS */ - //! Padding to avoid false sharing caused by the thieves accessing the next slot - char pad2[NFS_MaxLineSize - sizeof(size_t) -#if __TBB_ARENA_PER_MASTER - - 2*sizeof(unsigned) -#endif /* __TBB_ARENA_PER_MASTER */ -#if __TBB_STATISTICS - - sizeof(statistics_counters*) -#endif /* __TBB_STATISTICS */ - ]; -}; // class arena_slot - -//------------------------------------------------------------------------ -// arena -//------------------------------------------------------------------------ - -#if __TBB_ARENA_PER_MASTER - -//! arena data except the array of slots -/** Separated in order to simplify padding. - Intrusive list node base class is used by market to form a list of arenas. **/ -struct arena_base : intrusive_list_node { - //! Market owning this arena - market* my_market; - - //! Maximal currently busy slot. - atomic<unsigned> my_limit; - - //! Number of slots in the arena - unsigned my_num_slots; - - //! Number of workers requested by the master thread owning the arena - unsigned my_max_num_workers; - - //! Number of workers that are currently requested from the resource manager - atomic<int> my_num_workers_requested; - - //! Number of workers that have been marked out by the resource manager to service the arena - unsigned my_num_workers_allotted; - - //! Number of threads in the arena at the moment - /** Consists of the workers servicing the arena and one master until it starts - arena shutdown and detaches from it. Plays the role of the arena's ref count. **/ - atomic<unsigned> my_num_threads_active; - - //! Number of threads that has exited the dispatch loop but has not left the arena yet - atomic<unsigned> my_num_threads_leaving; - - //! Current task pool state and estimate of available tasks amount. - /** The estimate is either 0 (SNAPSHOT_EMPTY) or infinity (SNAPSHOT_FULL). - Special state is "busy" (any other unsigned value). - Note that the implementation of arena::is_busy_or_empty() requires - pool_state to be unsigned. */ - tbb::atomic<uintptr_t> pool_state; - -#if __TBB_TASK_GROUP_CONTEXT - //! Pointer to the "default" task_group_context allocated by the arena's master. - task_group_context* my_master_default_ctx; -#endif - - //! The task pool that guarantees eventual execution even if new tasks are constantly coming. 
- task_stream my_task_stream; - - bool my_mandatory_concurrency; - -#if TBB_USE_ASSERT - uintptr_t my_guard; -#endif /* TBB_USE_ASSERT */ -}; // struct arena_base - -#endif /* __TBB_ARENA_PER_MASTER */ - -class arena -#if __TBB_ARENA_PER_MASTER -#if (__GNUC__<4 || __GNUC__==4 && __GNUC_MINOR__==0) && !__INTEL_COMPILER - : public padded<arena_base> -#else - : padded<arena_base> -#endif -#endif /* __TBB_ARENA_PER_MASTER */ -{ - friend class generic_scheduler; - template<typename SchedulerTraits> friend class custom_scheduler; - friend class governor; - -#if __TBB_ARENA_PER_MASTER - friend class market; - friend class tbb::task_group_context; - friend class allocate_root_with_context_proxy; - friend class intrusive_list<arena>; - - typedef padded<arena_base> base_type; - - //! Constructor - arena ( market&, unsigned max_num_workers ); - - arena& prefix() const { return const_cast<arena&>(*this); } - - //! Allocate an instance of arena. - static arena& allocate_arena( market&, unsigned max_num_workers ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Propagates cancellation request to all descendants of the context. - /** The propagation is relayed to the market because tasks created by one - master thread can be passed to and executed by other masters. This means - that context trees can span several arenas at once and thus cancellation - propagation cannot be generally localized to one arena only. **/ - void propagate_cancellation ( task_group_context& ctx ) { - my_market->propagate_cancellation( ctx ); - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#else /* !__TBB_ARENA_PER_MASTER */ - - friend class UnpaddedArenaPrefix; - friend struct WorkerDescriptor; - - //! Get reference to prefix portion - ArenaPrefix& prefix() const {return ((ArenaPrefix*)(void*)this)[-1];} - - //! Allocate an instance of arena, and prepare everything to start workers. - static arena* allocate_arena( unsigned num_slots, unsigned num_workers, size_t stack_size ); -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! Get reference to mailbox corresponding to given affinity_id. - mail_outbox& mailbox( affinity_id id ) { - __TBB_ASSERT( 0<id, "affinity id must be positive integer" ); -#if __TBB_ARENA_PER_MASTER - __TBB_ASSERT( id <= my_num_slots, "affinity id out of bounds" ); -#else /* !__TBB_ARENA_PER_MASTER */ - __TBB_ASSERT( id <= prefix().number_of_slots, "id out of bounds" ); -#endif /* !__TBB_ARENA_PER_MASTER */ - - return ((mail_outbox*)&prefix())[-(int)id]; - } - - //! Completes arena shutdown, destructs and deallocates it. - void free_arena (); - - typedef uintptr_t pool_state_t; - - //! No tasks to steal since last snapshot was taken - static const pool_state_t SNAPSHOT_EMPTY = 0; - - //! At least one task has been offered for stealing since the last snapshot started - static const pool_state_t SNAPSHOT_FULL = pool_state_t(-1); - -#if __TBB_ARENA_PER_MASTER - //! No tasks to steal or snapshot is being taken. - static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_FULL; } - - //! The number of workers active in the arena. - unsigned num_workers_active( ) { - return my_num_threads_active - my_num_threads_leaving - (slot[0].my_scheduler? 1: 0); - } - - //! If necessary, raise a flag that there is new job in arena. - template<bool Spawned> void advertise_new_work(); -#else /*__TBB_ARENA_PER_MASTER*/ - //! Server is going away and hence further calls to adjust_job_count_estimate are unsafe. - static const pool_state_t SNAPSHOT_SERVER_GOING_AWAY = pool_state_t(-2); - - //! No tasks to steal or snapshot is being taken. 
- static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_SERVER_GOING_AWAY; } - - //! If necessary, raise a flag that task was added to pool recently. - inline void mark_pool_full(); -#endif /* __TBB_ARENA_PER_MASTER */ - - //! Check if there is job anywhere in arena. - /** Return true if no job or if arena is being cleaned up. */ - bool is_out_of_work(); - - //! Initiates arena shutdown. - void close_arena (); - -#if __TBB_ARENA_PER_MASTER - //! Registers the worker with the arena and enters TBB scheduler dispatch loop - void process( generic_scheduler& s ); - -#if __TBB_STATISTICS - //! Outputs internal statistics accumulated by the arena - void dump_arena_statistics (); -#endif /* __TBB_STATISTICS */ -#endif /* __TBB_ARENA_PER_MASTER */ - -#if __TBB_COUNT_TASK_NODES - //! Returns the number of task objects "living" in worker threads - intptr_t workers_task_node_count(); -#endif - - /** Must be the last data field */ - arena_slot slot[1]; -}; // class arena - - -#if __TBB_ARENA_PER_MASTER -template<bool Spawned> void arena::advertise_new_work() { - if( !Spawned ) { // i.e. the work was enqueued - if( my_max_num_workers==0 ) { - my_max_num_workers = 1; - my_mandatory_concurrency = true; - prefix().pool_state = SNAPSHOT_FULL; - my_market->adjust_demand( *this, 1 ); - return; - } - // Local memory fence is required to avoid missed wakeups; see the comment below. - // Starvation resistant tasks require mandatory concurrency, so missed wakeups are unacceptable. - __TBB_full_memory_fence(); - } - // Double-check idiom that, in case of spawning, is deliberately sloppy about memory fences. - // Technically, to avoid missed wakeups, there should be a full memory fence between the point we - // released the task pool (i.e. spawned task) and read the arena's state. However, adding such a - // fence might hurt overall performance more than it helps, because the fence would be executed - // on every task pool release, even when stealing does not occur. Since TBB allows parallelism, - // but never promises parallelism, the missed wakeup is not a correctness problem. - pool_state_t snapshot = prefix().pool_state; - if( is_busy_or_empty(snapshot) ) { - // Attempt to mark as full. The compare_and_swap below is a little unusual because the - // result is compared to a value that can be different than the comparand argument. - if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) { - if( snapshot!=SNAPSHOT_EMPTY ) { - // This thread read "busy" into snapshot, and then another thread transitioned - // pool_state to "empty" in the meantime, which caused the compare_and_swap above - // to fail. Attempt to transition pool_state from "empty" to "full". - if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) { - // Some other thread transitioned pool_state from "empty", and hence became - // responsible for waking up workers. - return; - } - } - // This thread transitioned pool from empty to full state, and thus is responsible for - // telling RML that there is work to do. - if( Spawned ) { - if( my_mandatory_concurrency ) { - __TBB_ASSERT(my_max_num_workers==1, ""); - // There was deliberate oversubscription on 1 core for sake of starvation-resistant tasks. - // Now a single active thread (must be the master) supposedly starts a new parallel region - // with relaxed sequential semantics, and oversubscription should be avoided. - // Demand for workers has been decreased to 0 during SNAPSHOT_EMPTY, so just keep it. 
- my_max_num_workers = 0; - my_mandatory_concurrency = false; - return; - } - } - my_market->adjust_demand( *this, my_max_num_workers ); - } - } -} -#else /* !__TBB_ARENA_PER_MASTER */ -inline void arena::mark_pool_full() { - // Double-check idiom that is deliberately sloppy about memory fences. - // Technically, to avoid missed wakeups, there should be a full memory fence between the point we - // released the task pool (i.e. spawned task) and read the arena's state. However, adding such a - // fence might hurt overall performance more than it helps, because the fence would be executed - // on every task pool release, even when stealing does not occur. Since TBB allows parallelism, - // but never promises parallelism, the missed wakeup is not a correctness problem. - pool_state_t snapshot = prefix().pool_state; - if( is_busy_or_empty(snapshot) ) { - // Attempt to mark as full. The compare_and_swap below is a little unusual because the - // result is compared to a value that can be different than the comparand argument. - if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, snapshot )==SNAPSHOT_EMPTY ) { - if( snapshot!=SNAPSHOT_EMPTY ) { - // This thread read "busy" into snapshot, and then another thread transitioned - // pool_state to "empty" in the meantime, which caused the compare_and_swap above - // to fail. Attempt to transition pool_state from "empty" to "full". - if( prefix().pool_state.compare_and_swap( SNAPSHOT_FULL, SNAPSHOT_EMPTY )!=SNAPSHOT_EMPTY ) { - // Some other thread transitioned pool_state from "empty", and hence became - // responsible for waking up workers. - return; - } - } - // This thread transitioned pool from empty to full state, and thus is responsible for - // telling RML that there is work to do. - prefix().server->adjust_job_count_estimate( int(prefix().number_of_workers) ); - } - } -} -#endif /* !__TBB_ARENA_PER_MASTER */ - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_arena_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/cache_aligned_allocator.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/cache_aligned_allocator.cpp deleted file mode 100644 index 9c35edf536..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/cache_aligned_allocator.cpp +++ /dev/null @@ -1,277 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_allocator.h" -#include "tbb/tbb_exception.h" -#include "tbb_misc.h" -#include "dynamic_link.h" -#include <cstdlib> - -#if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" -#else -#include <dlfcn.h> -#endif /* _WIN32||_WIN64 */ - -using namespace std; - -#if __TBB_WEAK_SYMBOLS - -#pragma weak scalable_malloc -#pragma weak scalable_free -#pragma weak scalable_aligned_malloc -#pragma weak scalable_aligned_free - -extern "C" { - void* scalable_malloc( size_t ); - void scalable_free( void* ); - void* scalable_aligned_malloc( size_t, size_t ); - void scalable_aligned_free( void* ); -} - -#endif /* __TBB_WEAK_SYMBOLS */ - -namespace tbb { - -namespace internal { - -//! Dummy routine used for first indirect call via MallocHandler. -static void* DummyMalloc( size_t size ); - -//! Dummy routine used for first indirect call via FreeHandler. -static void DummyFree( void * ptr ); - -//! Handler for memory allocation -static void* (*MallocHandler)( size_t size ) = &DummyMalloc; - -//! Handler for memory deallocation -static void (*FreeHandler)( void* pointer ) = &DummyFree; - -//! Dummy routine used for first indirect call via padded_allocate_handler. -static void* dummy_padded_allocate( size_t bytes, size_t alignment ); - -//! Dummy routine used for first indirect call via padded_free_handler. -static void dummy_padded_free( void * ptr ); - -// ! Allocates memory using standard malloc. It is used when scalable_allocator is not available -static void* padded_allocate( size_t bytes, size_t alignment ); - -// ! Allocates memory using standard free. It is used when scalable_allocator is not available -static void padded_free( void* p ); - -//! Handler for padded memory allocation -static void* (*padded_allocate_handler)( size_t bytes, size_t alignment ) = &dummy_padded_allocate; - -//! Handler for padded memory deallocation -static void (*padded_free_handler)( void* p ) = &dummy_padded_free; - -//! Table describing the how to link the handlers. -static const dynamic_link_descriptor MallocLinkTable[] = { - DLD(scalable_malloc, MallocHandler), - DLD(scalable_free, FreeHandler), - DLD(scalable_aligned_malloc, padded_allocate_handler), - DLD(scalable_aligned_free, padded_free_handler), -}; - - -#if TBB_USE_DEBUG -#define DEBUG_SUFFIX "_debug" -#else -#define DEBUG_SUFFIX -#endif /* TBB_USE_DEBUG */ - -// MALLOCLIB_NAME is the name of the TBB memory allocator library. -#if _WIN32||_WIN64 -#define MALLOCLIB_NAME "tbbmalloc" DEBUG_SUFFIX ".dll" -#elif __APPLE__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".dylib" -#elif __linux__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION) -#elif __NetBSD__ || __FreeBSD__ || __sun || _AIX -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so" -#else -#error Unknown OS -#endif - -//! Initialize the allocation/free handler pointers. -/** Caller is responsible for ensuring this routine is called exactly once. - The routine attempts to dynamically link with the TBB memory allocator. - If that allocator is not found, it links to malloc and free. */ -void initialize_cache_aligned_allocator() { - __TBB_ASSERT( MallocHandler==&DummyMalloc, NULL ); - bool success = dynamic_link( MALLOCLIB_NAME, MallocLinkTable, 4 ); - if( !success ) { - // If unsuccessful, set the handlers to the default routines. 
- // This must be done now, and not before FillDynanmicLinks runs, because if other - // threads call the handlers, we want them to go through the DoOneTimeInitializations logic, - // which forces them to wait. - FreeHandler = &free; - MallocHandler = &malloc; - padded_allocate_handler = &padded_allocate; - padded_free_handler = &padded_free; - } -#if !__TBB_RML_STATIC - PrintExtraVersionInfo( "ALLOCATOR", success?"scalable_malloc":"malloc" ); -#endif -} - -//! Defined in task.cpp -extern void DoOneTimeInitializations(); - -//! Executed on very first call through MallocHandler -static void* DummyMalloc( size_t size ) { - DoOneTimeInitializations(); - __TBB_ASSERT( MallocHandler!=&DummyMalloc, NULL ); - return (*MallocHandler)( size ); -} - -//! Executed on very first call throught FreeHandler -static void DummyFree( void * ptr ) { - DoOneTimeInitializations(); - __TBB_ASSERT( FreeHandler!=&DummyFree, NULL ); - (*FreeHandler)( ptr ); -} - -//! Executed on very first call through padded_allocate_handler -static void* dummy_padded_allocate( size_t bytes, size_t alignment ) { - DoOneTimeInitializations(); - __TBB_ASSERT( padded_allocate_handler!=&dummy_padded_allocate, NULL ); - return (*padded_allocate_handler)(bytes, alignment); -} - -//! Executed on very first call throught padded_free_handler -static void dummy_padded_free( void * ptr ) { - DoOneTimeInitializations(); - __TBB_ASSERT( padded_free_handler!=&dummy_padded_free, NULL ); - (*padded_free_handler)( ptr ); -} - -static size_t NFS_LineSize = 128; - -size_t NFS_GetLineSize() { - return NFS_LineSize; -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning( disable: 4146 4706 ) -#endif - -void* NFS_Allocate( size_t n, size_t element_size, void* /*hint*/ ) { - size_t m = NFS_LineSize; - __TBB_ASSERT( m<=NFS_MaxLineSize, "illegal value for NFS_LineSize" ); - __TBB_ASSERT( (m & (m-1))==0, "must be power of two" ); - size_t bytes = n*element_size; - - if (bytes<n || bytes+m<bytes) { - // Overflow - throw_exception(eid_bad_alloc); - } - - void* result = (*padded_allocate_handler)( bytes, m ); - __TBB_ASSERT( ((size_t)result&(m-1)) == 0, "The address returned isn't aligned to cache line size" ); - return result; -} - -void NFS_Free( void* p ) { - (*padded_free_handler)( p ); -} - -static void* padded_allocate( size_t bytes, size_t alignment ) { - unsigned char* base; - if( !(base=(unsigned char*)malloc(alignment+bytes)) ) { - throw_exception(eid_bad_alloc); - } - // Round up to the next line - unsigned char* result = (unsigned char*)((uintptr_t)(base+alignment)&-alignment); - // Record where block actually starts. - ((uintptr_t*)result)[-1] = uintptr_t(base); - return result; -} - -static void padded_free( void* p ) { - if( p ) { - __TBB_ASSERT( (uintptr_t)p>=0x4096, "attempt to free block not obtained from cache_aligned_allocator" ); - // Recover where block actually starts - unsigned char* base = ((unsigned char**)p)[-1]; - __TBB_ASSERT( (void*)((uintptr_t)(base+NFS_LineSize)&-NFS_LineSize)==p, "not allocated by NFS_Allocate?" 
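padded_allocate()/padded_free() above implement cache-line alignment on top of plain malloc by over-allocating and stashing the original pointer just in front of the aligned block. A minimal sketch of that trick, with hypothetical names and assuming a power-of-two alignment no smaller than what malloc itself guarantees:

// Minimal sketch of the over-allocate + stash-base-pointer trick used by
// padded_allocate()/padded_free() above. Names are placeholders.
#include <cstdlib>
#include <cstdint>
#include <new>

void* aligned_alloc_padded(std::size_t bytes, std::size_t alignment) {
    unsigned char* base = static_cast<unsigned char*>(std::malloc(bytes + alignment));
    if (!base) throw std::bad_alloc();
    // Round the address up into the extra 'alignment' bytes. Because malloc already
    // returns at least pointer-aligned memory, the slack before 'result' is large
    // enough to hold a uintptr_t.
    std::uintptr_t aligned = (reinterpret_cast<std::uintptr_t>(base) + alignment)
                             & ~(std::uintptr_t(alignment) - 1);
    unsigned char* result = reinterpret_cast<unsigned char*>(aligned);
    // Record where the block really starts, immediately before the returned address.
    reinterpret_cast<std::uintptr_t*>(result)[-1] = reinterpret_cast<std::uintptr_t>(base);
    return result;
}

void aligned_free_padded(void* p) {
    if (p)   // recover the original malloc()ed pointer and free it
        std::free(reinterpret_cast<void*>(static_cast<std::uintptr_t*>(p)[-1]));
}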
); - free(base); - } -} - -void* __TBB_EXPORTED_FUNC allocate_via_handler_v3( size_t n ) { - void* result; - result = (*MallocHandler) (n); - if (!result) { - // Overflow - throw_exception(eid_bad_alloc); - } - return result; -} - -void __TBB_EXPORTED_FUNC deallocate_via_handler_v3( void *p ) { - if( p ) { - (*FreeHandler)( p ); - } -} - -bool __TBB_EXPORTED_FUNC is_malloc_used_v3() { - if (MallocHandler == &DummyMalloc) { - void* void_ptr = (*MallocHandler)(1); - (*FreeHandler)(void_ptr); - } - __TBB_ASSERT( MallocHandler!=&DummyMalloc && FreeHandler!=&DummyFree, NULL ); - __TBB_ASSERT((MallocHandler==&malloc && FreeHandler==&free) || - (MallocHandler!=&malloc && FreeHandler!=&free), NULL ); - return MallocHandler == &malloc; -} - -} // namespace internal - -} // namespace tbb - -#if __TBB_RML_STATIC -#include "tbb/atomic.h" -static tbb::atomic<int> module_inited; -namespace tbb { -namespace internal { -void DoOneTimeInitializations() { - if( module_inited!=2 ) { - if( module_inited.compare_and_swap(1, 0)==0 ) { - initialize_cache_aligned_allocator(); - module_inited = 2; - } else { - do { - __TBB_Yield(); - } while( module_inited!=2 ); - } - } -} -}} //namespace tbb::internal -#endif diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/cilk-tbb-interop.h b/deal.II/bundled/tbb30_104oss/src/tbb/cilk-tbb-interop.h deleted file mode 100644 index 87555b81d3..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/cilk-tbb-interop.h +++ /dev/null @@ -1,120 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef CILK_TBB_INTEROP_H -#define CILK_TBB_INTEROP_H - -#ifndef _WIN32 -#ifdef IN_CILK_RUNTIME -#define CILK_EXPORT __attribute__((visibility("protected"))) -#else -#define CILK_EXPORT /* nothing */ -#endif -#else -#ifdef IN_CILK_RUNTIME -#define CILK_EXPORT __declspec(dllexport) -#else -#define CILK_EXPORT __declspec(dllimport) -#endif // IN_CILK_RUNTIME -#endif // _WIN32 - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -/* A return code. 
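The DummyMalloc/DummyFree indirection above lazily binds the handlers on first use and funnels every caller through a one-time initialization. A compressed sketch of the same pattern with an atomic init guard; the names are illustrative, and the real code additionally tries dynamic_link before falling back to malloc/free:

// Sketch of the lazy function-pointer-handler pattern: the handlers start out
// pointing at dummies that run one-time initialization and then forward to
// whatever the initialization installed. Like the original, it relies on
// pointer-sized stores being effectively atomic on the targeted platforms.
#include <atomic>
#include <cstdlib>

static void* dummy_malloc(std::size_t);
static void  dummy_free(void*);

static void* (*malloc_handler)(std::size_t) = &dummy_malloc;
static void  (*free_handler)(void*)         = &dummy_free;

static std::atomic<int> init_state{0};    // 0 = untouched, 1 = in progress, 2 = done

static void one_time_init() {
    int expected = 0;
    if (init_state.compare_exchange_strong(expected, 1)) {
        // Winner: bind the real handlers (here just the malloc/free fallback).
        malloc_handler = &std::malloc;
        free_handler   = &std::free;
        init_state.store(2);
    } else {
        while (init_state.load() != 2) { /* spin until the winner finishes (real code yields) */ }
    }
}

static void* dummy_malloc(std::size_t n) { one_time_init(); return (*malloc_handler)(n); }
static void  dummy_free(void* p)         { one_time_init(); (*free_handler)(p); }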
0 indicates success */ -typedef int __cilk_tbb_retcode; - -enum __cilk_tbb_stack_op { - CILK_TBB_STACK_ORPHAN, // disconnecting stack from a thread - CILK_TBB_STACK_ADOPT, // reconnecting orphaned stack to a trhead - CILK_TBB_STACK_RELEASE // releasing stack -}; - -typedef __cilk_tbb_retcode (*__cilk_tbb_pfn_stack_op)(enum __cilk_tbb_stack_op, void* data); - -typedef __cilk_tbb_retcode (*__cilk_tbb_pfn_unwatch_stacks)(void *data); - -/* Each thunk structure has two pointers: "routine" and "data". - The caller of the thunk invokes *routine, passing "data" as the void* parameter. */ - -/* Thunk invoked by Cilk when it changes the relationship between a stack and a thread. - It does not matter what stack the thunk runs on. - The thread (not fiber) on which the thunk runs is important. - - CILK_TBB_STACK_ORPHAN - The thunk must be invoked on the thread disconnecting itself from the stack. - Must "happen before" the stack is adopted elsewhere. - CILK_TBB_STACK_ADOPT - The thunk must be invoked on the thread adopting the stack. - CILK_TBB_STACK_RELEASE - The thunk must be invoked on the thread doing the releasing, - Must "happen before" the stack is used elsewhere. - - When a non-empty stack is transfered between threads, the first thread must orphan it - and the second thread must adopt it. - - An empty stack can be transfered similarly, or simply released by the first thread. - - Here is a summary of the actions as transitions on a state machine. - - watch ORPHAN - -->--> -->-- - / \ / \ - (freed empty stack) (TBB sees stack running on thread) (stack in limbo) - | \ / \ / | - | --<-- --<-- | - ^ RELEASE or ADOPT V - \ unwatch / - \ / - --------------------------<--------------------------- - RELEASE -*/ -struct __cilk_tbb_stack_op_thunk { - __cilk_tbb_pfn_stack_op routine; - void* data; /* Set by TBB */ -}; - -/* Thunk invoked by TBB when it is no longer interested in watching the stack bound to the current thread. */ -struct __cilk_tbb_unwatch_thunk { - __cilk_tbb_pfn_unwatch_stacks routine; - void* data; -}; - -/* Called by TBB, defined by Cilk. - Requests that callee invoke __cilk_tbb_stack_op_thunk when it orphans a stack. - Callee sets *u to a thunk that TBB should call when it is no longer interested in watching the stack. */ -CILK_EXPORT -__cilk_tbb_retcode __cilkrts_watch_stack(struct __cilk_tbb_unwatch_thunk* u, - struct __cilk_tbb_stack_op_thunk o); - -#ifdef __cplusplus -} -#endif /* __cplusplus */ - -#endif // CILK_TBB_INTEROP_H diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_hash_map.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_hash_map.cpp deleted file mode 100644 index b37387dadd..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_hash_map.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
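Both sides of this interface exchange thunks, a plain function pointer plus an opaque data pointer that the installer fills in and the caller later passes back verbatim. A small self-contained usage sketch of that convention; my_stack_op and scheduler_data are hypothetical stand-ins for whatever a real watcher would register:

// Sketch of the routine + data "thunk" convention described above. The struct
// mirrors the shape of __cilk_tbb_stack_op_thunk.
#include <cstdio>

enum stack_op { STACK_ORPHAN, STACK_ADOPT, STACK_RELEASE };

typedef int (*stack_op_fn)(stack_op op, void* data);

struct stack_op_thunk {
    stack_op_fn routine;
    void*       data;     // opaque context set by the party that installs the thunk
};

struct scheduler_data { int watched_stacks; };

static int my_stack_op(stack_op op, void* data) {
    scheduler_data* sd = static_cast<scheduler_data*>(data);
    if (op == STACK_RELEASE) --sd->watched_stacks;
    std::printf("stack op %d, now watching %d stacks\n", int(op), sd->watched_stacks);
    return 0;   // 0 indicates success, matching the header's convention
}

int main() {
    scheduler_data sd = { 1 };
    stack_op_thunk t = { &my_stack_op, &sd };   // installer fills in both fields
    return t.routine(STACK_RELEASE, t.data);    // caller invokes *routine with the stored data
}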
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/concurrent_hash_map.h" - -namespace tbb { - -namespace internal { -#if !TBB_NO_LEGACY -struct hash_map_segment_base { - typedef spin_rw_mutex segment_mutex_t; - //! Type of a hash code. - typedef size_t hashcode_t; - //! Log2 of n_segment - static const size_t n_segment_bits = 6; - //! Maximum size of array of chains - static const size_t max_physical_size = size_t(1)<<(8*sizeof(hashcode_t)-n_segment_bits); - //! Mutex that protects this segment - segment_mutex_t my_mutex; - // Number of nodes - atomic<size_t> my_logical_size; - // Size of chains - /** Always zero or a power of two */ - size_t my_physical_size; - //! True if my_logical_size>=my_physical_size. - /** Used to support Intel(R) Thread Checker. */ - bool __TBB_EXPORTED_METHOD internal_grow_predicate() const; -}; - -bool hash_map_segment_base::internal_grow_predicate() const { - // Intel(R) Thread Checker considers the following reads to be races, so we hide them in the - // library so that Intel(R) Thread Checker will ignore them. The reads are used in a double-check - // context, so the program is nonetheless correct despite the race. - return my_logical_size >= my_physical_size && my_physical_size < max_physical_size; -} -#endif//!TBB_NO_LEGACY - -} // namespace internal - -} // namespace tbb - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.cpp deleted file mode 100644 index 3cd5054240..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.cpp +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
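internal_grow_predicate() above is the racy half of a double-check: the unlocked read only hints that growth might be needed, and the real decision is re-made under the segment mutex. A minimal sketch of that shape with placeholder names; the unlocked read of the capacity is the deliberate race the comment refers to:

// Sketch of the double-check pattern internal_grow_predicate() participates in.
#include <atomic>
#include <cstddef>
#include <mutex>

struct segment_sketch {
    std::mutex               mtx;
    std::atomic<std::size_t> logical_size{0};    // number of elements
    std::size_t              physical_size = 8;  // capacity, only changed under mtx

    bool maybe_needs_growth() const {            // intentionally racy hint
        return logical_size.load(std::memory_order_relaxed) >= physical_size;
    }

    void insert_one() {
        if (maybe_needs_growth()) {                         // cheap unlocked check
            std::lock_guard<std::mutex> lock(mtx);
            if (logical_size.load() >= physical_size)       // re-check under the lock
                physical_size *= 2;                         // grow (details elided)
        }
        ++logical_size;
    }
};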
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "concurrent_monitor.h" - -namespace tbb { -namespace internal { - -void concurrent_monitor::prepare_wait( thread_context& thr, void* ctx ) { - // this is good place to pump previous spurious wakeup - if( thr.spurious ) { - thr.spurious = false; - thr.sema.P(); - } - thr.context = ctx; - thr.in_waitset = true; - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - thr.epoch = epoch; - waitset_ec.add( (waitset_t::node_t*)&thr ); - } - __TBB_full_memory_fence(); -} - -void concurrent_monitor::cancel_wait( thread_context& thr ) { - // spurious wakeup will be pumped in the following prepare_wait() - thr.spurious = true; - // try to remove node from waitset - bool th_in_waitset = thr.in_waitset; - if( th_in_waitset ) { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - if (thr.in_waitset) { - // successfully removed from waitset, - // so there will be no spurious wakeup - thr.in_waitset = false; - thr.spurious = false; - waitset_ec.remove( (waitset_t::node_t&)thr ); - } - } -} - -void concurrent_monitor::notify_one_relaxed() { - if( waitset_ec.size()==0 ) - return; - waitset_node_t* n; - const waitset_node_t* end = waitset_ec.end(); - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - epoch = epoch + 1; - n = waitset_ec.front(); - if( n!=end ) { - waitset_ec.remove( *n ); - to_thread_context(n)->in_waitset = false; - } - } - if( n!=end ) - to_thread_context(n)->sema.V(); -} - -void concurrent_monitor::notify_all_relaxed() { - if( waitset_ec.size()==0 ) - return; - dllist_t temp; - const waitset_node_t* end; - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - epoch = epoch + 1; - waitset_ec.flush_to( temp ); - end = temp.end(); - for( waitset_node_t* n=temp.front(); n!=end; n=n->next ) - to_thread_context(n)->in_waitset = false; - } - waitset_node_t* nxt; - for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) { - nxt = n->next; - to_thread_context(n)->sema.V(); - } -#if TBB_USE_DEBUG - temp.clear(); -#endif -} - -} // namespace internal -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.h b/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.h deleted file mode 100644 index 77ae9a347c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_monitor.h +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_concurrent_monitor_H -#define __TBB_concurrent_monitor_H - -#include "tbb/tbb_stddef.h" -#include "tbb/atomic.h" -#include "tbb/spin_mutex.h" -#include "semaphore.h" - -namespace tbb { -namespace internal { - -//! Circular doubly-linked list with sentinel -/** head.next points to the front and head.prev points to the back */ -class circular_doubly_linked_list_with_sentinel : no_copy { -public: - struct node_t { - node_t* next; - node_t* prev; - node_t() : next(NULL), prev(NULL) {} - }; - - // ctor - circular_doubly_linked_list_with_sentinel() {clear();} - // dtor - ~circular_doubly_linked_list_with_sentinel() {__TBB_ASSERT( head.next==&head && head.prev==&head, "the list is not empty" );} - - inline size_t size() const {return count;} - inline bool empty() const {return size()==0;} - inline node_t* front() const {return head.next;} - inline node_t* last() const {return head.prev;} - inline node_t* begin() const {return front();} - inline const node_t* end() const {return &head;} - - //! add to the back of the list - inline void add( node_t* n ) { - count = count + 1; - n->prev = head.prev; - n->next = &head; - head.prev->next = n; - head.prev = n; - } - - //! remove node 'n' from the 'this' list - inline void remove( node_t& n ) { - count = count - 1; - n.prev->next = n.next; - n.next->prev = n.prev; - } - - //! move all elements to 'lst' and initiallize the 'this' list - inline void flush_to( circular_doubly_linked_list_with_sentinel& lst ) { - if( count>0 ) { - lst.count = count; - lst.head.next = head.next; - lst.head.prev = head.prev; - head.next->prev = &lst.head; - head.prev->next = &lst.head; - clear(); - } - } - -#if !TBB_USE_DEBUG -private: -#endif - atomic<size_t> count; - node_t head; - void clear() {count = 0; head.next = &head; head.prev = &head;} -}; - -typedef circular_doubly_linked_list_with_sentinel waitset_t; -typedef circular_doubly_linked_list_with_sentinel dllist_t; -typedef circular_doubly_linked_list_with_sentinel::node_t waitset_node_t; - -class concurrent_monitor; - -//! concurrent_monitor -/** fine-grained concurrent_monitor implementation */ -class concurrent_monitor : no_copy { -public: - /** per-thread descriptor for concurrent_monitor */ - class thread_context : waitset_node_t, no_copy { - friend class concurrent_monitor; - public: - thread_context() : spurious(false), context(NULL) {epoch = 0; in_waitset = false;} - ~thread_context() { if( spurious ) sema.P(); } - private: - semaphore sema; - tbb::atomic<unsigned> epoch; - tbb::atomic<bool> in_waitset; - bool spurious; - void* context; - }; - - //! ctor - concurrent_monitor() {epoch = 0;} - - //! prepare wait by inserting 'thr' into the wailt queue - void prepare_wait( thread_context& thr, void* ctx = 0 ); - - //! Commit wait if even count has not changed; otherwise, cancel wait. - /** Returns true of commited; false if canceled. */ - inline bool commit_wait( thread_context& thr ) { - bool do_it = thr.epoch==epoch; - // this check is just an optimization - if( do_it ) { - thr.sema.P(); - __TBB_ASSERT( !thr.in_waitset, "still in the queue?" 
); - } else { - cancel_wait( thr ); - } - return do_it; - } - //! Cancel the wait. Removes the thread from the wait queue if not removed yet. - void cancel_wait( thread_context& thr ); - - //! Notify one thread about the event - void notify_one() {__TBB_full_memory_fence(); notify_one_relaxed();} - - //! Notify one thread about the event. Relaxed version. - void notify_one_relaxed(); - - //! Notify all waiting threads of the event - void notify_all() {__TBB_full_memory_fence(); notify_all_relaxed();} - - //! Notify all waiting threads of the event; Relaxed version - void notify_all_relaxed(); - - //! Notify waiting threads of the event that satisfies the given predicate - template<typename P> void notify( const P& predicate ) {__TBB_full_memory_fence(); notify_relaxed( predicate );} - - //! Notify waiting threads of the event that satisfies the given predicate; Relaxed version - template<typename P> void notify_relaxed( const P& predicate ); - -private: - tbb::spin_mutex mutex_ec; - waitset_t waitset_ec; - tbb::atomic<unsigned> epoch; - thread_context* to_thread_context( waitset_node_t* n ) { return static_cast<thread_context*>(n); } -}; - -template<typename P> -void concurrent_monitor::notify_relaxed( const P& predicate ) { - if( waitset_ec.size()==0 ) - return; - dllist_t temp; - waitset_node_t* nxt; - const waitset_node_t* end = waitset_ec.end(); - { - tbb::spin_mutex::scoped_lock l( mutex_ec ); - epoch = epoch + 1; - for( waitset_node_t* n=waitset_ec.last(); n!=end; n=nxt ) { - nxt = n->prev; - thread_context* thr = to_thread_context( n ); - if( predicate( thr->context ) ) { - waitset_ec.remove( *n ); - thr->in_waitset = false; - temp.add( n ); - } - } - } - - end = temp.end(); - for( waitset_node_t* n=temp.front(); n!=end; n=nxt ) { - nxt = n->next; - to_thread_context(n)->sema.V(); - } -#if TBB_USE_DEBUG - temp.clear(); -#endif -} - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_concurrent_monitor_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_queue.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_queue.cpp deleted file mode 100644 index 60896607f2..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_queue.cpp +++ /dev/null @@ -1,613 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
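The waiter-side contract of this monitor is prepare_wait, re-check the condition, then either commit_wait or cancel_wait; a notification arriving between prepare and commit bumps the epoch so the sleep is skipped. The toy below illustrates that eventcount idea with a single condition_variable; it is not the real per-thread-semaphore implementation, and cancel_wait has no analogue here because there is no explicit wait set:

// Toy epoch-based wait helper showing the prepare/commit shape of concurrent_monitor.
#include <atomic>
#include <condition_variable>
#include <mutex>

class tiny_monitor {
    std::mutex mtx;
    std::condition_variable cv;
    unsigned epoch = 0;
public:
    unsigned prepare_wait() {                 // snapshot the epoch before re-checking
        std::lock_guard<std::mutex> l(mtx);
        return epoch;
    }
    void commit_wait(unsigned snapshot) {     // sleeps only if no notify happened since
        std::unique_lock<std::mutex> l(mtx);
        cv.wait(l, [&] { return epoch != snapshot; });
    }
    void notify_all_threads() {
        { std::lock_guard<std::mutex> l(mtx); ++epoch; }
        cv.notify_all();
    }
};
// Waiter usage: unsigned s = m.prepare_wait(); if (!condition()) m.commit_wait(s);
// "Cancelling" in this toy is simply not committing.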
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_machine.h" -#include "tbb/tbb_exception.h" -#include "tbb/_concurrent_queue_internal.h" -#include "concurrent_monitor.h" -#include "itt_notify.h" -#include <new> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> // for memset() - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -using namespace std; - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -#define RECORD_EVENTS 0 - - -namespace tbb { - -namespace internal { - -typedef concurrent_queue_base_v3 concurrent_queue_base; - -typedef size_t ticket; - -//! A queue using simple locking. -/** For efficient, this class has no constructor. - The caller is expected to zero-initialize it. */ -struct micro_queue { - typedef concurrent_queue_base::page page; - - friend class micro_queue_pop_finalizer; - - atomic<page*> head_page; - atomic<ticket> head_counter; - - atomic<page*> tail_page; - atomic<ticket> tail_counter; - - spin_mutex page_mutex; - - void push( const void* item, ticket k, concurrent_queue_base& base ); - - bool pop( void* dst, ticket k, concurrent_queue_base& base ); - - micro_queue& assign( const micro_queue& src, concurrent_queue_base& base ); - - page* make_copy ( concurrent_queue_base& base, const page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) ; - - void make_invalid( ticket k ); -}; - -// we need to yank it out of micro_queue because of concurrent_queue_base::deallocate_page being virtual. -class micro_queue_pop_finalizer: no_copy { - typedef concurrent_queue_base::page page; - ticket my_ticket; - micro_queue& my_queue; - page* my_page; - concurrent_queue_base &base; -public: - micro_queue_pop_finalizer( micro_queue& queue, concurrent_queue_base& b, ticket k, page* p ) : - my_ticket(k), my_queue(queue), my_page(p), base(b) - {} - ~micro_queue_pop_finalizer() { - page* p = my_page; - if( p ) { - spin_mutex::scoped_lock lock( my_queue.page_mutex ); - page* q = p->next; - my_queue.head_page = q; - if( !q ) { - my_queue.tail_page = NULL; - } - } - my_queue.head_counter = my_ticket; - if( p ) - base.deallocate_page( p ); - } -}; - -struct predicate_leq { - ticket t; - predicate_leq( ticket t_ ) : t(t_) {} - bool operator() ( void* p ) const {return (ticket)p<=t;} -}; - -//! Internal representation of a ConcurrentQueue. -/** For efficient, this class has no constructor. - The caller is expected to zero-initialize it. */ -class concurrent_queue_rep { -public: -private: - friend struct micro_queue; - - //! Approximately n_queue/golden ratio - static const size_t phi = 3; - -public: - //! Must be power of 2 - static const size_t n_queue = 8; - - //! 
Map ticket to an array index - static size_t index( ticket k ) { - return k*phi%n_queue; - } - - atomic<ticket> head_counter; - concurrent_monitor items_avail; - atomic<size_t> n_invalid_entries; - char pad1[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor)+sizeof(atomic<size_t>))&(NFS_MaxLineSize-1))]; - - atomic<ticket> tail_counter; - concurrent_monitor slots_avail; - char pad2[NFS_MaxLineSize-((sizeof(atomic<ticket>)+sizeof(concurrent_monitor))&(NFS_MaxLineSize-1))]; - micro_queue array[n_queue]; - - micro_queue& choose( ticket k ) { - // The formula here approximates LRU in a cache-oblivious way. - return array[index(k)]; - } - - //! Value for effective_capacity that denotes unbounded queue. - static const ptrdiff_t infinite_capacity = ptrdiff_t(~size_t(0)/2); -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // unary minus operator applied to unsigned type, result still unsigned - #pragma warning( push ) - #pragma warning( disable: 4146 ) -#endif - -static void* invalid_page; - -//------------------------------------------------------------------------ -// micro_queue -//------------------------------------------------------------------------ -void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base ) { - k &= -concurrent_queue_rep::n_queue; - page* p = NULL; - size_t index = k/concurrent_queue_rep::n_queue & (base.items_per_page-1); - if( !index ) { - __TBB_TRY { - p = base.allocate_page(); - } __TBB_CATCH(...) { - ++base.my_rep->n_invalid_entries; - make_invalid( k ); - } - p->mask = 0; - p->next = NULL; - } - - if( tail_counter!=k ) { - atomic_backoff backoff; - do { - backoff.pause(); - // no memory. throws an exception; assumes concurrent_queue_rep::n_queue>1 - if( tail_counter&0x1 ) { - ++base.my_rep->n_invalid_entries; - throw_exception( eid_bad_last_alloc ); - } - } while( tail_counter!=k ) ; - } - - if( p ) { - spin_mutex::scoped_lock lock( page_mutex ); - if( page* q = tail_page ) - q->next = p; - else - head_page = p; - tail_page = p; - } else { - p = tail_page; - } - ITT_NOTIFY( sync_acquired, p ); - - __TBB_TRY { - base.copy_item( *p, index, item ); - ITT_NOTIFY( sync_releasing, p ); - // If no exception was thrown, mark item as present. - p->mask |= uintptr_t(1)<<index; - tail_counter += concurrent_queue_rep::n_queue; - } __TBB_CATCH(...) { - ++base.my_rep->n_invalid_entries; - tail_counter += concurrent_queue_rep::n_queue; - __TBB_RETHROW(); - } -} - -bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) { - k &= -concurrent_queue_rep::n_queue; - spin_wait_until_eq( head_counter, k ); - spin_wait_while_eq( tail_counter, k ); - page& p = *head_page; - __TBB_ASSERT( &p, NULL ); - size_t index = k/concurrent_queue_rep::n_queue & (base.items_per_page-1); - bool success = false; - { - micro_queue_pop_finalizer finalizer( *this, base, k+concurrent_queue_rep::n_queue, index==base.items_per_page-1 ? 
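Every push and pop claims a monotonically increasing ticket, and the ticket alone determines both which of the eight micro-queues is used (k*phi modulo n_queue) and which slot of the current page the item occupies. A small sketch of that arithmetic, assuming items_per_page is a power of two as the code requires:

// Sketch of the ticket -> (micro-queue, page slot) mapping used above.
#include <cstddef>
#include <cstdio>

static const std::size_t n_queue = 8;   // must be a power of two
static const std::size_t phi     = 3;   // ~ n_queue / golden ratio, coprime with n_queue

struct slot_ref { std::size_t subqueue, page_slot; };

slot_ref locate(std::size_t ticket, std::size_t items_per_page) {
    slot_ref r;
    r.subqueue  = ticket * phi % n_queue;                      // spreads neighbouring tickets apart
    r.page_slot = ticket / n_queue & (items_per_page - 1);     // position within the page
    return r;
}

int main() {
    for (std::size_t t = 0; t < 8; ++t) {
        slot_ref r = locate(t, 16);
        std::printf("ticket %zu -> micro-queue %zu, slot %zu\n", t, r.subqueue, r.page_slot);
    }
}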
&p : NULL ); - if( p.mask & uintptr_t(1)<<index ) { - success = true; - ITT_NOTIFY( sync_acquired, dst ); - ITT_NOTIFY( sync_acquired, head_page ); - base.assign_and_destroy_item( dst, p, index ); - ITT_NOTIFY( sync_releasing, head_page ); - } else { - --base.my_rep->n_invalid_entries; - } - } - return success; -} - -micro_queue& micro_queue::assign( const micro_queue& src, concurrent_queue_base& base ) -{ - head_counter = src.head_counter; - tail_counter = src.tail_counter; - page_mutex = src.page_mutex; - - const page* srcp = src.head_page; - if( srcp ) { - ticket g_index = head_counter; - __TBB_TRY { - size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep::n_queue; - size_t index = head_counter/concurrent_queue_rep::n_queue & (base.items_per_page-1); - size_t end_in_first_page = (index+n_items<base.items_per_page)?(index+n_items):base.items_per_page; - - head_page = make_copy( base, srcp, index, end_in_first_page, g_index ); - page* cur_page = head_page; - - if( srcp != src.tail_page ) { - for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) { - cur_page->next = make_copy( base, srcp, 0, base.items_per_page, g_index ); - cur_page = cur_page->next; - } - - __TBB_ASSERT( srcp==src.tail_page, NULL ); - - size_t last_index = tail_counter/concurrent_queue_rep::n_queue & (base.items_per_page-1); - if( last_index==0 ) last_index = base.items_per_page; - - cur_page->next = make_copy( base, srcp, 0, last_index, g_index ); - cur_page = cur_page->next; - } - tail_page = cur_page; - } __TBB_CATCH(...) { - make_invalid( g_index ); - } - } else { - head_page = tail_page = NULL; - } - return *this; -} - -concurrent_queue_base::page* micro_queue::make_copy( concurrent_queue_base& base, const concurrent_queue_base::page* src_page, size_t begin_in_page, size_t end_in_page, ticket& g_index ) -{ - page* new_page = base.allocate_page(); - new_page->next = NULL; - new_page->mask = src_page->mask; - for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index ) - if( new_page->mask & uintptr_t(1)<<begin_in_page ) - base.copy_page_item( *new_page, begin_in_page, *src_page, begin_in_page ); - return new_page; -} - -void micro_queue::make_invalid( ticket k ) -{ - static concurrent_queue_base::page dummy = {static_cast<page*>((void*)1), 0}; - // mark it so that no more pushes are allowed. - invalid_page = &dummy; - { - spin_mutex::scoped_lock lock( page_mutex ); - tail_counter = k+concurrent_queue_rep::n_queue+1; - if( page* q = tail_page ) - q->next = static_cast<page*>(invalid_page); - else - head_page = static_cast<page*>(invalid_page); - tail_page = static_cast<page*>(invalid_page); - } - __TBB_RETHROW(); -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning( pop ) -#endif // warning 4146 is back - -//------------------------------------------------------------------------ -// concurrent_queue_base -//------------------------------------------------------------------------ -concurrent_queue_base_v3::concurrent_queue_base_v3( size_t item_size ) { - items_per_page = item_size<=8 ? 32 : - item_size<=16 ? 16 : - item_size<=32 ? 8 : - item_size<=64 ? 4 : - item_size<=128 ? 2 : - 1; - my_capacity = size_t(-1)/(item_size>1 ? 
item_size : 2); - my_rep = cache_aligned_allocator<concurrent_queue_rep>().allocate(1); - __TBB_ASSERT( (size_t)my_rep % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->head_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->tail_counter % NFS_GetLineSize()==0, "alignment error" ); - __TBB_ASSERT( (size_t)&my_rep->array % NFS_GetLineSize()==0, "alignment error" ); - memset(my_rep,0,sizeof(concurrent_queue_rep)); - new ( &my_rep->items_avail ) concurrent_monitor(); - new ( &my_rep->slots_avail ) concurrent_monitor(); - this->item_size = item_size; -} - -concurrent_queue_base_v3::~concurrent_queue_base_v3() { - size_t nq = my_rep->n_queue; - for( size_t i=0; i<nq; i++ ) - __TBB_ASSERT( my_rep->array[i].tail_page==NULL, "pages were not freed properly" ); - cache_aligned_allocator<concurrent_queue_rep>().deallocate(my_rep,1); -} - -void concurrent_queue_base_v3::internal_push( const void* src ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter++; - ptrdiff_t e = my_capacity; - atomic_backoff backoff; - concurrent_monitor::thread_context thr_ctx; -#if DO_ITT_NOTIFY - bool sync_prepare_done = false; -#endif - while( (ptrdiff_t)(k-r.head_counter)>=e ) { -#if DO_ITT_NOTIFY - if( !sync_prepare_done ) { - ITT_NOTIFY( sync_prepare, &sync_prepare_done ); - sync_prepare_done = true; - } -#endif - if( !backoff.bounded_pause() ) { - bool slept = false; - r.slots_avail.prepare_wait( thr_ctx, (void*) ((ptrdiff_t)(k-e)) ); - while( (ptrdiff_t)(k-r.head_counter)>=const_cast<volatile ptrdiff_t&>(e = my_capacity) ) { - if( (slept = r.slots_avail.commit_wait( thr_ctx ) )==true ) - break; - r.slots_avail.prepare_wait( thr_ctx, (void*) ((ptrdiff_t)(k-e)) ); - } - if( !slept ) - r.slots_avail.cancel_wait( thr_ctx ); - break; - } - e = const_cast<volatile ptrdiff_t&>(my_capacity); - } - ITT_NOTIFY( sync_acquired, &sync_prepare_done ); - r.choose( k ).push( src, k, *this ); - r.items_avail.notify( predicate_leq(k) ); -} - -void concurrent_queue_base_v3::internal_pop( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; - atomic_backoff backoff; - concurrent_monitor::thread_context thr_ctx; -#if DO_ITT_NOTIFY - bool sync_prepare_done = false; -#endif - do { - k=r.head_counter++; - while( r.tail_counter<=k ) { -#if DO_ITT_NOTIFY - if( !sync_prepare_done ) { - ITT_NOTIFY( sync_prepare, dst ); - sync_prepare_done = true; - } -#endif - // Queue is empty; pause and re-try a few times - if( !backoff.bounded_pause() ) { - bool slept = false; - r.items_avail.prepare_wait( thr_ctx, (void*)k ); - while( r.tail_counter<=k ) { - if( (slept = r.items_avail.commit_wait( thr_ctx ) )==true ) - break; - r.items_avail.prepare_wait( thr_ctx, (void*)k ); - } - if( !slept ) - r.items_avail.cancel_wait( thr_ctx ); - break; // break from inner while - } - } // break to here - } while( !r.choose(k).pop(dst,k,*this) ); - - // wake up a producer.. - r.slots_avail.notify( predicate_leq(k) ); -} - -bool concurrent_queue_base_v3::internal_pop_if_present( void* dst ) { - concurrent_queue_rep& r = *my_rep; - ticket k; - do { - k = r.head_counter; - for(;;) { - if( r.tail_counter<=k ) { - // Queue is empty - return false; - } - // Queue had item with ticket k when we looked. Attempt to get that item. - ticket tk=k; - k = r.head_counter.compare_and_swap( tk+1, tk ); - if( k==tk ) - break; - // Another thread snatched the item, retry. 
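internal_push() and internal_pop() first spin with a bounded backoff and only then park on the concurrent_monitor, so a thread that is only slightly behind never pays for a kernel-level sleep. A self-contained sketch of that two-phase wait, with a condition_variable standing in for the monitor and a single flag standing in for "my ticket has been produced":

// Sketch of the "bounded spin, then block" pattern used by internal_push/internal_pop.
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

static std::atomic<bool>       ready{false};
static std::mutex              mtx;
static std::condition_variable cv;

void wait_for_item() {
    // Phase 1: optimistic bounded spin, cheap if the other side is only a little behind.
    for (int pause = 1; pause <= 16; pause *= 2) {
        if (ready.load(std::memory_order_acquire)) return;
        for (int i = 0; i < pause; ++i) std::this_thread::yield();
    }
    // Phase 2: give up the CPU and block until notified.
    std::unique_lock<std::mutex> l(mtx);
    cv.wait(l, [] { return ready.load(std::memory_order_acquire); });
}

void publish_item() {
    { std::lock_guard<std::mutex> l(mtx); ready.store(true, std::memory_order_release); }
    cv.notify_one();
}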
- } - } while( !r.choose( k ).pop( dst, k, *this ) ); - - r.slots_avail.notify( predicate_leq(k) ); - - return true; -} - -bool concurrent_queue_base_v3::internal_push_if_not_full( const void* src ) { - concurrent_queue_rep& r = *my_rep; - ticket k = r.tail_counter; - for(;;) { - if( (ptrdiff_t)(k-r.head_counter)>=my_capacity ) { - // Queue is full - return false; - } - // Queue had empty slot with ticket k when we looked. Attempt to claim that slot. - ticket tk=k; - k = r.tail_counter.compare_and_swap( tk+1, tk ); - if( k==tk ) - break; - // Another thread claimed the slot, so retry. - } - r.choose(k).push(src,k,*this); - - r.items_avail.notify( predicate_leq(k) ); - return true; -} - -ptrdiff_t concurrent_queue_base_v3::internal_size() const { - __TBB_ASSERT( sizeof(ptrdiff_t)<=sizeof(size_t), NULL ); - return ptrdiff_t(my_rep->tail_counter-my_rep->head_counter-my_rep->n_invalid_entries); -} - -bool concurrent_queue_base_v3::internal_empty() const { - ticket tc = my_rep->tail_counter; - ticket hc = my_rep->head_counter; - // if tc!=r.tail_counter, the queue was not empty at some point between the two reads. - return ( tc==my_rep->tail_counter && ptrdiff_t(tc-hc-my_rep->n_invalid_entries)<=0 ); -} - -void concurrent_queue_base_v3::internal_set_capacity( ptrdiff_t capacity, size_t /*item_size*/ ) { - my_capacity = capacity<0 ? concurrent_queue_rep::infinite_capacity : capacity; -} - -void concurrent_queue_base_v3::internal_finish_clear() { - size_t nq = my_rep->n_queue; - for( size_t i=0; i<nq; ++i ) { - page* tp = my_rep->array[i].tail_page; - __TBB_ASSERT( my_rep->array[i].head_page==tp, "at most one page should remain" ); - if( tp!=NULL) { - if( tp!=invalid_page ) deallocate_page( tp ); - my_rep->array[i].tail_page = NULL; - } - } -} - -void concurrent_queue_base_v3::internal_throw_exception() const { - throw_exception( eid_bad_alloc ); -} - -void concurrent_queue_base_v3::assign( const concurrent_queue_base& src ) { - items_per_page = src.items_per_page; - my_capacity = src.my_capacity; - - // copy concurrent_queue_rep. - my_rep->head_counter = src.my_rep->head_counter; - my_rep->tail_counter = src.my_rep->tail_counter; - my_rep->n_invalid_entries = src.my_rep->n_invalid_entries; - - // copy micro_queues - for( size_t i = 0; i<my_rep->n_queue; ++i ) - my_rep->array[i].assign( src.my_rep->array[i], *this); - - __TBB_ASSERT( my_rep->head_counter==src.my_rep->head_counter && my_rep->tail_counter==src.my_rep->tail_counter, - "the source concurrent queue should not be concurrently modified." ); -} - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_rep -//------------------------------------------------------------------------ -class concurrent_queue_iterator_rep: no_assign { -public: - ticket head_counter; - const concurrent_queue_base& my_queue; - const size_t offset_of_last; - concurrent_queue_base::page* array[concurrent_queue_rep::n_queue]; - concurrent_queue_iterator_rep( const concurrent_queue_base& queue, size_t offset_of_last_ ) : - head_counter(queue.my_rep->head_counter), - my_queue(queue), - offset_of_last(offset_of_last_) - { - const concurrent_queue_rep& rep = *queue.my_rep; - for( size_t k=0; k<concurrent_queue_rep::n_queue; ++k ) - array[k] = rep.array[k].head_page; - } - //! Set item to point to kth element. Return true if at end of queue or item is marked valid; false otherwise. 
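internal_empty() avoids any lock by reading tail, then head, then re-reading tail; if the two tail reads agree, the pair describes the queue at some instant in between. A sketch of that validation trick with placeholder counters:

// Sketch of the read-validate trick used by internal_empty() above.
#include <atomic>
#include <cstddef>

static std::atomic<std::size_t> head_counter{0};
static std::atomic<std::size_t> tail_counter{0};

bool observed_empty() {
    std::size_t tc = tail_counter.load();
    std::size_t hc = head_counter.load();
    // If tail has not moved, (hc, tc) is a consistent pair: the queue held exactly
    // tc - hc items at some moment between the two tail reads. If tail did move,
    // report "not observed empty", as the original does.
    return tc == tail_counter.load() && std::ptrdiff_t(tc - hc) <= 0;
}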
- bool get_item( void*& item, size_t k ) { - if( k==my_queue.my_rep->tail_counter ) { - item = NULL; - return true; - } else { - concurrent_queue_base::page* p = array[concurrent_queue_rep::index(k)]; - __TBB_ASSERT(p,NULL); - size_t i = k/concurrent_queue_rep::n_queue & (my_queue.items_per_page-1); - item = static_cast<unsigned char*>(static_cast<void*>(p)) + offset_of_last + my_queue.item_size*i; - return (p->mask & uintptr_t(1)<<i)!=0; - } - } -}; - -//------------------------------------------------------------------------ -// concurrent_queue_iterator_base -//------------------------------------------------------------------------ - -void concurrent_queue_iterator_base_v3::initialize( const concurrent_queue_base& queue, size_t offset_of_last ) { - my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep(queue,offset_of_last); - size_t k = my_rep->head_counter; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue ) { - initialize(queue,0); -} - -concurrent_queue_iterator_base_v3::concurrent_queue_iterator_base_v3( const concurrent_queue_base& queue, size_t offset_of_last ) { - initialize(queue,offset_of_last); -} - -void concurrent_queue_iterator_base_v3::assign( const concurrent_queue_iterator_base& other ) { - if( my_rep!=other.my_rep ) { - if( my_rep ) { - cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1); - my_rep = NULL; - } - if( other.my_rep ) { - my_rep = cache_aligned_allocator<concurrent_queue_iterator_rep>().allocate(1); - new( my_rep ) concurrent_queue_iterator_rep( *other.my_rep ); - } - } - my_item = other.my_item; -} - -void concurrent_queue_iterator_base_v3::advance() { - __TBB_ASSERT( my_item, "attempt to increment iterator past end of queue" ); - size_t k = my_rep->head_counter; - const concurrent_queue_base& queue = my_rep->my_queue; -#if TBB_USE_ASSERT - void* tmp; - my_rep->get_item(tmp,k); - __TBB_ASSERT( my_item==tmp, NULL ); -#endif /* TBB_USE_ASSERT */ - size_t i = k/concurrent_queue_rep::n_queue & (queue.items_per_page-1); - if( i==queue.items_per_page-1 ) { - concurrent_queue_base::page*& root = my_rep->array[concurrent_queue_rep::index(k)]; - root = root->next; - } - // advance k - my_rep->head_counter = ++k; - if( !my_rep->get_item(my_item, k) ) advance(); -} - -concurrent_queue_iterator_base_v3::~concurrent_queue_iterator_base_v3() { - //delete my_rep; - cache_aligned_allocator<concurrent_queue_iterator_rep>().deallocate(my_rep, 1); - my_rep = NULL; -} - -} // namespace internal - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_vector.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_vector.cpp deleted file mode 100644 index aa90c3f373..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/concurrent_vector.cpp +++ /dev/null @@ -1,603 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/concurrent_vector.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/tbb_exception.h" -#include "tbb_misc.h" -#include "itt_notify.h" - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4267) -#endif - -using namespace std; - -namespace tbb { - -namespace internal { - class concurrent_vector_base_v3::helper :no_assign { -public: - //! memory page size - static const size_type page_size = 4096; - - inline static bool incompact_predicate(size_type size) { // assert size != 0, see source/test/test_vector_layout.cpp - return size < page_size || ((size-1)%page_size < page_size/2 && size < page_size * 128); // for more details - } - - inline static size_type find_segment_end(const concurrent_vector_base_v3 &v) { - segment_t *s = v.my_segment; - segment_index_t u = s==v.my_storage? pointers_per_short_table : pointers_per_long_table; - segment_index_t k = 0; - while( k < u && s[k].array > internal::vector_allocation_error_flag ) - ++k; - return k; - } - - // TODO: optimize accesses to my_first_block - //! assign first segment size. k - is index of last segment to be allocated, not a count of segments - inline static void assign_first_segment_if_neccessary(concurrent_vector_base_v3 &v, segment_index_t k) { - if( !v.my_first_block ) { - /* There was a suggestion to set first segment according to incompact_predicate: - while( k && !helper::incompact_predicate(segment_size( k ) * element_size) ) - --k; // while previous vector size is compact, decrement - // reasons to not do it: - // * constructor(n) is not ready to accept fragmented segments - // * backward compatibility due to that constructor - // * current version gives additional guarantee and faster init. - // * two calls to reserve() will give the same effect. - */ - v.my_first_block.compare_and_swap(k+1, 0); // store number of segments - } - } - - inline static void *allocate_segment(concurrent_vector_base_v3 &v, size_type n) { - void *ptr = v.vector_allocator_ptr(v, n); - if(!ptr) throw_exception(eid_bad_alloc); // check for bad allocation, throw exception - return ptr; - } - - //! Publish segment so other threads can see it. 
- inline static void publish_segment( segment_t& s, void* rhs ) { - // see also itt_store_pointer_with_release_v3() - ITT_NOTIFY( sync_releasing, &s.array ); - __TBB_store_with_release( s.array, rhs ); - } - - static size_type enable_segment(concurrent_vector_base_v3 &v, size_type k, size_type element_size); - - // TODO: rename as get_segments_table() and return segment pointer - inline static void extend_table_if_necessary(concurrent_vector_base_v3 &v, size_type k, size_type start ) { - if(k >= pointers_per_short_table && v.my_segment == v.my_storage) - extend_segment_table(v, start ); - } - - static void extend_segment_table(concurrent_vector_base_v3 &v, size_type start); - - inline static segment_t &acquire_segment(concurrent_vector_base_v3 &v, size_type index, size_type element_size, bool owner) { - segment_t &s = v.my_segment[index]; // TODO: pass v.my_segment as arument - if( !__TBB_load_with_acquire(s.array) ) { // do not check for internal::vector_allocation_error_flag - if( owner ) { - enable_segment( v, index, element_size ); - } else { - ITT_NOTIFY(sync_prepare, &s.array); - spin_wait_while_eq( s.array, (void*)0 ); - ITT_NOTIFY(sync_acquired, &s.array); - } - } else { - ITT_NOTIFY(sync_acquired, &s.array); - } - if( s.array <= internal::vector_allocation_error_flag ) // check for internal::vector_allocation_error_flag - throw_exception(eid_bad_last_alloc); // throw custom exception, because it's hard to recover after internal::vector_allocation_error_flag correctly - return s; - } - - ///// non-static fields of helper for exception-safe iteration across segments - segment_t *table;// TODO: review all segment_index_t as just short type - size_type first_block, k, sz, start, finish, element_size; - helper(segment_t *segments, size_type fb, size_type esize, size_type index, size_type s, size_type f) throw() - : table(segments), first_block(fb), k(index), sz(0), start(s), finish(f), element_size(esize) {} - inline void first_segment() throw() { - __TBB_ASSERT( start <= finish, NULL ); - __TBB_ASSERT( first_block || !finish, NULL ); - if( k < first_block ) k = 0; // process solid segment at a time - size_type base = segment_base( k ); - __TBB_ASSERT( base <= start, NULL ); - finish -= base; start -= base; // rebase as offsets from segment k - sz = k ? base : segment_size( first_block ); // sz==base for k>0 - } - inline void next_segment() throw() { - finish -= sz; start = 0; // offsets from next segment - if( !k ) k = first_block; - else { ++k; sz <<= 1; } - } - template<typename F> - inline size_type apply(const F &func) { - first_segment(); - while( sz < finish ) { // work for more than one segment - func( table[k], static_cast<char*>(table[k].array)+element_size*start, sz-start ); - next_segment(); - } - func( table[k], static_cast<char*>(table[k].array)+element_size*start, finish-start ); - return k; - } - inline void *get_segment_ptr(size_type index, bool wait) { - segment_t &s = table[index]; - if( !__TBB_load_with_acquire(s.array) && wait ) { - ITT_NOTIFY(sync_prepare, &s.array); - spin_wait_while_eq( s.array, (void*)0 ); - ITT_NOTIFY(sync_acquired, &s.array); - } - return s.array; - } - ~helper() { - if( sz >= finish ) return; // the work is done correctly - cleanup(); - } - - //! Out of line code to assists destructor in infrequent cases. 
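publish_segment() and acquire_segment() above amount to a release-store publication protocol: the owner fills in the segment and then stores the pointer with release semantics, while other threads load with acquire and spin until the pointer appears. A compressed sketch with std::atomic; make_segment is a hypothetical stand-in for allocate_segment:

// Sketch of the publish-with-release / consume-with-acquire protocol used by
// publish_segment()/acquire_segment(). A null pointer means "not published yet".
#include <atomic>
#include <new>
#include <thread>

struct segment_slot { std::atomic<void*> array{nullptr}; };

static void* make_segment() { return ::operator new(4096); }   // stand-in for allocate_segment()

static void publish(segment_slot& s, void* storage) {
    // Release store: everything written into the segment happens-before any
    // acquire load that observes this pointer.
    s.array.store(storage, std::memory_order_release);
}

static void* acquire(segment_slot& s, bool owner) {
    void* p = s.array.load(std::memory_order_acquire);
    if (p) return p;
    if (owner) {                       // the growing thread allocates and publishes it itself
        p = make_segment();
        publish(s, p);
        return p;
    }
    while (!(p = s.array.load(std::memory_order_acquire)))    // others wait for publication
        std::this_thread::yield();
    return p;
}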
- void cleanup(); - - /// TODO: turn into lambda functions when available - struct init_body { - internal_array_op2 func; - const void *arg; - init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {} - void operator()(segment_t &, void *begin, size_type n) const { - func( begin, arg, n ); - } - }; - struct safe_init_body { - internal_array_op2 func; - const void *arg; - safe_init_body(internal_array_op2 init, const void *src) : func(init), arg(src) {} - void operator()(segment_t &s, void *begin, size_type n) const { - if( s.array <= internal::vector_allocation_error_flag ) - throw_exception(eid_bad_last_alloc); // throw custom exception - func( begin, arg, n ); - } - }; - struct destroy_body { - internal_array_op1 func; - destroy_body(internal_array_op1 destroy) : func(destroy) {} - void operator()(segment_t &s, void *begin, size_type n) const { - if( s.array > internal::vector_allocation_error_flag ) - func( begin, n ); - } - }; -}; - -void concurrent_vector_base_v3::helper::extend_segment_table(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type start) { - if( start > segment_size(pointers_per_short_table) ) start = segment_size(pointers_per_short_table); - // If other threads are trying to set pointers in the short segment, wait for them to finish their - // assigments before we copy the short segment to the long segment. Note: grow_to_at_least depends on it - for( segment_index_t i = 0; segment_base(i) < start && v.my_segment == v.my_storage; i++ ) - if(!v.my_storage[i].array) { - ITT_NOTIFY(sync_prepare, &v.my_storage[i].array); - atomic_backoff backoff; - do backoff.pause(); while( v.my_segment == v.my_storage && !v.my_storage[i].array ); - ITT_NOTIFY(sync_acquired, &v.my_storage[i].array); - } - if( v.my_segment != v.my_storage ) return; - - segment_t* s = (segment_t*)NFS_Allocate( pointers_per_long_table, sizeof(segment_t), NULL ); - // No need to check !s here, because NFS_Allocate throws exception if it cannot allocate the requested storage. - memset( s, 0, pointers_per_long_table*sizeof(segment_t) ); - for( segment_index_t i = 0; i < pointers_per_short_table; i++) - s[i] = v.my_storage[i]; - if( v.my_segment.compare_and_swap( s, v.my_storage ) != v.my_storage ) - NFS_Free( s ); - // else TODO: add ITT_NOTIFY signals for v.my_segment? -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::helper::enable_segment(concurrent_vector_base_v3 &v, concurrent_vector_base_v3::size_type k, concurrent_vector_base_v3::size_type element_size) { - segment_t* s = v.my_segment; // TODO: optimize out as argument? Optimize accesses to my_first_block - __TBB_ASSERT( s[k].array <= internal::vector_allocation_error_flag, "concurrent operation during growth?" ); - if( !k ) { - assign_first_segment_if_neccessary(v, default_initial_segments-1); - __TBB_TRY { - publish_segment(s[0], allocate_segment(v, segment_size(v.my_first_block) ) ); - } __TBB_CATCH(...) 
{ // intercept exception here, assign internal::vector_allocation_error_flag value, re-throw exception - publish_segment(s[0], internal::vector_allocation_error_flag); - __TBB_RETHROW(); - } - return 2; - } - size_type m = segment_size(k); - if( !v.my_first_block ) // push_back only - spin_wait_while_eq( v.my_first_block, segment_index_t(0) ); - if( k < v.my_first_block ) { - // s[0].array is changed only once ( 0 -> !0 ) and points to uninitialized memory - void *array0 = __TBB_load_with_acquire(s[0].array); - if( !array0 ) { - // sync_prepare called only if there is a wait - ITT_NOTIFY(sync_prepare, &s[0].array ); - spin_wait_while_eq( s[0].array, (void*)0 ); - array0 = __TBB_load_with_acquire(s[0].array); - } - ITT_NOTIFY(sync_acquired, &s[0].array); - if( array0 <= internal::vector_allocation_error_flag ) { // check for internal::vector_allocation_error_flag of initial segment - publish_segment(s[k], internal::vector_allocation_error_flag); // and assign internal::vector_allocation_error_flag here - throw_exception(eid_bad_last_alloc); // throw custom exception - } - publish_segment( s[k], - static_cast<void*>( static_cast<char*>(array0) + segment_base(k)*element_size ) - ); - } else { - __TBB_TRY { - publish_segment(s[k], allocate_segment(v, m)); - } __TBB_CATCH(...) { // intercept exception here, assign internal::vector_allocation_error_flag value, re-throw exception - publish_segment(s[k], internal::vector_allocation_error_flag); - __TBB_RETHROW(); - } - } - return m; -} - -void concurrent_vector_base_v3::helper::cleanup() { - if( !sz ) { // allocation failed, restore the table - segment_index_t k_start = k, k_end = segment_index_of(finish-1); - if( segment_base( k_start ) < start ) - get_segment_ptr(k_start++, true); // wait - if( k_start < first_block ) { - void *array0 = get_segment_ptr(0, start>0); // wait if necessary - if( array0 && !k_start ) ++k_start; - if( array0 <= internal::vector_allocation_error_flag ) - for(; k_start < first_block && k_start <= k_end; ++k_start ) - publish_segment(table[k_start], internal::vector_allocation_error_flag); - else for(; k_start < first_block && k_start <= k_end; ++k_start ) - publish_segment(table[k_start], static_cast<void*>( - static_cast<char*>(array0) + segment_base(k_start)*element_size) ); - } - for(; k_start <= k_end; ++k_start ) // not in first block - if( !__TBB_load_with_acquire(table[k_start].array) ) - publish_segment(table[k_start], internal::vector_allocation_error_flag); - // fill alocated items - first_segment(); - goto recover; - } - while( sz <= finish ) { // there is still work for at least one segment - next_segment(); -recover: - void *array = table[k].array; - if( array > internal::vector_allocation_error_flag ) - std::memset( static_cast<char*>(array)+element_size*start, 0, ((sz<finish?sz:finish) - start)*element_size ); - else __TBB_ASSERT( array == internal::vector_allocation_error_flag, NULL ); - } -} - -concurrent_vector_base_v3::~concurrent_vector_base_v3() { - segment_t* s = my_segment; - if( s != my_storage ) { - // Clear short segment. - for( segment_index_t i = 0; i < pointers_per_short_table; i++) - my_storage[i].array = NULL; -#if TBB_USE_DEBUG - for( segment_index_t i = 0; i < pointers_per_long_table; i++) - __TBB_ASSERT( my_segment[i].array <= internal::vector_allocation_error_flag, "Segment should have been freed. 
Please recompile with new TBB before using exceptions."); -#endif - my_segment = my_storage; - NFS_Free( s ); - } -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_capacity() const { - return segment_base( helper::find_segment_end(*this) ); -} - -void concurrent_vector_base_v3::internal_throw_exception(size_type t) const { - switch(t) { - case 0: throw_exception(eid_out_of_range); - case 1: throw_exception(eid_segment_range_error); - case 2: throw_exception(eid_index_range_error); - } -} - -void concurrent_vector_base_v3::internal_reserve( size_type n, size_type element_size, size_type max_size ) { - if( n>max_size ) - throw_exception(eid_reservation_length_error); - __TBB_ASSERT( n, NULL ); - helper::assign_first_segment_if_neccessary(*this, segment_index_of(n-1)); - segment_index_t k = helper::find_segment_end(*this); - __TBB_TRY { - for( ; segment_base(k)<n; ++k ) { - helper::extend_table_if_necessary(*this, k, 0); - if(my_segment[k].array <= internal::vector_allocation_error_flag) - helper::enable_segment(*this, k, element_size); - } - } __TBB_CATCH(...) { - my_segment[k].array = NULL; - __TBB_RETHROW(); // repair and rethrow - } -} - -void concurrent_vector_base_v3::internal_copy( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op2 copy ) { - size_type n = src.my_early_size; - __TBB_ASSERT( my_segment == my_storage, NULL); - if( n ) { - helper::assign_first_segment_if_neccessary(*this, segment_index_of(n-1)); - size_type b; - for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) { - if( (src.my_segment == (segment_t*)src.my_storage && k >= pointers_per_short_table) - || src.my_segment[k].array <= internal::vector_allocation_error_flag ) { - my_early_size = b; break; - } - helper::extend_table_if_necessary(*this, k, 0); - size_type m = helper::enable_segment(*this, k, element_size); - if( m > n-b ) m = n-b; - my_early_size = b+m; - copy( my_segment[k].array, src.my_segment[k].array, m ); - } - } -} - -void concurrent_vector_base_v3::internal_assign( const concurrent_vector_base_v3& src, size_type element_size, internal_array_op1 destroy, internal_array_op2 assign, internal_array_op2 copy ) { - size_type n = src.my_early_size; - while( my_early_size>n ) { // TODO: improve - segment_index_t k = segment_index_of( my_early_size-1 ); - size_type b=segment_base(k); - size_type new_end = b>=n ? 
b : n; - __TBB_ASSERT( my_early_size>new_end, NULL ); - if( my_segment[k].array <= internal::vector_allocation_error_flag) // check vector was broken before - throw_exception(eid_bad_last_alloc); // throw custom exception - // destructors are supposed to not throw any exceptions - destroy( (char*)my_segment[k].array+element_size*(new_end-b), my_early_size-new_end ); - my_early_size = new_end; - } - size_type dst_initialized_size = my_early_size; - my_early_size = n; - helper::assign_first_segment_if_neccessary(*this, segment_index_of(n)); - size_type b; - for( segment_index_t k=0; (b=segment_base(k))<n; ++k ) { - if( (src.my_segment == (segment_t*)src.my_storage && k >= pointers_per_short_table) - || src.my_segment[k].array <= internal::vector_allocation_error_flag ) { // if source is damaged - my_early_size = b; break; // TODO: it may cause undestructed items - } - helper::extend_table_if_necessary(*this, k, 0); - if( !my_segment[k].array ) - helper::enable_segment(*this, k, element_size); - else if( my_segment[k].array <= internal::vector_allocation_error_flag ) - throw_exception(eid_bad_last_alloc); // throw custom exception - size_type m = k? segment_size(k) : 2; - if( m > n-b ) m = n-b; - size_type a = 0; - if( dst_initialized_size>b ) { - a = dst_initialized_size-b; - if( a>m ) a = m; - assign( my_segment[k].array, src.my_segment[k].array, a ); - m -= a; - a *= element_size; - } - if( m>0 ) - copy( (char*)my_segment[k].array+a, (char*)src.my_segment[k].array+a, m ); - } - __TBB_ASSERT( src.my_early_size==n, "detected use of concurrent_vector::operator= with right side that was concurrently modified" ); -} - -void* concurrent_vector_base_v3::internal_push_back( size_type element_size, size_type& index ) { - __TBB_ASSERT( sizeof(my_early_size)==sizeof(uintptr_t), NULL ); - size_type tmp = __TBB_FetchAndIncrementWacquire(&my_early_size); - index = tmp; - segment_index_t k_old = segment_index_of( tmp ); - size_type base = segment_base(k_old); - helper::extend_table_if_necessary(*this, k_old, tmp); - segment_t& s = helper::acquire_segment(*this, k_old, element_size, base==tmp); - size_type j_begin = tmp-base; - return (void*)((char*)s.array+element_size*j_begin); -} - -void concurrent_vector_base_v3::internal_grow_to_at_least( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) { - internal_grow_to_at_least_with_result( new_size, element_size, init, src ); -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_to_at_least_with_result( size_type new_size, size_type element_size, internal_array_op2 init, const void *src ) { - size_type e = my_early_size; - while( e<new_size ) { - size_type f = my_early_size.compare_and_swap(new_size,e); - if( f==e ) { - internal_grow( e, new_size, element_size, init, src ); - break; - } - e = f; - } - // Check/wait for segments allocation completes - segment_index_t i, k_old = segment_index_of( new_size-1 ); - if( k_old >= pointers_per_short_table && my_segment == my_storage ) { - spin_wait_while_eq( my_segment, my_storage ); - } - for( i = 0; i <= k_old; ++i ) { - segment_t &s = my_segment[i]; - if(!s.array) { - ITT_NOTIFY(sync_prepare, &s.array); - atomic_backoff backoff; - do backoff.pause(); - while( !__TBB_load_with_acquire(my_segment[i].array) ); // my_segment may change concurrently - ITT_NOTIFY(sync_acquired, &s.array); - } - if( my_segment[i].array <= internal::vector_allocation_error_flag ) - throw_exception(eid_bad_last_alloc); - } -#if TBB_USE_DEBUG - size_type capacity = 
internal_capacity(); - __TBB_ASSERT( capacity >= new_size, NULL); -#endif - return e; -} - -concurrent_vector_base_v3::size_type concurrent_vector_base_v3::internal_grow_by( size_type delta, size_type element_size, internal_array_op2 init, const void *src ) { - size_type result = my_early_size.fetch_and_add(delta); - internal_grow( result, result+delta, element_size, init, src ); - return result; -} - -void concurrent_vector_base_v3::internal_grow( const size_type start, size_type finish, size_type element_size, internal_array_op2 init, const void *src ) { - __TBB_ASSERT( start<finish, "start must be less than finish" ); - segment_index_t k_start = segment_index_of(start), k_end = segment_index_of(finish-1); - helper::assign_first_segment_if_neccessary(*this, k_end); - helper::extend_table_if_necessary(*this, k_end, start); - helper range(my_segment, my_first_block, element_size, k_start, start, finish); - for(; k_end > k_start && k_end >= range.first_block; --k_end ) // allocate segments in reverse order - helper::acquire_segment(*this, k_end, element_size, true/*for k_end>k_start*/); - for(; k_start <= k_end; ++k_start ) // but allocate first block in straight order - helper::acquire_segment(*this, k_start, element_size, segment_base( k_start ) >= start ); - range.apply( helper::init_body(init, src) ); -} - -void concurrent_vector_base_v3::internal_resize( size_type n, size_type element_size, size_type max_size, const void *src, - internal_array_op1 destroy, internal_array_op2 init ) { - size_type j = my_early_size; - if( n > j ) { // construct items - internal_reserve(n, element_size, max_size); - my_early_size = n; - helper for_each(my_segment, my_first_block, element_size, segment_index_of(j), j, n); - for_each.apply( helper::safe_init_body(init, src) ); - } else { - my_early_size = n; - helper for_each(my_segment, my_first_block, element_size, segment_index_of(n), n, j); - for_each.apply( helper::destroy_body(destroy) ); - } -} - -concurrent_vector_base_v3::segment_index_t concurrent_vector_base_v3::internal_clear( internal_array_op1 destroy ) { - __TBB_ASSERT( my_segment, NULL ); - size_type j = my_early_size; - my_early_size = 0; - helper for_each(my_segment, my_first_block, 0, 0, 0, j); // element_size is safe to be zero if 'start' is zero - j = for_each.apply( helper::destroy_body(destroy) ); - size_type i = helper::find_segment_end(*this); - return j < i? i : j+1; -} - -void *concurrent_vector_base_v3::internal_compact( size_type element_size, void *table, internal_array_op1 destroy, internal_array_op2 copy ) -{ - const size_type my_size = my_early_size; - const segment_index_t k_end = helper::find_segment_end(*this); // allocated segments - const segment_index_t k_stop = my_size? segment_index_of(my_size-1) + 1 : 0; // number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3;.. 
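The compaction and growth logic in the deleted hunk above leans entirely on concurrent_vector's power-of-two segment layout, summarized by the "0=>0; 1,2=>1; 3,4=>2; [5-8]=>3" comment. The standalone sketch below is not part of the patch; it only illustrates that arithmetic, assuming the conventional layout in which segment 0 holds two elements and segment k (k >= 1) holds 2^k elements starting at index 2^k. All names are hypothetical stand-ins for the internal helpers.

#include <cassert>
#include <cstddef>

// Hypothetical helpers mirroring the power-of-two segment layout assumed by
// the deleted concurrent_vector code: segment 0 holds 2 elements, segment k
// (k >= 1) holds 2^k elements starting at index 2^k.
static std::size_t demo_segment_index_of(std::size_t index) {
    std::size_t k = 0;
    for (std::size_t v = index | 1; v > 1; v >>= 1) ++k;   // floor(log2(index|1))
    return k;
}
static std::size_t demo_segment_base(std::size_t k) {
    return (std::size_t(1) << k) & ~std::size_t(1);        // 0, 2, 4, 8, 16, ...
}
static std::size_t demo_segment_size(std::size_t k) {
    return k ? std::size_t(1) << k : 2;                    // 2, 2, 4, 8, 16, ...
}

int main() {
    // "number of segments to store existing items: 0=>0; 1,2=>1; 3,4=>2; [5-8]=>3"
    const std::size_t sizes[] = {1, 2, 3, 4, 5, 8};
    for (std::size_t n : sizes) {
        std::size_t k_stop = demo_segment_index_of(n - 1) + 1;
        assert(demo_segment_base(k_stop) >= n);            // k_stop segments suffice
    }
    assert(demo_segment_index_of(7) == 2);                 // indices 4..7 live in segment 2
    assert(demo_segment_base(3) + demo_segment_size(3) == 16);
    return 0;
}

For example, eight elements occupy exactly segments 0, 1 and 2 (2 + 2 + 4 elements), so k_stop evaluates to 3, matching the "[5-8]=>3" case in the comment.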
- const segment_index_t first_block = my_first_block; // number of merged segments, getting values from atomics - - segment_index_t k = first_block; - if(k_stop < first_block) - k = k_stop; - else - while (k < k_stop && helper::incompact_predicate(segment_size( k ) * element_size) ) k++; - if(k_stop == k_end && k == first_block) - return NULL; - - segment_t *const segment_table = my_segment; - internal_segments_table &old = *static_cast<internal_segments_table*>( table ); - memset(&old, 0, sizeof(old)); - - if ( k != first_block && k ) // first segment optimization - { - // exception can occur here - void *seg = old.table[0] = helper::allocate_segment( *this, segment_size(k) ); - old.first_block = k; // fill info for freeing new segment if exception occurs - // copy items to the new segment - size_type my_segment_size = segment_size( first_block ); - for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) { - __TBB_ASSERT( segment_table[i].array > internal::vector_allocation_error_flag, NULL); - void *s = static_cast<void*>( - static_cast<char*>(seg) + segment_base(i)*element_size ); - if(j + my_segment_size >= my_size) my_segment_size = my_size - j; - __TBB_TRY { // exception can occur here - copy( s, segment_table[i].array, my_segment_size ); - } __TBB_CATCH(...) { // destroy all the already copied items - helper for_each(reinterpret_cast<segment_t*>(&old.table[0]), old.first_block, element_size, - 0, 0, segment_base(i)+my_segment_size); - for_each.apply( helper::destroy_body(destroy) ); - __TBB_RETHROW(); - } - my_segment_size = i? segment_size( ++i ) : segment_size( i = first_block ); - } - // commit the changes - memcpy(old.table, segment_table, k * sizeof(segment_t)); - for (segment_index_t i = 0; i < k; i++) { - segment_table[i].array = static_cast<void*>( - static_cast<char*>(seg) + segment_base(i)*element_size ); - } - old.first_block = first_block; my_first_block = k; // now, first_block != my_first_block - // destroy original copies - my_segment_size = segment_size( first_block ); // old.first_block actually - for (segment_index_t i = 0, j = 0; i < k && j < my_size; j = my_segment_size) { - if(j + my_segment_size >= my_size) my_segment_size = my_size - j; - // destructors are supposed to not throw any exceptions - destroy( old.table[i], my_segment_size ); - my_segment_size = i? 
segment_size( ++i ) : segment_size( i = first_block ); - } - } - // free unnecessary segments allocated by reserve() call - if ( k_stop < k_end ) { - old.first_block = first_block; - memcpy(old.table+k_stop, segment_table+k_stop, (k_end-k_stop) * sizeof(segment_t)); - memset(segment_table+k_stop, 0, (k_end-k_stop) * sizeof(segment_t)); - if( !k ) my_first_block = 0; - } - return table; -} - -void concurrent_vector_base_v3::internal_swap(concurrent_vector_base_v3& v) -{ - size_type my_sz = my_early_size, v_sz = v.my_early_size; - if(!my_sz && !v_sz) return; - size_type tmp = my_first_block; my_first_block = v.my_first_block; v.my_first_block = tmp; - bool my_short = (my_segment == my_storage), v_short = (v.my_segment == v.my_storage); - if ( my_short && v_short ) { // swap both tables - char tbl[pointers_per_short_table * sizeof(segment_t)]; - memcpy(tbl, my_storage, pointers_per_short_table * sizeof(segment_t)); - memcpy(my_storage, v.my_storage, pointers_per_short_table * sizeof(segment_t)); - memcpy(v.my_storage, tbl, pointers_per_short_table * sizeof(segment_t)); - } - else if ( my_short ) { // my -> v - memcpy(v.my_storage, my_storage, pointers_per_short_table * sizeof(segment_t)); - my_segment = v.my_segment; v.my_segment = v.my_storage; - } - else if ( v_short ) { // v -> my - memcpy(my_storage, v.my_storage, pointers_per_short_table * sizeof(segment_t)); - v.my_segment = my_segment; my_segment = my_storage; - } else { - segment_t *ptr = my_segment; my_segment = v.my_segment; v.my_segment = ptr; - } - my_early_size = v_sz; v.my_early_size = my_sz; -} - -} // namespace internal - -} // tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/condition_variable.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/condition_variable.cpp deleted file mode 100644 index d4b63ec67f..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/condition_variable.cpp +++ /dev/null @@ -1,213 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "tbb/compat/condition_variable" -#include "tbb/atomic.h" -#include "dynamic_link.h" -#include "itt_notify.h" - -namespace tbb { - -namespace internal { - -//condition_variable -#if _WIN32||_WIN64 -using tbb::interface5::internal::condition_variable_using_event; - -static atomic<int> condvar_module_inited; - -void WINAPI init_condvar_using_event( condition_variable_using_event* cv_event ) -{ - cv_event->event = CreateEvent( NULL, TRUE/*manual reset*/, FALSE/*not signalled initially*/, NULL); - InitializeCriticalSection( &cv_event->mutex ); - cv_event->n_waiters = 0; - cv_event->release_count = 0; - cv_event->epoch = 0; -} - -BOOL WINAPI sleep_condition_variable_cs_using_event( condition_variable_using_event* cv_event, LPCRITICAL_SECTION cs, DWORD dwMilliseconds ) -{ - EnterCriticalSection( &cv_event->mutex ); - ++cv_event->n_waiters; - unsigned my_generation = cv_event->epoch; - LeaveCriticalSection( &cv_event->mutex ); - LeaveCriticalSection( cs ); - for (;;) { - // should come here at least once - DWORD rc = WaitForSingleObject( cv_event->event, dwMilliseconds ); - EnterCriticalSection( &cv_event->mutex ); - if( rc!=WAIT_OBJECT_0 ) { - --cv_event->n_waiters; - LeaveCriticalSection( &cv_event->mutex ); - if( rc==WAIT_TIMEOUT ) { - SetLastError( WAIT_TIMEOUT ); - EnterCriticalSection( cs ); - } - return false; - } - __TBB_ASSERT( rc==WAIT_OBJECT_0, NULL ); - if( cv_event->release_count>0 && cv_event->epoch!=my_generation ) - break; - LeaveCriticalSection( &cv_event->mutex ); - } - - // still in the critical section - --cv_event->n_waiters; - int count = --cv_event->release_count; - LeaveCriticalSection( &cv_event->mutex ); - - if( count==0 ) { - __TBB_ASSERT( cv_event->event, "Premature destruction of condition variable?" ); - ResetEvent( cv_event->event ); - } - EnterCriticalSection( cs ); - return true; -} - -void WINAPI wake_condition_variable_using_event( condition_variable_using_event* cv_event ) -{ - EnterCriticalSection( &cv_event->mutex ); - if( cv_event->n_waiters>cv_event->release_count ) { - SetEvent( cv_event->event ); // Signal the manual-reset event. 
- ++cv_event->release_count; - ++cv_event->epoch; - } - LeaveCriticalSection( &cv_event->mutex ); -} - -void WINAPI wake_all_condition_variable_using_event( condition_variable_using_event* cv_event ) -{ - EnterCriticalSection( &cv_event->mutex ); - if( cv_event->n_waiters>0 ) { - SetEvent( cv_event->event ); - cv_event->release_count = cv_event->n_waiters; - ++cv_event->epoch; - } - LeaveCriticalSection( &cv_event->mutex ); -} - -void WINAPI destroy_condvar_using_event( condition_variable_using_event* cv_event ) -{ - HANDLE my_event = cv_event->event; - EnterCriticalSection( &cv_event->mutex ); - // NULL is an invalid HANDLE value - cv_event->event = NULL; - if( cv_event->n_waiters>0 ) { - LeaveCriticalSection( &cv_event->mutex ); - spin_wait_until_eq( cv_event->n_waiters, 0 ); - // make sure the last thread completes its access to cv - EnterCriticalSection( &cv_event->mutex ); - } - LeaveCriticalSection( &cv_event->mutex ); - CloseHandle( my_event ); -} - -void WINAPI destroy_condvar_noop( CONDITION_VARIABLE* /*cv*/ ) { /*no op*/ } - -static void (WINAPI *__TBB_init_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&init_condvar_using_event; -static BOOL (WINAPI *__TBB_condvar_wait)( PCONDITION_VARIABLE, LPCRITICAL_SECTION, DWORD ) = (BOOL (WINAPI *)(PCONDITION_VARIABLE,LPCRITICAL_SECTION, DWORD))&sleep_condition_variable_cs_using_event; -static void (WINAPI *__TBB_condvar_notify_one)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_condition_variable_using_event; -static void (WINAPI *__TBB_condvar_notify_all)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&wake_all_condition_variable_using_event; -static void (WINAPI *__TBB_destroy_condvar)( PCONDITION_VARIABLE ) = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_using_event; - -//! Table describing the how to link the handlers. 
-static const dynamic_link_descriptor CondVarLinkTable[] = { - DLD(InitializeConditionVariable, __TBB_init_condvar), - DLD(SleepConditionVariableCS, __TBB_condvar_wait), - DLD(WakeConditionVariable, __TBB_condvar_notify_one), - DLD(WakeAllConditionVariable, __TBB_condvar_notify_all) -}; - -void init_condvar_module() -{ - __TBB_ASSERT( (uintptr_t)__TBB_init_condvar==(uintptr_t)&init_condvar_using_event, NULL ); - if( dynamic_link( "Kernel32.dll", CondVarLinkTable, 4 ) ) - __TBB_destroy_condvar = (void (WINAPI *)(PCONDITION_VARIABLE))&destroy_condvar_noop; -} -#endif /* _WIN32||_WIN64 */ - -} // namespace internal - -#if _WIN32||_WIN64 - -namespace interface5 { -namespace internal { - -using tbb::internal::condvar_module_inited; -using tbb::internal::__TBB_init_condvar; -using tbb::internal::__TBB_condvar_wait; -using tbb::internal::__TBB_condvar_notify_one; -using tbb::internal::__TBB_condvar_notify_all; -using tbb::internal::__TBB_destroy_condvar; -using tbb::internal::init_condvar_module; - -void internal_initialize_condition_variable( condvar_impl_t& cv ) -{ - if( condvar_module_inited!=2 ) { - if( condvar_module_inited==0 ) { - if( condvar_module_inited.compare_and_swap( 1, 0 )==0 ) { - init_condvar_module(); - condvar_module_inited = 2; - } - } - - spin_wait_until_eq( condvar_module_inited, 2 ); - } - __TBB_init_condvar( &cv.cv_native ); -} - -void internal_destroy_condition_variable( condvar_impl_t& cv ) -{ - __TBB_destroy_condvar( &cv.cv_native ); -} - -void internal_condition_variable_notify_one( condvar_impl_t& cv ) -{ - __TBB_condvar_notify_one ( &cv.cv_native ); -} - -void internal_condition_variable_notify_all( condvar_impl_t& cv ) -{ - __TBB_condvar_notify_all( &cv.cv_native ); -} - -bool internal_condition_variable_wait( condvar_impl_t& cv, mutex* mtx, const tick_count::interval_t* i ) -{ - DWORD duration = i ? DWORD((i->seconds()*1000)) : INFINITE; - mtx->set_state( mutex::INITIALIZED ); - BOOL res = __TBB_condvar_wait( &cv.cv_native, mtx->native_handle(), duration ); - mtx->set_state( mutex::HELD ); - return res?true:false; -} - -} // namespace internal -} // nameespace interface5 - -#endif /* _WIN32||_WIN64 */ - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/critical_section.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/critical_section.cpp deleted file mode 100644 index c67621f47e..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/critical_section.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
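Besides the event-based emulation itself, the deleted condition_variable.cpp illustrates a compact once-initialization protocol: condvar_module_inited moves 0 -> 1 -> 2, exactly one thread wins the compare-and-swap and runs init_condvar_module(), and every other caller spins until the value 2 is published. Below is a minimal, library-agnostic sketch of that gate, assuming std::atomic in place of tbb::atomic; the function names are hypothetical and the real initialization work is elided.

#include <atomic>
#include <thread>

// Sketch of the 0 -> 1 -> 2 once-initialization gate used by
// internal_initialize_condition_variable() in the deleted file.
static std::atomic<int> module_state{0};      // 0 = untouched, 1 = in progress, 2 = ready

static void do_module_init() { /* bind function pointers, probe the OS, etc. */ }

static void ensure_module_initialized() {
    if (module_state.load(std::memory_order_acquire) != 2) {
        int expected = 0;
        if (module_state.compare_exchange_strong(expected, 1)) {
            do_module_init();                 // exactly one thread performs the work
            module_state.store(2, std::memory_order_release);
        } else {
            while (module_state.load(std::memory_order_acquire) != 2)
                std::this_thread::yield();    // everyone else waits until it is published
        }
    }
}

int main() {
    std::thread a(ensure_module_initialized), b(ensure_module_initialized);
    a.join(); b.join();
    return 0;
}

The same shape is what protects the function-pointer table in the deleted file: until the gate reads 2, callers never touch the pointers that init_condvar_module() may still be rebinding.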
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/critical_section.h" -#include "itt_notify.h" - -namespace tbb { - namespace internal { - -void critical_section_v4::internal_construct() { - ITT_SYNC_CREATE(&my_impl, _T("ppl::critical_section"), _T("")); -} -} // namespace internal -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/custom_scheduler.h b/deal.II/bundled/tbb30_104oss/src/tbb/custom_scheduler.h deleted file mode 100644 index 9151127bdd..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/custom_scheduler.h +++ /dev/null @@ -1,485 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_custom_scheduler_H -#define _TBB_custom_scheduler_H - -#include "scheduler.h" -#include "observer_proxy.h" -#include "itt_notify.h" - -namespace tbb { -namespace internal { - -//! Amount of time to pause between steals. -/** The default values below were found to be best empirically for K-Means - on the 32-way Altix and 4-way (*2 for HT) fxqlin04. */ -#if __TBB_ipf -static const long PauseTime = 1500; -#else -static const long PauseTime = 80; -#endif - -//------------------------------------------------------------------------ -//! Traits classes for scheduler -//------------------------------------------------------------------------ - -struct DefaultSchedulerTraits { - static const bool itt_possible = true; - static const bool has_slow_atomic = false; -}; - -struct IntelSchedulerTraits { - static const bool itt_possible = false; -#if __TBB_x86_32||__TBB_x86_64 - static const bool has_slow_atomic = true; -#else - static const bool has_slow_atomic = false; -#endif /* __TBB_x86_32||__TBB_x86_64 */ -}; - -//------------------------------------------------------------------------ -// custom_scheduler -//------------------------------------------------------------------------ - -//! 
A scheduler with a customized evaluation loop. -/** The customization can use SchedulerTraits to make decisions without needing a run-time check. */ -template<typename SchedulerTraits> -class custom_scheduler: private generic_scheduler { - typedef custom_scheduler<SchedulerTraits> scheduler_type; - - //! Scheduler loop that dispatches tasks. - /** If child is non-NULL, it is dispatched first. - Then, until "parent" has a reference count of 1, other task are dispatched or stolen. */ - /*override*/ - void local_wait_for_all( task& parent, task* child ); - - //! Entry point from client code to the scheduler loop that dispatches tasks. - /** The method is virtual, but the *this object is used only for sake of dispatching on the correct vtable, - not necessarily the correct *this object. The correct *this object is looked up in TLS. */ - /*override*/ - void wait_for_all( task& parent, task* child ) { - static_cast<custom_scheduler*>(governor::local_scheduler())->scheduler_type::local_wait_for_all( parent, child ); - } - - //! Construct a custom_scheduler - custom_scheduler( arena* a, size_t index ) : generic_scheduler(a, index) {} - - //! Decrements ref_count of a predecessor. - /** If it achieves 0, the predecessor is scheduled for execution. - When changing, remember that this is a hot path function. */ - void tally_completion_of_predecessor( task& s, task*& bypass_slot ) { - task_prefix& p = s.prefix(); - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_releasing, &p.ref_count); - if( SchedulerTraits::has_slow_atomic && p.ref_count==1 ) { - p.ref_count=0; - } else { - if( __TBB_FetchAndDecrementWrelease(&p.ref_count) > 1 ) // more references exist - return; - } - __TBB_ASSERT(p.ref_count==0, "completion of task caused predecessor's reference count to underflow"); - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_acquired, &p.ref_count); -#if TBB_USE_ASSERT - p.extra_state &= ~es_ref_count_active; -#endif /* TBB_USE_ASSERT */ - - if( bypass_slot==NULL ) - bypass_slot = &s; - else - local_spawn( s, s.prefix().next ); - } - -public: - static generic_scheduler* allocate_scheduler( arena* a, size_t index ) { -#if !__TBB_ARENA_PER_MASTER - __TBB_ASSERT( a, "missing arena" ); -#endif /* !__TBB_ARENA_PER_MASTER */ - scheduler_type* s = (scheduler_type*)NFS_Allocate(sizeof(scheduler_type),1,NULL); - new( s ) scheduler_type( a, index ); - s->assert_task_pool_valid(); - ITT_SYNC_CREATE(s, SyncType_Scheduler, SyncObj_TaskPoolSpinning); - return s; - } - - //! Try getting a task from the mailbox or stealing from another scheduler. - /** Returns the stolen task or NULL if all attempts fail. */ - /* override */ task* receive_or_steal_task( reference_count&, bool ); - -}; // class custom_scheduler<> - -//------------------------------------------------------------------------ -// custom_scheduler methods -//------------------------------------------------------------------------ - -template<typename SchedulerTraits> -task* custom_scheduler<SchedulerTraits>::receive_or_steal_task( reference_count& completion_ref_count, - bool return_if_no_work ) { - task* t = NULL; - inbox.set_is_idle( true ); - // The state "failure_count==-1" is used only when itt_possible is true, - // and denotes that a sync_prepare has not yet been issued. 
- for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) { - if( completion_ref_count==1 ) { - if( SchedulerTraits::itt_possible ) { - if( failure_count!=-1 ) { - ITT_NOTIFY(sync_prepare, &completion_ref_count); - // Notify Intel(R) Thread Profiler that thread has stopped spinning. - ITT_NOTIFY(sync_acquired, this); - } - ITT_NOTIFY(sync_acquired, &completion_ref_count); - } - inbox.set_is_idle( false ); - return NULL; - } -#if __TBB_ARENA_PER_MASTER - size_t n = my_arena->my_limit; - __TBB_ASSERT( arena_index < n, NULL ); -#else /* !__TBB_ARENA_PER_MASTER */ - size_t n = my_arena->prefix().limit; -#endif /* !__TBB_ARENA_PER_MASTER */ - if( n>1 ) { - if( my_affinity_id && (t=get_mailbox_task()) ) { - GATHER_STATISTIC( ++my_counters.mails_received ); - } -#if __TBB_ARENA_PER_MASTER - // Check if there are tasks in starvation-resistant stream. - // Only allowed for workers with empty stack, which is identified by return_if_no_work. - else if ( return_if_no_work && (t=dequeue_task()) ) { - // just proceed with the obtained task - } - // Check if the resource manager requires our arena to relinquish some threads - else if ( return_if_no_work && (my_arena->my_num_workers_allotted < my_arena->num_workers_active()) ) { - if( SchedulerTraits::itt_possible ) { - if( failure_count!=-1 ) - ITT_NOTIFY(sync_cancel, this); - } - return NULL; - } -#endif /* __TBB_ARENA_PER_MASTER */ - else { - // Try to steal a task from a random victim. - if ( !can_steal() ) - goto fail; - size_t k = random.get() % (n-1); - arena_slot* victim = &my_arena->slot[k]; - // The following condition excludes the master that might have - // already taken our previous place in the arena from the list . - // of potential victims. But since such a situation can take - // place only in case of significant oversubscription, keeping - // the checks simple seems to be preferable to complicating the code. - if( k >= arena_index ) - ++victim; // Adjusts random distribution to exclude self - t = steal_task( *victim ); - if( !t ) goto fail; - if( is_proxy(*t) ) { - t = strip_proxy((task_proxy*)t); - if( !t ) goto fail; - GATHER_STATISTIC( ++my_counters.proxies_stolen ); - } - t->prefix().extra_state |= es_task_is_stolen; - if( is_version_3_task(*t) ) { - innermost_running_task = t; - t->note_affinity( my_affinity_id ); - } - GATHER_STATISTIC( ++my_counters.steals_committed ); - } - __TBB_ASSERT(t,NULL); -#if __TBB_SCHEDULER_OBSERVER - // No memory fence required for read of global_last_observer_proxy, because prior fence on steal/mailbox suffices. - if( local_last_observer_proxy!=global_last_observer_proxy ) { - notify_entry_observers(); - } -#endif /* __TBB_SCHEDULER_OBSERVER */ - if( SchedulerTraits::itt_possible ) { - if( failure_count!=-1 ) { - // FIXME - might be victim, or might be selected from a mailbox - // Notify Intel(R) Thread Profiler that thread has stopped spinning. - ITT_NOTIFY(sync_acquired, this); - } - } - inbox.set_is_idle( false ); - break; // jumps to: return t; - } -fail: - GATHER_STATISTIC( ++my_counters.steals_failed ); - if( SchedulerTraits::itt_possible && failure_count==-1 ) { - // The first attempt to steal work failed, so notify Intel(R) Thread Profiler that - // the thread has started spinning. Ideally, we would do this notification - // *before* the first failed attempt to steal, but at that point we do not - // know that the steal will fail. 
- ITT_NOTIFY(sync_prepare, this); - failure_count = 0; - } - // Pause, even if we are going to yield, because the yield might return immediately. - __TBB_Pause(PauseTime); - int yield_threshold = 2*int(n); - if( failure_count>=yield_threshold ) { - __TBB_Yield(); - if( failure_count>=yield_threshold+100 ) { - // When a worker thread has nothing to do, return it to RML. - // For purposes of affinity support, the thread is considered idle while in RML. - if( return_if_no_work && my_arena->is_out_of_work() ) { - if( SchedulerTraits::itt_possible ) { - if( failure_count!=-1 ) - ITT_NOTIFY(sync_cancel, this); - } - return NULL; - } - failure_count = yield_threshold; - } - } - } - return t; -} - -template<typename SchedulerTraits> -void custom_scheduler<SchedulerTraits>::local_wait_for_all( task& parent, task* child ) { - __TBB_ASSERT( governor::is_set(this), NULL ); - if( child ) { - child->prefix().owner = this; - } - __TBB_ASSERT( parent.ref_count() >= (child && child->parent() == &parent ? 2 : 1), "ref_count is too small" ); - assert_task_pool_valid(); - // Using parent's refcount in sync_prepare (in the stealing loop below) is - // a workaround for TP. We need to name it here to display correctly in Ampl. - if( SchedulerTraits::itt_possible ) - ITT_SYNC_CREATE(&parent.prefix().ref_count, SyncType_Scheduler, SyncObj_TaskStealingLoop); -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( parent.prefix().context || (is_worker() && &parent == dummy_task), "parent task does not have context" ); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - task* t = child; - // Constant all_local_work_done is an unreacheable refcount value that prevents - // early quitting the dispatch loop. It is defined to be in the middle of the range - // of negative values representable by the reference_count type. - static const reference_count - // For normal dispatch loops - parents_work_done = 1, - // For termination dispatch loops in masters - all_local_work_done = (reference_count)3 << (sizeof(reference_count) * 8 - 2); - reference_count quit_point; - if( innermost_running_task == dummy_task ) { - // We are in the outermost task dispatch loop of a master thread, - __TBB_ASSERT( !is_worker(), NULL ); - quit_point = &parent == dummy_task ? all_local_work_done : parents_work_done; - } else { - quit_point = parents_work_done; - } - task* old_innermost_running_task = innermost_running_task; -#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS -exception_was_caught: - try { -#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */ - // Outer loop steals tasks when necessary. - for(;;) { - // Middle loop evaluates tasks that are pulled off "array". - do { - // Inner loop evaluates tasks that are handed directly to us by other tasks. 
- while(t) { - __TBB_ASSERT( inbox.is_idle_state(false), NULL ); -#if TBB_USE_ASSERT - __TBB_ASSERT(!is_proxy(*t),"unexpected proxy"); - __TBB_ASSERT( t->prefix().owner==this, NULL ); -#if __TBB_TASK_GROUP_CONTEXT - if ( !t->prefix().context->my_cancellation_requested ) -#endif - __TBB_ASSERT( 1L<<t->state() & (1L<<task::allocated|1L<<task::ready|1L<<task::reexecute), NULL ); - assert_task_pool_valid(); -#endif /* TBB_USE_ASSERT */ - task* t_next = NULL; - innermost_running_task = t; - t->prefix().state = task::executing; -#if __TBB_TASK_GROUP_CONTEXT - if ( !t->prefix().context->my_cancellation_requested ) -#endif - { - GATHER_STATISTIC( ++my_counters.tasks_executed ); -#if __TBB_TASK_GROUP_CONTEXT - if( SchedulerTraits::itt_possible ) - ITT_STACK(callee_enter, t->prefix().context->itt_caller); -#endif - t_next = t->execute(); -#if __TBB_TASK_GROUP_CONTEXT - if( SchedulerTraits::itt_possible ) - ITT_STACK(callee_leave, t->prefix().context->itt_caller); -#endif - if (t_next) { - __TBB_ASSERT( t_next->state()==task::allocated, - "if task::execute() returns task, it must be marked as allocated" ); - t_next->prefix().extra_state &= ~es_task_is_stolen; -#if TBB_USE_ASSERT - affinity_id next_affinity=t_next->prefix().affinity; - if (next_affinity != 0 && next_affinity != my_affinity_id) - GATHER_STATISTIC( ++my_counters.affinity_ignored ); -#endif - } - } - assert_task_pool_valid(); - switch( task::state_type(t->prefix().state) ) { - case task::executing: { - task* s = t->parent(); - __TBB_ASSERT( innermost_running_task==t, NULL ); - __TBB_ASSERT( t->prefix().ref_count==0, "Task still has children after it has been executed" ); - t->~task(); - if( s ) - tally_completion_of_predecessor(*s, t_next); - free_task<no_hint>( *t ); - assert_task_pool_valid(); - break; - } - - case task::recycle: // set by recycle_as_safe_continuation() - t->prefix().state = task::allocated; - __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" ); - t->prefix().extra_state &= ~es_task_is_stolen; - // for safe continuation, need atomically decrement ref_count; - tally_completion_of_predecessor(*t, t_next); - assert_task_pool_valid(); - break; - - case task::reexecute: // set by recycle_to_reexecute() - __TBB_ASSERT( t_next, "reexecution requires that method execute() return another task" ); - __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" ); - t->prefix().state = task::allocated; - t->prefix().extra_state &= ~es_task_is_stolen; - local_spawn( *t, t->prefix().next ); - assert_task_pool_valid(); - break; - case task::allocated: - t->prefix().extra_state &= ~es_task_is_stolen; - break; -#if TBB_USE_ASSERT - case task::ready: - __TBB_ASSERT( false, "task is in READY state upon return from method execute()" ); - break; - default: - __TBB_ASSERT( false, "illegal state" ); -#else - default: // just to shut up some compilation warnings - break; -#endif /* TBB_USE_ASSERT */ - } - - if( t_next ) { - // The store here has a subtle secondary effect - it fetches *t_next into cache. 
- t_next->prefix().owner = this; - GATHER_STATISTIC( ++my_counters.spawns_bypassed ); - } - t = t_next; - } // end of scheduler bypass loop - assert_task_pool_valid(); - - if ( parent.prefix().ref_count == quit_point ) - break; - t = get_task(); - __TBB_ASSERT(!t || !is_proxy(*t),"unexpected proxy"); -#if TBB_USE_ASSERT - assert_task_pool_valid(); - if(t) { - assert_task_valid(*t); - __TBB_ASSERT( t->prefix().owner==this, "thread got task that it does not own" ); - } -#endif /* TBB_USE_ASSERT */ - } while( t ); // end of local task array processing loop - - if ( quit_point == all_local_work_done ) { - __TBB_ASSERT( my_arena_slot == &dummy_slot && my_arena_slot->head == 0 && my_arena_slot->tail == 0, NULL ); - innermost_running_task = old_innermost_running_task; - return; - } -#if __TBB_ARENA_PER_MASTER - __TBB_ASSERT( my_arena->my_max_num_workers > 0 || parent.prefix().ref_count == 1, "deadlock detected" ); -#else /* !__TBB_ARENA_PER_MASTER */ - __TBB_ASSERT( my_arena->prefix().number_of_workers>0||parent.prefix().ref_count==1, "deadlock detected" ); -#endif /* !__TBB_ARENA_PER_MASTER */ - // old_innermost_running_task is NULL *iff* a worker thread is in its "inborn" dispath loop - // (i.e. its execution stack is empty), and it should return from there if no work is available. - t = receive_or_steal_task( parent.prefix().ref_count, !old_innermost_running_task ); - if (!t) { - if( parent.prefix().ref_count==1 ) goto done; - __TBB_ASSERT( is_worker() && !old_innermost_running_task, "a thread exits dispatch loop prematurely" ); - innermost_running_task = NULL; - return; - } - __TBB_ASSERT(t,NULL); - __TBB_ASSERT(!is_proxy(*t),"unexpected proxy"); - t->prefix().owner = this; - } // end of stealing loop -#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS - } TbbCatchAll( t->prefix().context ); - - if( task::state_type(t->prefix().state) == task::recycle ) { // state set by recycle_as_safe_continuation() - t->prefix().state = task::allocated; - // for safe continuation, need to atomically decrement ref_count; - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_releasing, &t->prefix().ref_count); - if( __TBB_FetchAndDecrementWrelease(&t->prefix().ref_count)==1 ) { - if( SchedulerTraits::itt_possible ) - ITT_NOTIFY(sync_acquired, &t->prefix().ref_count); - }else{ - t = NULL; - } - } - goto exception_was_caught; -#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */ -done: - if ( !ConcurrentWaitsEnabled(parent) ) - parent.prefix().ref_count = 0; -#if TBB_USE_ASSERT - parent.prefix().extra_state &= ~es_ref_count_active; -#endif /* TBB_USE_ASSERT */ - innermost_running_task = old_innermost_running_task; -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT(parent.prefix().context && dummy_task->prefix().context, NULL); - task_group_context* parent_ctx = parent.prefix().context; - if ( parent_ctx->my_cancellation_requested ) { - task_group_context::exception_container_type *pe = parent_ctx->my_exception; - if ( innermost_running_task == dummy_task && parent_ctx == dummy_task->prefix().context ) { - // We are in the outermost task dispatch loop of a master thread, and - // the whole task tree has been collapsed. So we may clear cancellation data. 
- parent_ctx->my_cancellation_requested = 0; - __TBB_ASSERT(dummy_task->prefix().context == parent_ctx || !CancellationInfoPresent(*dummy_task), - "Unexpected exception or cancellation data in the dummy task"); - // If possible, add assertion that master's dummy task context does not have children - } - if ( pe ) - pe->throw_self(); - } - __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*dummy_task), - "Worker's dummy task context modified"); - __TBB_ASSERT(innermost_running_task != dummy_task || !CancellationInfoPresent(*dummy_task), - "Unexpected exception or cancellation data in the master's dummy task"); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - assert_task_pool_valid(); -} - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_custom_scheduler_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.cpp deleted file mode 100644 index 8d7c8c97b0..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.cpp +++ /dev/null @@ -1,138 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "dynamic_link.h" - -#ifndef LIBRARY_ASSERT -#include "tbb/tbb_stddef.h" -#define LIBRARY_ASSERT(x,y) __TBB_ASSERT(x,y) -#endif /* LIBRARY_ASSERT */ - -#if _WIN32||_WIN64 - #include <malloc.h> /* alloca */ -#else - #include <dlfcn.h> -#if __NetBSD__ || __FreeBSD__ - #include <stdlib.h> /* alloca */ -#else - #include <alloca.h> -#endif -#endif - -OPEN_INTERNAL_NAMESPACE - -#if __TBB_WEAK_SYMBOLS - -bool dynamic_link( void*, const dynamic_link_descriptor descriptors[], size_t n, size_t required ) -{ - if ( required == ~(size_t)0 ) - required = n; - LIBRARY_ASSERT( required<=n, "Number of required entry points exceeds their total number" ); - size_t k = 0; - // Check if the first required entries are present in what was loaded into our process - while ( k < required && descriptors[k].ptr ) - ++k; - if ( k < required ) - return false; - // Commit all the entry points. 
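The dispatch loop in the deleted custom_scheduler.h above is driven by task reference counts: tally_completion_of_predecessor() decrements the parent's ref_count with release semantics, and local_wait_for_all() keeps executing and stealing until the count reaches its quit point. The sketch below is only a rough, library-agnostic illustration of that completion-counting idea, using std::atomic and std::thread rather than TBB's fetch-and-decrement primitives; it is not the scheduler's code.

#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

// The waiter owns one reference; each child owns one more. A child's last act
// is a release decrement, so the waiter's acquire load sees the child's work.
static void run_children_and_wait(int n_children) {
    std::atomic<long> ref_count(1 + n_children);
    std::atomic<int>  work_done(0);

    std::vector<std::thread> children;
    for (int i = 0; i < n_children; ++i)
        children.emplace_back([&] {
            work_done.fetch_add(1, std::memory_order_relaxed);   // the "task body"
            ref_count.fetch_sub(1, std::memory_order_release);   // tally completion
        });

    // Quit point: only the waiter's own reference is left.
    while (ref_count.load(std::memory_order_acquire) != 1)
        std::this_thread::yield();
    assert(work_done.load(std::memory_order_relaxed) == n_children);

    for (auto& t : children) t.join();
}

int main() { run_children_and_wait(4); return 0; }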
- for ( k = 0; k < n; ++k ) - *descriptors[k].handler = (pointer_to_handler) descriptors[k].ptr; - return true; -} - -#else /* !__TBB_WEAK_SYMBOLS */ - -bool dynamic_link( void* module, const dynamic_link_descriptor descriptors[], size_t n, size_t required ) -{ - pointer_to_handler *h = (pointer_to_handler*)alloca(n * sizeof(pointer_to_handler)); - if ( required == ~(size_t)0 ) - required = n; - LIBRARY_ASSERT( required<=n, "Number of required entry points exceeds their total number" ); - size_t k = 0; - for ( ; k < n; ++k ) { -#if _WIN32||_WIN64 - h[k] = pointer_to_handler(GetProcAddress( (HMODULE)module, descriptors[k].name )); -#else - // Lvalue casting is used; this way icc -strict-ansi does not warn about nonstandard pointer conversion - (void *&)h[k] = dlsym( module, descriptors[k].name ); -#endif /* _WIN32||_WIN64 */ - if ( !h[k] && k < required ) - return false; - } - LIBRARY_ASSERT( k == n, "if required entries are initialized, all entries are expected to be walked"); - // Commit the entry points. - // Cannot use memset here, because the writes must be atomic. - for( k = 0; k < n; ++k ) - *descriptors[k].handler = h[k]; - return true; -} - -#endif /* !__TBB_WEAK_SYMBOLS */ - -bool dynamic_link( const char* library, const dynamic_link_descriptor descriptors[], size_t n, size_t required, dynamic_link_handle* handle ) -{ -#if _WIN32||_WIN64 - // Interpret non-NULL handle parameter as request to really link against another library. - if ( !handle && dynamic_link( GetModuleHandle(NULL), descriptors, n, required ) ) - // Target library was statically linked into this executable - return true; - // Prevent Windows from displaying silly message boxes if it fails to load library - // (e.g. because of MS runtime problems - one of those crazy manifest related ones) -#if _XBOX - dynamic_link_handle module = LoadLibrary (library); -#else - UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS); - dynamic_link_handle module = LoadLibrary (library); - SetErrorMode (prev_mode); -#endif /* _XBOX */ -#else - dynamic_link_handle module = dlopen( library, RTLD_LAZY ); -#endif /* _WIN32||_WIN64 */ - if( module ) { - if( !dynamic_link( module, descriptors, n, required ) ) { - // Return true if the library is there and it contains all the expected entry points. - dynamic_unlink(module); - module = NULL; - } - } - if( handle ) - *handle = module; - return module!=NULL; -} - -void dynamic_unlink( dynamic_link_handle handle ) { - if( handle ) { -#if _WIN32||_WIN64 - FreeLibrary( handle ); -#else - dlclose( handle ); -#endif /* _WIN32||_WIN64 */ - } -} - -CLOSE_INTERNAL_NAMESPACE diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.h b/deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.h deleted file mode 100644 index 6e048b1be9..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/dynamic_link.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_dynamic_link -#define __TBB_dynamic_link - -// Support for dynamically linking to a shared library. -// By default, the symbols defined here go in namespace tbb::internal. -// The symbols can be put in another namespace by defining the preprocessor -// symbols OPEN_INTERNAL_NAMESPACE and CLOSE_INTERNAL_NAMESPACE to open and -// close the other namespace. See default definition below for an example. - -#ifndef OPEN_INTERNAL_NAMESPACE -#define OPEN_INTERNAL_NAMESPACE namespace tbb { namespace internal { -#define CLOSE_INTERNAL_NAMESPACE }} -#endif /* OPEN_INTERNAL_NAMESPACE */ - -#include <stddef.h> -#if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" -#endif /* _WIN32||_WIN64 */ - -OPEN_INTERNAL_NAMESPACE - -//! Type definition for a pointer to a void somefunc(void) -typedef void (*pointer_to_handler)(); - -// Double cast through the void* from func_ptr in DLD macro is necessary to -// prevent warnings from some compilers (g++ 4.1) -#if __TBB_WEAK_SYMBOLS - -#define DLD(s,h) {(pointer_to_handler)&s, (pointer_to_handler*)(void*)(&h)} -//! Association between a handler name and location of pointer to it. -struct dynamic_link_descriptor { - //! pointer to the handler - pointer_to_handler ptr; - //! Pointer to the handler - pointer_to_handler* handler; -}; - -#else /* !__TBB_WEAK_SYMBOLS */ - -#define DLD(s,h) {#s, (pointer_to_handler*)(void*)(&h)} -//! Association between a handler name and location of pointer to it. -struct dynamic_link_descriptor { - //! Name of the handler - const char* name; - //! Pointer to the handler - pointer_to_handler* handler; -}; - -#endif /* !__TBB_WEAK_SYMBOLS */ - -#if _WIN32||_WIN64 -typedef HMODULE dynamic_link_handle; -#else -typedef void* dynamic_link_handle; -#endif /* _WIN32||_WIN64 */ - -//! Fill in dynamically linked handlers. -/** 'n' is the length of the array descriptors[]. - 'required' is the number of the initial entries in the array descriptors[] - that have to be found in order for the call to succeed. If the library and - all the required handlers are found, then the corresponding handler pointers - are set, and the return value is true. Otherwise the original array of - descriptors is left untouched and the return value is false. 
**/ -bool dynamic_link( const char* libraryname, - const dynamic_link_descriptor descriptors[], - size_t n, - size_t required = ~(size_t)0, - dynamic_link_handle* handle = 0 ); - -void dynamic_unlink( dynamic_link_handle handle ); - -CLOSE_INTERNAL_NAMESPACE - -#endif /* __TBB_dynamic_link */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/governor.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/governor.cpp deleted file mode 100644 index 2b972472dc..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/governor.cpp +++ /dev/null @@ -1,340 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "governor.h" -#include "tbb_main.h" -#include "scheduler.h" -#if __TBB_ARENA_PER_MASTER -#include "market.h" -#endif /* __TBB_ARENA_PER_MASTER */ -#include "arena.h" - -#include "tbb/task_scheduler_init.h" - -#if __TBB_SURVIVE_THREAD_SWITCH -#include "dynamic_link.h" -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -namespace tbb { -namespace internal { - -//------------------------------------------------------------------------ -// governor -//------------------------------------------------------------------------ - -#if __TBB_SURVIVE_THREAD_SWITCH - -#if _WIN32 -#define CILKLIB_NAME "cilkrts20.dll" -#else -#define CILKLIB_NAME "libcilkrts.so" -#endif - -//! Handler for memory allocation -static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u, - struct __cilk_tbb_stack_op_thunk o); - -//! Table describing the how to link the handlers. 
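The header above spells out dynamic_link()'s contract: resolve a table of named entry points, commit the resulting function pointers only if the leading 'required' entries were all found, and otherwise leave the descriptors untouched and return false. The POSIX-only sketch below mimics that contract directly with dlopen/dlsym, as the deleted dynamic_link.cpp does on non-Windows platforms; the library name, symbol name and fixed-size buffer are placeholders, not anything taken from the patch. Build with -ldl on glibc.

#include <dlfcn.h>
#include <cstdio>

typedef void (*handler_t)();

struct descriptor { const char* name; handler_t* handler; };

// Resolve every descriptor from 'library'; commit the pointers only if the
// first 'required' entries were all found (mirrors dynamic_link's contract).
static bool link_all(const char* library, descriptor* d, int n, int required) {
    void* module = dlopen(library, RTLD_LAZY);
    if (!module) return false;
    if (n > 16) { dlclose(module); return false; }     // keep the sketch simple
    handler_t tmp[16] = {};
    for (int k = 0; k < n; ++k) {
        *(void**)&tmp[k] = dlsym(module, d[k].name);   // lvalue cast, as in the deleted code
        if (!tmp[k] && k < required) { dlclose(module); return false; }
    }
    for (int k = 0; k < n; ++k) *d[k].handler = tmp[k];   // commit only after everything resolved
    return true;                                          // handle kept open, plugin-style
}

static handler_t resolved_fn = 0;

int main() {
    descriptor table[] = { { "some_optional_symbol", &resolved_fn } };  // placeholder name
    bool ok = link_all("libm.so.6", table, 1, /*required=*/0);          // absence is tolerated
    std::printf("linked: %d, symbol found: %d\n", ok ? 1 : 0, resolved_fn ? 1 : 0);
    return 0;
}

In the deleted sources this is exactly how CondVarLinkTable and CilkLinkTable are consumed: a static table of DLD() entries, one dynamic_link() call, and function pointers that keep their statically initialized fallbacks (or stay NULL, as with watch_stack_handler) when the probe fails.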
-static const dynamic_link_descriptor CilkLinkTable[] = { - DLD(__cilkrts_watch_stack, watch_stack_handler) -}; - -void initialize_survive_thread_switch() { - dynamic_link( CILKLIB_NAME, CilkLinkTable, 1 ); -} -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -namespace rml { - tbb_server* make_private_server( tbb_client& client ); -} - -void governor::acquire_resources () { -#if USE_PTHREAD - int status = theTLS.create(auto_terminate); -#else - int status = theTLS.create(); -#endif - if( status ) - handle_perror(status, "TBB failed to initialize TLS storage\n"); - - ::rml::factory::status_type res = theRMLServerFactory.open(); - UsePrivateRML = res != ::rml::factory::st_success; -} - -void governor::release_resources () { - theRMLServerFactory.close(); -#if TBB_USE_ASSERT - if( __TBB_InitOnce::initialization_done() && theTLS.get() ) - runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" ); -#endif - int status = theTLS.destroy(); - if( status ) - handle_perror(status, "TBB failed to destroy TLS storage"); -} - -rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) { - rml::tbb_server* server = NULL; - if( !UsePrivateRML ) { - ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client ); - if( status != ::rml::factory::st_success ) { - UsePrivateRML = true; - runtime_warning( "rml::tbb_factorymake_server failed with status %x, falling back on private rml", status ); - } - } - if ( !server ) { - __TBB_ASSERT( UsePrivateRML, NULL ); - server = rml::make_private_server( client ); - } - __TBB_ASSERT( server, "Failed to create RML server" ); - return server; -} - -#if !__TBB_ARENA_PER_MASTER - -arena* governor::obtain_arena( int number_of_threads, stack_size_type thread_stack_size ) { - mutex::scoped_lock lock( theArenaMutex ); - arena* arena = theArena; - if( arena ) { - arena->prefix().number_of_masters += 1; - } else { - __TBB_ASSERT( number_of_threads > 0, NULL ); - arena = arena::allocate_arena( 2*number_of_threads, number_of_threads-1, - thread_stack_size ? thread_stack_size : ThreadStackSize ); - __TBB_ASSERT( arena->prefix().number_of_masters==1, NULL ); - NumWorkers = arena->prefix().number_of_workers; - - arena->prefix().server = create_rml_server( arena->prefix() ); - - // Publish the arena. - // A memory release fence is not required here, because workers have not started yet, - // and concurrent masters inspect theArena while holding theArenaMutex. - __TBB_ASSERT( !theArena, NULL ); - theArena = arena; - } - return arena; -} - -void governor::finish_with_arena() { - mutex::scoped_lock lock( theArenaMutex ); - arena* a = theArena; - __TBB_ASSERT( a, "theArena is missing" ); - if( --(a->prefix().number_of_masters) ) - a = NULL; - else { - theArena = NULL; - // Must do this while holding lock, otherwise terminate message might reach - // RML thread *after* initialize message reaches it for the next arena, - // which causes TLS to be set to new value before old one is erased! 
- a->close_arena(); - } -} -#endif /* !__TBB_ARENA_PER_MASTER */ - -void governor::sign_on(generic_scheduler* s) { - __TBB_ASSERT( !s->is_registered, NULL ); - s->is_registered = true; -#if !__TBB_ARENA_PER_MASTER - __TBB_InitOnce::add_ref(); -#endif /* !__TBB_ARENA_PER_MASTER */ - theTLS.set(s); -#if __TBB_SURVIVE_THREAD_SWITCH - __cilk_tbb_stack_op_thunk o; - o.routine = &stack_op_handler; - o.data = s; - if( watch_stack_handler ) { - if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) { - // Failed to register with Cilk, make sure we are clean - s->my_cilk_unwatch_thunk.routine = NULL; - } -#if TBB_USE_ASSERT - else - s->my_cilk_state = generic_scheduler::cs_running; -#endif /* TBB_USE_ASSERT */ - } -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -} - -void governor::sign_off(generic_scheduler* s) { - if( s->is_registered ) { - __TBB_ASSERT( theTLS.get()==s || (!s->is_worker() && !theTLS.get()), "attempt to unregister a wrong scheduler instance" ); - theTLS.set(NULL); - s->is_registered = false; -#if __TBB_SURVIVE_THREAD_SWITCH - __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk; - if ( ut.routine ) - (*ut.routine)(ut.data); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -#if !__TBB_ARENA_PER_MASTER - __TBB_InitOnce::remove_ref(); -#endif /* !__TBB_ARENA_PER_MASTER */ - } -} - -generic_scheduler* governor::init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init ) { - if( !__TBB_InitOnce::initialization_done() ) - DoOneTimeInitializations(); - generic_scheduler* s = theTLS.get(); - if( s ) { - s->ref_count += 1; - return s; - } - if( (int)num_threads == task_scheduler_init::automatic ) - num_threads = default_num_threads(); -#if __TBB_ARENA_PER_MASTER - s = generic_scheduler::create_master( - market::create_arena( num_threads - 1, stack_size ? stack_size : ThreadStackSize ) ); -#else /* !__TBB_ARENA_PER_MASTER */ - s = generic_scheduler::create_master( *obtain_arena(num_threads, stack_size) ); -#endif /* !__TBB_ARENA_PER_MASTER */ - __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed"); - s->is_auto_initialized = auto_init; - return s; -} - -void governor::terminate_scheduler( generic_scheduler* s ) { - __TBB_ASSERT( s == theTLS.get(), "Attempt to terminate non-local scheduler instance" ); - if( !--(s->ref_count) ) - s->cleanup_master(); -} - -void governor::auto_terminate(void* arg){ - generic_scheduler* s = static_cast<generic_scheduler*>(arg); - if( s && s->is_auto_initialized ) { - if( !--(s->ref_count) ) { - if ( !theTLS.get() && !s->local_task_pool_empty() ) { - // This thread's TLS slot is already cleared. But in order to execute - // remaining tasks cleanup_master() will need TLS correctly set. - // So we temporarily restore its value. 
- theTLS.set(s); - s->cleanup_master(); - theTLS.set(NULL); - } - else - s->cleanup_master(); - } - } -} - -void governor::print_version_info () { - if ( UsePrivateRML ) - PrintExtraVersionInfo( "RML", "private" ); - else { - PrintExtraVersionInfo( "RML", "shared" ); - theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" ); - } -#if __TBB_SURVIVE_THREAD_SWITCH - if( watch_stack_handler ) - PrintExtraVersionInfo( "CILK", CILKLIB_NAME ); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -} - -#if __TBB_SURVIVE_THREAD_SWITCH -__cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) { - __TBB_ASSERT(data,NULL); - generic_scheduler* s = static_cast<generic_scheduler*>(data); -#if TBB_USE_ASSERT - void* current = theTLS.get(); -#if _WIN32||_WIN64 - unsigned thread_id = GetCurrentThreadId(); -#else - unsigned thread_id = unsigned(pthread_self()); -#endif - -#endif /* TBB_USE_ASSERT */ - switch( op ) { - default: - __TBB_ASSERT( 0, "invalid op" ); - case CILK_TBB_STACK_ADOPT: { - __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || - current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" ); -#if TBB_USE_ASSERT - if( current==s ) - runtime_warning( "redundant adoption of %p by thread %x\n", s, thread_id ); - s->my_cilk_state = generic_scheduler::cs_running; -#endif /* TBB_USE_ASSERT */ - theTLS.set(s); - break; - } - case CILK_TBB_STACK_ORPHAN: { - __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" ); -#if TBB_USE_ASSERT - s->my_cilk_state = generic_scheduler::cs_limbo; -#endif /* TBB_USE_ASSERT */ - theTLS.set(NULL); - break; - } - case CILK_TBB_STACK_RELEASE: { - __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo || - current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" ); -#if TBB_USE_ASSERT - s->my_cilk_state = generic_scheduler::cs_freed; -#endif /* TBB_USE_ASSERT */ - s->my_cilk_unwatch_thunk.routine = NULL; - auto_terminate( s ); - } - } - return 0; -} -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -} // namespace internal - -//------------------------------------------------------------------------ -// task_scheduler_init -//------------------------------------------------------------------------ - -using namespace internal; - -/** Left out-of-line for the sake of the backward binary compatibility **/ -void task_scheduler_init::initialize( int number_of_threads ) { - initialize( number_of_threads, 0 ); -} - -void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) { - if( number_of_threads!=deferred ) { - __TBB_ASSERT( !my_scheduler, "task_scheduler_init already initialized" ); - __TBB_ASSERT( number_of_threads==-1 || number_of_threads>=1, - "number_of_threads for task_scheduler_init must be -1 or positive" ); - my_scheduler = governor::init_scheduler( number_of_threads, thread_stack_size ); - } else { - __TBB_ASSERT( !thread_stack_size, "deferred initialization ignores stack size setting" ); - } -} - -void task_scheduler_init::terminate() { - generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler); - my_scheduler = NULL; - __TBB_ASSERT( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()"); - governor::terminate_scheduler(s); -} - -int task_scheduler_init::default_num_threads() { - return governor::default_num_threads(); -} - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/governor.h 
b/deal.II/bundled/tbb30_104oss/src/tbb/governor.h deleted file mode 100644 index 45b82a19ea..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/governor.h +++ /dev/null @@ -1,197 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_governor_H -#define _TBB_governor_H - -#include "tbb/task_scheduler_init.h" -#if !__TBB_ARENA_PER_MASTER -#include "tbb/mutex.h" -#endif /* !__TBB_ARENA_PER_MASTER */ -#include "../rml/include/rml_tbb.h" - -#include "tbb_misc.h" // for DetectNumberOfWorkers and ThreadStackSize -#include "tls.h" - -#if __TBB_SURVIVE_THREAD_SWITCH -#include "cilk-tbb-interop.h" -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -namespace tbb { -namespace internal { - -#if __TBB_ARENA_PER_MASTER -class market; -#else /* !__TBB_ARENA_PER_MASTER */ -class arena; -#endif /* !__TBB_ARENA_PER_MASTER */ -class generic_scheduler; -class __TBB_InitOnce; - -//------------------------------------------------------------------------ -// Class governor -//------------------------------------------------------------------------ - -#if __TBB_ARENA_PER_MASTER -//! The class handles access to the single instance of market, and to TLS to keep scheduler instances. -#else /* !__TBB_ARENA_PER_MASTER */ -//! The class handles access to the single instance of arena, and to TLS to keep scheduler instances. -#endif /* !__TBB_ARENA_PER_MASTER */ -/** It also supports automatic on-demand initialization of the TBB scheduler. - The class contains only static data members and methods.*/ -class governor { - friend class __TBB_InitOnce; -#if __TBB_ARENA_PER_MASTER - friend class market; -#else /* !__TBB_ARENA_PER_MASTER */ - friend void ITT_DoUnsafeOneTimeInitialization (); -#endif /* __TBB_ARENA_PER_MASTER */ - - //! TLS for scheduler instances associated with individual threads - static basic_tls<generic_scheduler*> theTLS; - -#if !__TBB_ARENA_PER_MASTER - //! Currently active arena - static arena* theArena; - - //! Mutex guarding creation/destruction of theArena - static mutex theArenaMutex; - - //! Caches the number of workers in the currently active arena - static unsigned NumWorkers; -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! 
Caches the maximal level of paralellism supported by the hardware - static unsigned DefaultNumberOfThreads; - - static rml::tbb_factory theRMLServerFactory; - - static bool UsePrivateRML; - - //! Create key for thread-local storage and initialize RML. - static void acquire_resources (); - - //! Destroy the thread-local storage key and deinitialize RML. - static void release_resources (); - - static rml::tbb_server* create_rml_server ( rml::tbb_client& ); - -#if !__TBB_ARENA_PER_MASTER - //! Obtain the instance of arena to register a new master thread - /** If there is no active arena, create one. */ - static arena* obtain_arena( int number_of_threads, stack_size_type thread_stack_size ); -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! The internal routine to undo automatic initialization. - /** The signature is written with void* so that the routine - can be the destructor argument to pthread_key_create. */ - static void auto_terminate(void* scheduler); - -public: - static unsigned default_num_threads () { - // No memory fence required, because at worst each invoking thread calls DetectNumberOfWorkers once. - return DefaultNumberOfThreads ? DefaultNumberOfThreads : - DefaultNumberOfThreads = DetectNumberOfWorkers(); - } - //! Processes scheduler initialization request (possibly nested) in a master thread - /** If necessary creates new instance of arena and/or local scheduler. - The auto_init argument specifies if the call is due to automatic initialization. **/ - static generic_scheduler* init_scheduler( unsigned num_threads, stack_size_type stack_size, bool auto_init = false ); - - //! Processes scheduler termination request (possibly nested) in a master thread - static void terminate_scheduler( generic_scheduler* s ); - -#if __TBB_ARENA_PER_MASTER - //! Returns number of worker threads in the currently active arena. - inline static unsigned max_number_of_workers (); - -#else /* !__TBB_ARENA_PER_MASTER */ - //! Dereference arena when a master thread stops using TBB. - /** If no more masters in the arena, terminate workers and destroy it. */ - static void finish_with_arena(); - - static unsigned max_number_of_workers() { - __TBB_ASSERT( theArena, "thread did not activate a task_scheduler_init object?" ); - return NumWorkers; - } -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! Register TBB scheduler instance in thread local storage. - static void sign_on(generic_scheduler* s); - - //! Unregister TBB scheduler instance from thread local storage. - static void sign_off(generic_scheduler* s); - - //! Used to check validity of the local scheduler TLS contents. - static bool is_set ( generic_scheduler* s ) { return theTLS.get() == s; } - - //! Temporarily set TLS slot to the given scheduler - static void assume_scheduler( generic_scheduler* s ) { -#if !__TBB_ARENA_PER_MASTER - // should be called by a Master - __TBB_ASSERT( !s || !theTLS.get(), "should be called by master" ); -#endif - theTLS.set( s ); - } - - //! Obtain the thread local instance of the TBB scheduler. - /** If the scheduler has not been initialized yet, initialization is done automatically. - Note that auto-initialized scheduler instance is destroyed only when its thread terminates. **/ - static generic_scheduler* local_scheduler () { - generic_scheduler* s = theTLS.get(); - return s ? s : init_scheduler( (unsigned)task_scheduler_init::automatic, 0, true ); - } - - static generic_scheduler* local_scheduler_if_initialized () { - return theTLS.get(); - } - - //! Undo automatic initialization if necessary; call when a thread exits. 
- static void terminate_auto_initialized_scheduler() { - auto_terminate( theTLS.get() ); - } - - static void print_version_info (); - -#if __TBB_SURVIVE_THREAD_SWITCH - static __cilk_tbb_retcode stack_op_handler( __cilk_tbb_stack_op op, void* ); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -}; // class governor - -} // namespace internal -} // namespace tbb - -#if __TBB_ARENA_PER_MASTER -#include "scheduler.h" - -inline unsigned tbb::internal::governor::max_number_of_workers () { - return local_scheduler()->number_of_workers_in_my_arena(); -} -#endif /* __TBB_ARENA_PER_MASTER */ - -#endif /* _TBB_governor_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/atomic_support.asm b/deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/atomic_support.asm deleted file mode 100644 index 8c881dc84f..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/atomic_support.asm +++ /dev/null @@ -1,196 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. 
- -.686 -.model flat,c -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd1 -__TBB_machine_fetchadd1: - mov edx,4[esp] - mov eax,8[esp] - lock xadd [edx],al - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore1 -__TBB_machine_fetchstore1: - mov edx,4[esp] - mov eax,8[esp] - lock xchg [edx],al - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp1 -__TBB_machine_cmpswp1: - mov edx,4[esp] - mov ecx,8[esp] - mov eax,12[esp] - lock cmpxchg [edx],cl - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd2 -__TBB_machine_fetchadd2: - mov edx,4[esp] - mov eax,8[esp] - lock xadd [edx],ax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore2 -__TBB_machine_fetchstore2: - mov edx,4[esp] - mov eax,8[esp] - lock xchg [edx],ax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp2 -__TBB_machine_cmpswp2: - mov edx,4[esp] - mov ecx,8[esp] - mov eax,12[esp] - lock cmpxchg [edx],cx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd4 -__TBB_machine_fetchadd4: - mov edx,4[esp] - mov eax,8[esp] - lock xadd [edx],eax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore4 -__TBB_machine_fetchstore4: - mov edx,4[esp] - mov eax,8[esp] - lock xchg [edx],eax - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp4 -__TBB_machine_cmpswp4: - mov edx,4[esp] - mov ecx,8[esp] - mov eax,12[esp] - lock cmpxchg [edx],ecx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchadd8 -__TBB_machine_fetchadd8: - push ebx - push edi - mov edi,12[esp] - mov eax,[edi] - mov edx,4[edi] -__TBB_machine_fetchadd8_loop: - mov ebx,16[esp] - mov ecx,20[esp] - add ebx,eax - adc ecx,edx - lock cmpxchg8b qword ptr [edi] - jnz __TBB_machine_fetchadd8_loop - pop edi - pop ebx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_fetchstore8 -__TBB_machine_fetchstore8: - push ebx - push edi - mov edi,12[esp] - mov ebx,16[esp] - mov ecx,20[esp] - mov eax,[edi] - mov edx,4[edi] -__TBB_machine_fetchstore8_loop: - lock cmpxchg8b qword ptr [edi] - jnz __TBB_machine_fetchstore8_loop - pop edi - pop ebx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_cmpswp8 -__TBB_machine_cmpswp8: - push ebx - push edi - mov edi,12[esp] - mov ebx,16[esp] - mov ecx,20[esp] - mov eax,24[esp] - mov edx,28[esp] - lock cmpxchg8b qword ptr [edi] - pop edi - pop ebx - ret -.code - ALIGN 4 - PUBLIC c __TBB_machine_load8 -__TBB_machine_Load8: - ; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check. - mov ecx,4[esp] - test ecx,7 - jne load_slow - ; Load within a cache line - sub esp,12 - fild qword ptr [ecx] - fistp qword ptr [esp] - mov eax,[esp] - mov edx,4[esp] - add esp,12 - ret -load_slow: - ; Load is misaligned. Use cmpxchg8b. - push ebx - push edi - mov edi,ecx - xor eax,eax - xor ebx,ebx - xor ecx,ecx - xor edx,edx - lock cmpxchg8b qword ptr [edi] - pop edi - pop ebx - ret -EXTRN __TBB_machine_store8_slow:PROC -.code - ALIGN 4 - PUBLIC c __TBB_machine_store8 -__TBB_machine_Store8: - ; If location is on stack, compiler may have failed to align it correctly, so we do dynamic check. - mov ecx,4[esp] - test ecx,7 - jne __TBB_machine_store8_slow ;; tail call to tbb_misc.cpp - fild qword ptr 8[esp] - fistp qword ptr [ecx] - ret -end diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/lock_byte.asm b/deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/lock_byte.asm deleted file mode 100644 index be1552fcc8..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ia32-masm/lock_byte.asm +++ /dev/null @@ -1,46 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. 
-; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE -.686 -.model flat,c -.code - ALIGN 4 - PUBLIC c __TBB_machine_trylockbyte -__TBB_machine_trylockbyte: - mov edx,4[esp] - mov al,[edx] - mov cl,1 - test al,1 - jnz __TBB_machine_trylockbyte_contended - lock cmpxchg [edx],cl - jne __TBB_machine_trylockbyte_contended - mov eax,1 - ret -__TBB_machine_trylockbyte_contended: - xor eax,eax - ret -end diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/atomic_support.s b/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/atomic_support.s deleted file mode 100644 index f7c6835343..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/atomic_support.s +++ /dev/null @@ -1,678 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. 
- -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd1__TBB_full_fence# - .global __TBB_machine_fetchadd1__TBB_full_fence# -__TBB_machine_fetchadd1__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd1acquire -} - .endp __TBB_machine_fetchadd1__TBB_full_fence# - - .proc __TBB_machine_fetchadd1acquire# - .global __TBB_machine_fetchadd1acquire# -__TBB_machine_fetchadd1acquire: - - - - - - - - ld1 r9=[r32] -;; -Retry_1acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg1.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_1acquire - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd1acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore1__TBB_full_fence# - .global __TBB_machine_fetchstore1__TBB_full_fence# -__TBB_machine_fetchstore1__TBB_full_fence: - mf -;; - xchg1 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore1__TBB_full_fence# - - - .proc __TBB_machine_fetchstore1acquire# - .global __TBB_machine_fetchstore1acquire# -__TBB_machine_fetchstore1acquire: - xchg1 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore1acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp1__TBB_full_fence# - .global __TBB_machine_cmpswp1__TBB_full_fence# -__TBB_machine_cmpswp1__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp1acquire -} - .endp __TBB_machine_cmpswp1__TBB_full_fence# - - .proc __TBB_machine_cmpswp1acquire# - .global __TBB_machine_cmpswp1acquire# -__TBB_machine_cmpswp1acquire: - - zxt1 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg1.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp1acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd2__TBB_full_fence# - .global __TBB_machine_fetchadd2__TBB_full_fence# -__TBB_machine_fetchadd2__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd2acquire -} - .endp __TBB_machine_fetchadd2__TBB_full_fence# - - .proc __TBB_machine_fetchadd2acquire# - .global __TBB_machine_fetchadd2acquire# -__TBB_machine_fetchadd2acquire: - - - - - - - - ld2 r9=[r32] -;; -Retry_2acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg2.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_2acquire - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd2acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore2__TBB_full_fence# - .global __TBB_machine_fetchstore2__TBB_full_fence# -__TBB_machine_fetchstore2__TBB_full_fence: - mf -;; - xchg2 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore2__TBB_full_fence# - - - .proc __TBB_machine_fetchstore2acquire# - .global __TBB_machine_fetchstore2acquire# -__TBB_machine_fetchstore2acquire: - xchg2 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore2acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp2__TBB_full_fence# - .global __TBB_machine_cmpswp2__TBB_full_fence# -__TBB_machine_cmpswp2__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp2acquire -} - .endp __TBB_machine_cmpswp2__TBB_full_fence# - - .proc __TBB_machine_cmpswp2acquire# - .global __TBB_machine_cmpswp2acquire# 
-__TBB_machine_cmpswp2acquire: - - zxt2 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg2.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp2acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd4__TBB_full_fence# - .global __TBB_machine_fetchadd4__TBB_full_fence# -__TBB_machine_fetchadd4__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd4acquire -} - .endp __TBB_machine_fetchadd4__TBB_full_fence# - - .proc __TBB_machine_fetchadd4acquire# - .global __TBB_machine_fetchadd4acquire# -__TBB_machine_fetchadd4acquire: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_4acquire - (p8) br.cond.dpnt Dec_4acquire -;; - - ld4 r9=[r32] -;; -Retry_4acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg4.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_4acquire - br.ret.sptk.many b0 - -Inc_4acquire: - fetchadd4.acq r8=[r32],1 - br.ret.sptk.many b0 -Dec_4acquire: - fetchadd4.acq r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd4acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore4__TBB_full_fence# - .global __TBB_machine_fetchstore4__TBB_full_fence# -__TBB_machine_fetchstore4__TBB_full_fence: - mf -;; - xchg4 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore4__TBB_full_fence# - - - .proc __TBB_machine_fetchstore4acquire# - .global __TBB_machine_fetchstore4acquire# -__TBB_machine_fetchstore4acquire: - xchg4 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore4acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp4__TBB_full_fence# - .global __TBB_machine_cmpswp4__TBB_full_fence# -__TBB_machine_cmpswp4__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp4acquire -} - .endp __TBB_machine_cmpswp4__TBB_full_fence# - - .proc __TBB_machine_cmpswp4acquire# - .global __TBB_machine_cmpswp4acquire# -__TBB_machine_cmpswp4acquire: - - zxt4 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg4.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp4acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 - - - .proc __TBB_machine_fetchadd8__TBB_full_fence# - .global __TBB_machine_fetchadd8__TBB_full_fence# -__TBB_machine_fetchadd8__TBB_full_fence: -{ - mf - br __TBB_machine_fetchadd8acquire -} - .endp __TBB_machine_fetchadd8__TBB_full_fence# - - .proc __TBB_machine_fetchadd8acquire# - .global __TBB_machine_fetchadd8acquire# -__TBB_machine_fetchadd8acquire: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_8acquire - (p8) br.cond.dpnt Dec_8acquire -;; - - ld8 r9=[r32] -;; -Retry_8acquire: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg8.acq r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_8acquire - br.ret.sptk.many b0 - -Inc_8acquire: - fetchadd8.acq r8=[r32],1 - br.ret.sptk.many b0 -Dec_8acquire: - fetchadd8.acq r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd8acquire# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore8__TBB_full_fence# - .global __TBB_machine_fetchstore8__TBB_full_fence# -__TBB_machine_fetchstore8__TBB_full_fence: - mf -;; - xchg8 r8=[r32],r33 - br.ret.sptk.many b0 - .endp 
__TBB_machine_fetchstore8__TBB_full_fence# - - - .proc __TBB_machine_fetchstore8acquire# - .global __TBB_machine_fetchstore8acquire# -__TBB_machine_fetchstore8acquire: - xchg8 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore8acquire# -# 88 "<stdin>" - .section .text - .align 16 - - - .proc __TBB_machine_cmpswp8__TBB_full_fence# - .global __TBB_machine_cmpswp8__TBB_full_fence# -__TBB_machine_cmpswp8__TBB_full_fence: -{ - mf - br __TBB_machine_cmpswp8acquire -} - .endp __TBB_machine_cmpswp8__TBB_full_fence# - - .proc __TBB_machine_cmpswp8acquire# - .global __TBB_machine_cmpswp8acquire# -__TBB_machine_cmpswp8acquire: - - - - - mov ar.ccv=r34 -;; - cmpxchg8.acq r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp8acquire# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd1release# - .global __TBB_machine_fetchadd1release# -__TBB_machine_fetchadd1release: - - - - - - - - ld1 r9=[r32] -;; -Retry_1release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg1.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_1release - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd1release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore1release# - .global __TBB_machine_fetchstore1release# -__TBB_machine_fetchstore1release: - mf -;; - xchg1 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore1release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp1release# - .global __TBB_machine_cmpswp1release# -__TBB_machine_cmpswp1release: - - zxt1 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg1.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp1release# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd2release# - .global __TBB_machine_fetchadd2release# -__TBB_machine_fetchadd2release: - - - - - - - - ld2 r9=[r32] -;; -Retry_2release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg2.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_2release - br.ret.sptk.many b0 -# 49 "<stdin>" - .endp __TBB_machine_fetchadd2release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore2release# - .global __TBB_machine_fetchstore2release# -__TBB_machine_fetchstore2release: - mf -;; - xchg2 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore2release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp2release# - .global __TBB_machine_cmpswp2release# -__TBB_machine_cmpswp2release: - - zxt2 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg2.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp2release# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd4release# - .global __TBB_machine_fetchadd4release# -__TBB_machine_fetchadd4release: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_4release - (p8) br.cond.dpnt Dec_4release -;; - - ld4 
r9=[r32] -;; -Retry_4release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg4.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_4release - br.ret.sptk.many b0 - -Inc_4release: - fetchadd4.rel r8=[r32],1 - br.ret.sptk.many b0 -Dec_4release: - fetchadd4.rel r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd4release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore4release# - .global __TBB_machine_fetchstore4release# -__TBB_machine_fetchstore4release: - mf -;; - xchg4 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore4release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp4release# - .global __TBB_machine_cmpswp4release# -__TBB_machine_cmpswp4release: - - zxt4 r34=r34 -;; - - mov ar.ccv=r34 -;; - cmpxchg4.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp4release# -// DO NOT EDIT - AUTOMATICALLY GENERATED FROM tools/generate_atomic/ipf_generate.sh -# 1 "<stdin>" -# 1 "<built-in>" -# 1 "<command line>" -# 1 "<stdin>" - - - - - - .section .text - .align 16 -# 19 "<stdin>" - .proc __TBB_machine_fetchadd8release# - .global __TBB_machine_fetchadd8release# -__TBB_machine_fetchadd8release: - - cmp.eq p6,p0=1,r33 - cmp.eq p8,p0=-1,r33 - (p6) br.cond.dptk Inc_8release - (p8) br.cond.dpnt Dec_8release -;; - - ld8 r9=[r32] -;; -Retry_8release: - mov ar.ccv=r9 - mov r8=r9; - add r10=r9,r33 -;; - cmpxchg8.rel r9=[r32],r10,ar.ccv -;; - cmp.ne p7,p0=r8,r9 - (p7) br.cond.dpnt Retry_8release - br.ret.sptk.many b0 - -Inc_8release: - fetchadd8.rel r8=[r32],1 - br.ret.sptk.many b0 -Dec_8release: - fetchadd8.rel r8=[r32],-1 - br.ret.sptk.many b0 - - .endp __TBB_machine_fetchadd8release# -# 62 "<stdin>" - .section .text - .align 16 - .proc __TBB_machine_fetchstore8release# - .global __TBB_machine_fetchstore8release# -__TBB_machine_fetchstore8release: - mf -;; - xchg8 r8=[r32],r33 - br.ret.sptk.many b0 - .endp __TBB_machine_fetchstore8release# -# 88 "<stdin>" - .section .text - .align 16 -# 101 "<stdin>" - .proc __TBB_machine_cmpswp8release# - .global __TBB_machine_cmpswp8release# -__TBB_machine_cmpswp8release: - - - - - mov ar.ccv=r34 -;; - cmpxchg8.rel r8=[r32],r33,ar.ccv - br.ret.sptk.many b0 - .endp __TBB_machine_cmpswp8release# diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s b/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s deleted file mode 100644 index d4233d2945..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/ia64_misc.s +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. 
Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. - - // RSE backing store pointer retrieval - .section .text - .align 16 - .proc __TBB_get_bsp# - .global __TBB_get_bsp# -__TBB_get_bsp: - mov r8=ar.bsp - br.ret.sptk.many b0 - .endp __TBB_get_bsp# diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/lock_byte.s b/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/lock_byte.s deleted file mode 100644 index 932bd378ed..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/lock_byte.s +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. - - // Support for class TinyLock - .section .text - .align 16 - // unsigned int __TBB_machine_trylockbyte( byte& flag ); - // r32 = address of flag - .proc __TBB_machine_trylockbyte# - .global __TBB_machine_trylockbyte# -ADDRESS_OF_FLAG=r32 -RETCODE=r8 -FLAG=r9 -BUSY=r10 -SCRATCH=r11 -__TBB_machine_trylockbyte: - ld1.acq FLAG=[ADDRESS_OF_FLAG] - mov BUSY=1 - mov RETCODE=0 -;; - cmp.ne p6,p0=0,FLAG - mov ar.ccv=r0 -(p6) br.ret.sptk.many b0 -;; - cmpxchg1.acq SCRATCH=[ADDRESS_OF_FLAG],BUSY,ar.ccv // Try to acquire lock -;; - cmp.eq p6,p0=0,SCRATCH -;; -(p6) mov RETCODE=1 - br.ret.sptk.many b0 - .endp __TBB_machine_trylockbyte# diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/log2.s b/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/log2.s deleted file mode 100644 index ed07b988be..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/log2.s +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. 
-// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. - - // Support for class ConcurrentVector - .section .text - .align 16 - // unsigned long __TBB_machine_lg( unsigned long x ); - // r32 = x - .proc __TBB_machine_lg# - .global __TBB_machine_lg# -__TBB_machine_lg: - shr r16=r32,1 // .x -;; - shr r17=r32,2 // ..x - or r32=r32,r16 // xx -;; - shr r16=r32,3 // ...xx - or r32=r32,r17 // xxx -;; - shr r17=r32,5 // .....xxx - or r32=r32,r16 // xxxxx -;; - shr r16=r32,8 // ........xxxxx - or r32=r32,r17 // xxxxxxxx -;; - shr r17=r32,13 - or r32=r32,r16 // 13x -;; - shr r16=r32,21 - or r32=r32,r17 // 21x -;; - shr r17=r32,34 - or r32=r32,r16 // 34x -;; - shr r16=r32,55 - or r32=r32,r17 // 55x -;; - or r32=r32,r16 // 64x -;; - popcnt r8=r32 -;; - add r8=-1,r8 - br.ret.sptk.many b0 - .endp __TBB_machine_lg# diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/pause.s b/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/pause.s deleted file mode 100644 index 45c2bb7ba8..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ia64-gas/pause.s +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. 
- - .section .text - .align 16 - // void __TBB_machine_pause( long count ); - // r32 = count - .proc __TBB_machine_pause# - .global __TBB_machine_pause# -count = r32 -__TBB_machine_pause: - hint.m 0 - add count=-1,count -;; - cmp.eq p6,p7=0,count -(p7) br.cond.dpnt __TBB_machine_pause -(p6) br.ret.sptk.many b0 - .endp __TBB_machine_pause# diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/ibm_aix51/atomic_support.c b/deal.II/bundled/tbb30_104oss/src/tbb/ibm_aix51/atomic_support.c deleted file mode 100644 index b72fe7a47d..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/ibm_aix51/atomic_support.c +++ /dev/null @@ -1,55 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include <stdint.h> -#include <sys/atomic_op.h> - -/* This file must be compiled with gcc. The IBM compiler doesn't seem to - support inline assembly statements (October 2007). */ - -#ifdef __GNUC__ - -int32_t __TBB_machine_cas_32 (volatile void* ptr, int32_t value, int32_t comparand) { - __asm__ __volatile__ ("sync\n"); /* memory release operation */ - compare_and_swap ((atomic_p) ptr, &comparand, value); - __asm__ __volatile__ ("sync\n"); /* memory acquire operation */ - return comparand; -} - -int64_t __TBB_machine_cas_64 (volatile void* ptr, int64_t value, int64_t comparand) { - __asm__ __volatile__ ("sync\n"); /* memory release operation */ - compare_and_swaplp ((atomic_l) ptr, &comparand, value); - __asm__ __volatile__ ("sync\n"); /* memory acquire operation */ - return comparand; -} - -void __TBB_machine_flush () { - __asm__ __volatile__ ("sync\n"); -} - -#endif /* __GNUC__ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/index.html b/deal.II/bundled/tbb30_104oss/src/tbb/index.html deleted file mode 100644 index 7ac305c1a3..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/index.html +++ /dev/null @@ -1,32 +0,0 @@ -<HTML> -<BODY> - -<H2>Overview</H2> -This directory contains the source code of the TBB core components. - -<H2>Directories</H2> -<DL> -<DT><A HREF="tools_api">tools_api</A> -<DD>Source code of the interface components provided by the Intel® Parallel Studio tools. -<DT><A HREF="intel64-masm">intel64-masm</A> -<DD>Assembly code for the Intel® 64 architecture. 
-<DT><A HREF="ia32-masm">ia32-masm</A> -<DD>Assembly code for IA32 architecture. -<DT><A HREF="ia64-gas">ia64-gas</A> -<DD>Assembly code for IA64 architecture. -<DT><A HREF="ibm_aix51">ibm_aix51</A> -<DD>Assembly code for AIX 5.1 port. -</DL> - -<HR> -<A HREF="../index.html">Up to parent directory</A> -<p></p> -Copyright © 2005-2010 Intel Corporation. All Rights Reserved. -<p></p> -Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are -registered trademarks or trademarks of Intel Corporation or its -subsidiaries in the United States and other countries. -<p></p> -* Other names and brands may be claimed as the property of others. -</BODY> -</HTML> diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/intel64-masm/atomic_support.asm b/deal.II/bundled/tbb30_104oss/src/tbb/intel64-masm/atomic_support.asm deleted file mode 100644 index 0431221482..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/intel64-masm/atomic_support.asm +++ /dev/null @@ -1,80 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -; DO NOT EDIT - AUTOMATICALLY GENERATED FROM .s FILE -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchadd1 -__TBB_machine_fetchadd1: - mov rax,rdx - lock xadd [rcx],al - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchstore1 -__TBB_machine_fetchstore1: - mov rax,rdx - lock xchg [rcx],al - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_cmpswp1 -__TBB_machine_cmpswp1: - mov rax,r8 - lock cmpxchg [rcx],dl - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchadd2 -__TBB_machine_fetchadd2: - mov rax,rdx - lock xadd [rcx],ax - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_fetchstore2 -__TBB_machine_fetchstore2: - mov rax,rdx - lock xchg [rcx],ax - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_cmpswp2 -__TBB_machine_cmpswp2: - mov rax,r8 - lock cmpxchg [rcx],dx - ret -.code - ALIGN 8 - PUBLIC __TBB_machine_pause -__TBB_machine_pause: -L1: - dw 090f3H; pause - add ecx,-1 - jne L1 - ret -end - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/intrusive_list.h b/deal.II/bundled/tbb30_104oss/src/tbb/intrusive_list.h deleted file mode 100644 index ed1bf1d83c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/intrusive_list.h +++ /dev/null @@ -1,255 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. 
All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_intrusive_list_H -#define _TBB_intrusive_list_H - -#include "tbb/tbb_stddef.h" - -#if __TBB_ARENA_PER_MASTER - -namespace tbb { -namespace internal { - -//! Data structure to be inherited by the types that can form intrusive lists. -/** Intrusive list is formed by means of the member_intrusive_list<T> template class. - Note that type T must derive from intrusive_list_node either publicly or - declare instantiation member_intrusive_list<T> as a friend. - This class implements a limited subset of std::list interface. **/ -struct intrusive_list_node { - intrusive_list_node *my_prev_node, - *my_next_node; -#if TBB_USE_ASSERT - intrusive_list_node () { my_prev_node = my_next_node = this; } -#endif /* TBB_USE_ASSERT */ -}; - -//! List of element of type T, where T is derived from intrusive_list_node -/** The class is not thread safe. **/ -template <class List, class T> -class intrusive_list_base { - //! Pointer to the head node - intrusive_list_node my_head; - - //! Number of list elements - size_t my_size; - - static intrusive_list_node& node ( T& item ) { return List::node(item); } - - static T& item ( intrusive_list_node* node ) { return List::item(node); } - - template<class Iterator> - class iterator_impl { - Iterator& self () { return *static_cast<Iterator*>(this); } - - //! Pointer to the head of the list being iterated - intrusive_list_node *my_list_head; - - //! 
Node the iterator points to at the moment - intrusive_list_node *my_pos; - - protected: - iterator_impl ( intrusive_list_node* head, intrusive_list_node* pos ) - : my_list_head(head), my_pos(pos) - {} - - T& item () const { - //return *reinterpret_cast<T*>((char*)my_pos - ((ptrdiff_t)&(reinterpret_cast<T*>(0x1000)->*NodePtr) - 0x1000)); - return intrusive_list_base::item(my_pos); - } - - public: - iterator_impl () : my_list_head(NULL), my_pos(NULL) {} - - bool operator == ( const Iterator& it ) const { - return my_pos == it.my_pos; - } - - bool operator != ( const Iterator& it ) const { - return my_pos != it.my_pos; - } - - Iterator& operator++ () { - my_pos = my_pos->my_next_node; - return self(); - } - - Iterator& operator-- () { - my_pos = my_pos->my_prev_node; - return self(); - } - - Iterator operator++ ( int ) { - Iterator result = self(); - ++(*this); - return result; - } - - Iterator operator-- ( int ) { - Iterator result = self(); - --(*this); - return result; - } - }; // intrusive_list_base::iterator_impl - - void assert_ok () const { - __TBB_ASSERT( (my_head.my_prev_node == &my_head && !my_size) || - (my_head.my_next_node != &my_head && my_size >0), "intrusive_list_base corrupted" ); -#if TBB_USE_ASSERT >= 2 - size_t i = 0; - for ( intrusive_list_node *n = my_head.my_next_node; n != &my_head; n = n->my_next_node ) - ++i; - __TBB_ASSERT( my_size == i, "Wrong size" ); -#endif /* TBB_USE_ASSERT >= 2 */ - } - -public: - class iterator : public iterator_impl<iterator> { - template <class U, class V> friend class intrusive_list_base; - - iterator ( intrusive_list_node* head, intrusive_list_node* pos ) - : iterator_impl<iterator>( head, pos ) - {} - public: - iterator () {} - - T* operator-> () const { return &this->item(); } - - T& operator* () const { return this->item(); } - }; // class iterator - - class const_iterator : public iterator_impl<const_iterator> { - template <class U, class V> friend class intrusive_list_base; - - const_iterator ( const intrusive_list_node* head, const intrusive_list_node* pos ) - : iterator_impl<const_iterator>( const_cast<intrusive_list_node*>(head), const_cast<intrusive_list_node*>(pos) ) - {} - public: - const_iterator () {} - - const T* operator-> () const { return &this->item(); } - - const T& operator* () const { return this->item(); } - }; // class iterator - - intrusive_list_base () : my_size(0) { - my_head.my_prev_node = &my_head; - my_head.my_next_node = &my_head; - } - - bool empty () const { return my_head.my_next_node == &my_head; } - - size_t size () const { return my_size; } - - iterator begin () { return iterator(&my_head, my_head.my_next_node); } - - iterator end () { return iterator(&my_head, &my_head); } - - const_iterator begin () const { return const_iterator(&my_head, my_head.my_next_node); } - - const_iterator end () const { return const_iterator(&my_head, &my_head); } - - void push_front ( T& val ) { - __TBB_ASSERT( node(val).my_prev_node == &node(val) && node(val).my_next_node == &node(val), - "Object with intrusive list node can be part of only one intrusive list simultaneously" ); - // An object can be part of only one intrusive list at the given moment via the given node member - node(val).my_prev_node = &my_head; - node(val).my_next_node = my_head.my_next_node; - my_head.my_next_node->my_prev_node = &node(val); - my_head.my_next_node = &node(val); - ++my_size; - assert_ok(); - } - - void remove( T& val ) { - __TBB_ASSERT( node(val).my_prev_node != &node(val) && node(val).my_next_node != &node(val), "Element to remove is not 
in the list" ); - --my_size; - node(val).my_next_node->my_prev_node = node(val).my_prev_node; - node(val).my_prev_node->my_next_node = node(val).my_next_node; -#if TBB_USE_ASSERT - node(val).my_prev_node = node(val).my_next_node = &node(val); -#endif - assert_ok(); - } - - iterator erase ( iterator it ) { - T& val = *it; - ++it; - remove( val ); - return it; - } - -}; // intrusive_list_base - - -//! Double linked list of items of type T containing a member of type intrusive_list_node. -/** NodePtr is a member pointer to the node data field. Class U is either T or - a base class of T containing the node member. Default values exist for the sake - of a partial specialization working with inheritance case. - - The list does not have ownership of its items. Its purpose is to avoid dynamic - memory allocation when forming lists of existing objects. - - The class is not thread safe. **/ -template <class T, class U, intrusive_list_node U::*NodePtr> -class memptr_intrusive_list : public intrusive_list_base<memptr_intrusive_list<T, U, NodePtr>, T> -{ - friend class intrusive_list_base<memptr_intrusive_list<T, U, NodePtr>, T>; - - static intrusive_list_node& node ( T& val ) { return val.*NodePtr; } - - static T& item ( intrusive_list_node* node ) { - // Cannot use __TBB_offestof (and consequently __TBB_get_object_ref) macro - // with *NodePtr argument because gcc refuses to interpret pasted "->" and "*" - // as member pointer dereferencing operator, and explicit usage of ## in - // __TBB_offestof implementation breaks operations with normal member names. - return *reinterpret_cast<T*>((char*)node - ((ptrdiff_t)&(reinterpret_cast<T*>(0x1000)->*NodePtr) - 0x1000)); - } -}; // intrusive_list<T, U, NodePtr> - -//! Double linked list of items of type T that is derived from intrusive_list_node class. -/** The list does not have ownership of its items. Its purpose is to avoid dynamic - memory allocation when forming lists of existing objects. - - The class is not thread safe. **/ -template <class T> -class intrusive_list : public intrusive_list_base<intrusive_list<T>, T> -{ - friend class intrusive_list_base<intrusive_list<T>, T>; - - static intrusive_list_node& node ( T& val ) { return val; } - - static T& item ( intrusive_list_node* node ) { return *static_cast<T*>(node); } -}; // intrusive_list<T> - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_ARENA_PER_MASTER */ - -#endif /* _TBB_intrusive_list_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.cpp deleted file mode 100644 index 9c528386ed..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.cpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#if DO_ITT_NOTIFY - -#if _WIN32||_WIN64 - #ifndef UNICODE - #define UNICODE - #endif -#endif /* WIN */ - -extern "C" void ITT_DoOneTimeInitialization(); - -#define ITT_SIMPLE_INIT 1 -#define __itt_init_ittlib_name(x,y) (ITT_DoOneTimeInitialization(), true) - -#include "tools_api/ittnotify_static.c" - -namespace tbb { -namespace internal { -int __TBB_load_ittnotify() { - return __itt_init_ittlib(NULL, __itt_group_none); -} - -}} // namespaces - -#endif /* DO_ITT_NOTIFY */ - -#define __TBB_NO_IMPLICIT_LINKAGE 1 -#include "itt_notify.h" - -namespace tbb { - -#if DO_ITT_NOTIFY - const tchar - *SyncType_GlobalLock = _T("TbbGlobalLock"), - *SyncType_Scheduler = _T("%Constant") - ; - const tchar - *SyncObj_SchedulerInitialization = _T("TbbSchedulerInitialization"), - *SyncObj_SchedulersList = _T("TbbSchedulersList"), - *SyncObj_WorkerLifeCycleMgmt = _T("TBB Scheduler"), - *SyncObj_TaskStealingLoop = _T("TBB Scheduler"), - *SyncObj_WorkerTaskPool = _T("TBB Scheduler"), - *SyncObj_MasterTaskPool = _T("TBB Scheduler"), - *SyncObj_TaskPoolSpinning = _T("TBB Scheduler"), - *SyncObj_Mailbox = _T("TBB Scheduler"), - *SyncObj_TaskReturnList = _T("TBB Scheduler"), - *SyncObj_TaskStream = _T("TBB Scheduler"), - *SyncObj_ContextsList = _T("TBB Scheduler") - ; -#endif /* DO_ITT_NOTIFY */ - -} // namespace tbb - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.h b/deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.h deleted file mode 100644 index 38cafd8473..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/itt_notify.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_ITT_NOTIFY -#define _TBB_ITT_NOTIFY - -#include "tbb/tbb_stddef.h" - -#if DO_ITT_NOTIFY - -#if _WIN32||_WIN64 - #ifndef UNICODE - #define UNICODE - #endif -#endif /* WIN */ - -#include "tools_api/ittnotify.h" -#include "tools_api/legacy/ittnotify.h" -#include "tools_api/internal/ittnotify.h" - -#if _WIN32||_WIN64 - #undef _T - #undef __itt_event_create - #define __itt_event_create __itt_event_createA -#endif /* WIN */ - - -#endif /* DO_ITT_NOTIFY */ - -#if !ITT_CALLER_NULL -#define ITT_CALLER_NULL ((__itt_caller)0) -#endif - -namespace tbb { -//! Unicode support -#if (_WIN32||_WIN64) && !__MINGW32__ - //! Unicode character type. Always wchar_t on Windows. - /** We do not use typedefs from Windows TCHAR family to keep consistence of TBB coding style. **/ - typedef wchar_t tchar; - //! Standard Windows macro to markup the string literals. - #define _T(string_literal) L ## string_literal -#else /* !WIN */ - typedef char tchar; - //! Standard Windows style macro to markup the string literals. - #define _T(string_literal) string_literal -#endif /* !WIN */ -} // namespace tbb - -#if DO_ITT_NOTIFY -namespace tbb { - //! Display names of internal synchronization types - extern const tchar - *SyncType_GlobalLock, - *SyncType_Scheduler; - //! Display names of internal synchronization components/scenarios - extern const tchar - *SyncObj_SchedulerInitialization, - *SyncObj_SchedulersList, - *SyncObj_WorkerLifeCycleMgmt, - *SyncObj_TaskStealingLoop, - *SyncObj_WorkerTaskPool, - *SyncObj_MasterTaskPool, - *SyncObj_TaskPoolSpinning, - *SyncObj_Mailbox, - *SyncObj_TaskReturnList, - *SyncObj_TaskStream, - *SyncObj_ContextsList - ; - - namespace internal { - void __TBB_EXPORTED_FUNC itt_set_sync_name_v3( void* obj, const tchar* name); - - } // namespace internal - -} // namespace tbb - -// const_cast<void*>() is necessary to cast off volatility -#define ITT_NOTIFY(name,obj) __itt_notify_##name(const_cast<void*>(static_cast<volatile void*>(obj))) -#define ITT_THREAD_SET_NAME(name) __itt_thread_set_name(name) -#define ITT_SYNC_CREATE(obj, type, name) __itt_sync_create(obj, type, name, 2) -#define ITT_SYNC_RENAME(obj, name) __itt_sync_rename(obj, name) -#define ITT_STACK_CREATE(obj) obj = __itt_stack_caller_create() -#define ITT_STACK(name, obj) __itt_stack_##name(obj) - -#else /* !DO_ITT_NOTIFY */ - -#define ITT_NOTIFY(name,obj) ((void)0) -#define ITT_THREAD_SET_NAME(name) ((void)0) -#define ITT_SYNC_CREATE(obj, type, name) ((void)0) -#define ITT_SYNC_RENAME(obj, name) ((void)0) -#define ITT_STACK_CREATE(obj) ((void)0) -#define ITT_STACK(name, obj) ((void)0) - -#endif /* !DO_ITT_NOTIFY */ - -namespace tbb { -namespace internal { -int __TBB_load_ittnotify(); -}} - -#endif /* _TBB_ITT_NOTIFY */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/lin32-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/lin32-tbb-export.def deleted file mode 100644 index dadf982546..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/lin32-tbb-export.def +++ /dev/null @@ -1,375 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -{ -global: - -/* cache_aligned_allocator.cpp */ -_ZN3tbb8internal12NFS_AllocateEjjPv; -_ZN3tbb8internal15NFS_GetLineSizeEv; -_ZN3tbb8internal8NFS_FreeEPv; -_ZN3tbb8internal23allocate_via_handler_v3Ej; -_ZN3tbb8internal25deallocate_via_handler_v3EPv; -_ZN3tbb8internal17is_malloc_used_v3Ev; - -/* task.cpp v3 */ -_ZN3tbb4task13note_affinityEt; -_ZN3tbb4task22internal_set_ref_countEi; -_ZN3tbb4task28internal_decrement_ref_countEv; -_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE; -_ZN3tbb4task4selfEv; -_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE; -_ZNK3tbb4task26is_owned_by_current_threadEv; -_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE; -_ZN3tbb8internal19allocate_root_proxy8allocateEj; -_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj; -_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE; -_ZNK3tbb8internal20allocate_child_proxy8allocateEj; -_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE; -_ZNK3tbb8internal27allocate_continuation_proxy8allocateEj; -_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE; -_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEj; -_ZTIN3tbb4taskE; -_ZTSN3tbb4taskE; -_ZTVN3tbb4taskE; -_ZN3tbb19task_scheduler_init19default_num_threadsEv; -_ZN3tbb19task_scheduler_init10initializeEij; -_ZN3tbb19task_scheduler_init10initializeEi; -_ZN3tbb19task_scheduler_init9terminateEv; -_ZN3tbb8internal26task_scheduler_observer_v37observeEb; -_ZN3tbb10empty_task7executeEv; -_ZN3tbb10empty_taskD0Ev; -_ZN3tbb10empty_taskD1Ev; -_ZTIN3tbb10empty_taskE; -_ZTSN3tbb10empty_taskE; -_ZTVN3tbb10empty_taskE; - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -_ZN3tbb4task7destroyERS0_; -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEj; -_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE; -_ZNK3tbb18task_group_context28is_group_execution_cancelledEv; -_ZN3tbb18task_group_context22cancel_group_executionEv; -_ZN3tbb18task_group_context26register_pending_exceptionEv; -_ZN3tbb18task_group_context5resetEv; -_ZN3tbb18task_group_context4initEv; -_ZN3tbb18task_group_contextD1Ev; -_ZN3tbb18task_group_contextD2Ev; -_ZNK3tbb18captured_exception4nameEv; -_ZNK3tbb18captured_exception4whatEv; -_ZN3tbb18captured_exception10throw_selfEv; -_ZN3tbb18captured_exception3setEPKcS2_; -_ZN3tbb18captured_exception4moveEv; -_ZN3tbb18captured_exception5clearEv; 
-_ZN3tbb18captured_exception7destroyEv; -_ZN3tbb18captured_exception8allocateEPKcS2_; -_ZN3tbb18captured_exceptionD0Ev; -_ZN3tbb18captured_exceptionD1Ev; -_ZTIN3tbb18captured_exceptionE; -_ZTSN3tbb18captured_exceptionE; -_ZTVN3tbb18captured_exceptionE; -_ZN3tbb13tbb_exceptionD2Ev; -_ZTIN3tbb13tbb_exceptionE; -_ZTSN3tbb13tbb_exceptionE; -_ZTVN3tbb13tbb_exceptionE; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev; -_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE; -_ZN3tbb14bad_last_allocD0Ev; -_ZN3tbb14bad_last_allocD1Ev; -_ZNK3tbb14bad_last_alloc4whatEv; -_ZTIN3tbb14bad_last_allocE; -_ZTSN3tbb14bad_last_allocE; -_ZTVN3tbb14bad_last_allocE; -_ZN3tbb12missing_waitD0Ev; -_ZN3tbb12missing_waitD1Ev; -_ZNK3tbb12missing_wait4whatEv; -_ZTIN3tbb12missing_waitE; -_ZTSN3tbb12missing_waitE; -_ZTVN3tbb12missing_waitE; -_ZN3tbb27invalid_multiple_schedulingD0Ev; -_ZN3tbb27invalid_multiple_schedulingD1Ev; -_ZNK3tbb27invalid_multiple_scheduling4whatEv; -_ZTIN3tbb27invalid_multiple_schedulingE; -_ZTSN3tbb27invalid_multiple_schedulingE; -_ZTVN3tbb27invalid_multiple_schedulingE; -_ZN3tbb13improper_lockD0Ev; -_ZN3tbb13improper_lockD1Ev; -_ZNK3tbb13improper_lock4whatEv; -_ZTIN3tbb13improper_lockE; -_ZTSN3tbb13improper_lockE; -_ZTVN3tbb13improper_lockE; - -/* tbb_misc.cpp */ -_ZN3tbb17assertion_failureEPKciS1_S1_; -_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E; -_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv; -_ZN3tbb8internal13handle_perrorEiPKc; -_ZN3tbb8internal15runtime_warningEPKcz; -__TBB_machine_store8_slow_perf_warning; -__TBB_machine_store8_slow; -TBB_runtime_interface_version; - -/* itt_notify.cpp */ -_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv; -_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_; -_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc; -_ZN3tbb8internal19itt_load_pointer_v3EPKv; - -/* pipeline.cpp */ -_ZTIN3tbb6filterE; -_ZTSN3tbb6filterE; -_ZTVN3tbb6filterE; -_ZN3tbb6filterD2Ev; -_ZN3tbb8pipeline10add_filterERNS_6filterE; -_ZN3tbb8pipeline12inject_tokenERNS_4taskE; -_ZN3tbb8pipeline13remove_filterERNS_6filterE; -_ZN3tbb8pipeline3runEj; -#if __TBB_TASK_GROUP_CONTEXT -_ZN3tbb8pipeline3runEjRNS_18task_group_contextE; -#endif -_ZN3tbb8pipeline5clearEv; -_ZN3tbb19thread_bound_filter12process_itemEv; -_ZN3tbb19thread_bound_filter16try_process_itemEv; -_ZTIN3tbb8pipelineE; -_ZTSN3tbb8pipelineE; -_ZTVN3tbb8pipelineE; -_ZN3tbb8pipelineC1Ev; -_ZN3tbb8pipelineC2Ev; -_ZN3tbb8pipelineD0Ev; -_ZN3tbb8pipelineD1Ev; -_ZN3tbb8pipelineD2Ev; - -/* queuing_rw_mutex.cpp */ -_ZN3tbb16queuing_rw_mutex18internal_constructEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b; -_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b; - -/* reader_writer_lock.cpp */ -_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_; -_ZN3tbb10interface518reader_writer_lock13try_lock_readEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_; -_ZN3tbb10interface518reader_writer_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock18internal_constructEv; -_ZN3tbb10interface518reader_writer_lock4lockEv; 
-_ZN3tbb10interface518reader_writer_lock6unlockEv; -_ZN3tbb10interface518reader_writer_lock8try_lockEv; -_ZN3tbb10interface518reader_writer_lock9lock_readEv; - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_; -_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_; -_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_; -_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_; -#endif - -/* spin_rw_mutex v3 */ -_ZN3tbb16spin_rw_mutex_v318internal_constructEv; -_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv; -_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv; -_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv; -_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv; - -/* spin_mutex.cpp */ -_ZN3tbb10spin_mutex18internal_constructEv; -_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_; - -/* mutex.cpp */ -_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb5mutex11scoped_lock16internal_releaseEv; -_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb5mutex16internal_destroyEv; -_ZN3tbb5mutex18internal_constructEv; - -/* recursive_mutex.cpp */ -_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb15recursive_mutex16internal_destroyEv; -_ZN3tbb15recursive_mutex18internal_constructEv; - -/* QueuingMutex.cpp */ -_ZN3tbb13queuing_mutex18internal_constructEv; -_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_; -_ZN3tbb13queuing_mutex11scoped_lock7releaseEv; -_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_; - -/* critical_section.cpp */ -_ZN3tbb8internal19critical_section_v418internal_constructEv; - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv; - -/* concurrent_queue.cpp v2 */ -_ZN3tbb8internal21concurrent_queue_base12internal_popEPv; -_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv; -_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEij; -_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv; -_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv; -_ZN3tbb8internal21concurrent_queue_baseC2Ej; -_ZN3tbb8internal21concurrent_queue_baseD2Ev; -_ZTIN3tbb8internal21concurrent_queue_baseE; -_ZTSN3tbb8internal21concurrent_queue_baseE; -_ZTVN3tbb8internal21concurrent_queue_baseE; -_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_; -_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv; -_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE; -_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev; -_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv; -#endif - -/* concurrent_queue v3 */ -/* constructors */ -_ZN3tbb8internal24concurrent_queue_base_v3C2Ej; 
-_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ej; -/* destructors */ -_ZN3tbb8internal24concurrent_queue_base_v3D2Ev; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev; -/* typeinfo */ -_ZTIN3tbb8internal24concurrent_queue_base_v3E; -_ZTSN3tbb8internal24concurrent_queue_base_v3E; -/* vtable */ -_ZTVN3tbb8internal24concurrent_queue_base_v3E; -/* methods */ -_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv; -_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_; -_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv; -_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv; -_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv; -_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEij; -_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv; -_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv; -_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv; -_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_; - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_jPFvPvPKvjE; -_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvjEb; -_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_; -_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEjjPFvPvjE; -_ZN3tbb8internal22concurrent_vector_base16internal_reserveEjjj; -_ZN3tbb8internal22concurrent_vector_base18internal_push_backEjRj; -_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEjjPFvPvjE; -_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv; -#endif - -/* concurrent_vector v3 */ -_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_jPFvPvPKvjE; -_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvjE; -_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_jPFvPvjEPFvS4_PKvjESA_; -_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEjjPFvPvPKvjES4_; -_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEjjj; -_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEjRj; -_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEjjPFvPvPKvjES4_; -_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv; -_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEjPvPFvS2_jEPFvS2_PKvjE; -_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_; -_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEj; -_ZN3tbb8internal25concurrent_vector_base_v3D2Ev; -_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEjjjPKvPFvPvjEPFvS4_S3_jE; -_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEjjPFvPvPKvjES4_; - -/* tbb_thread */ -#if __MINGW32__ -_ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_; -#else -_ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_; -#endif -_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv; -_ZN3tbb8internal13tbb_thread_v34joinEv; -_ZN3tbb8internal13tbb_thread_v36detachEv; -_ZN3tbb8internal15free_closure_v3EPv; -_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE; -_ZN3tbb8internal15thread_yield_v3Ev; -_ZN3tbb8internal16thread_get_id_v3Ev; 
-_ZN3tbb8internal19allocate_closure_v3Ej; -_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_; - -#if __MINGW32__ -/* condition_variable */ -_ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE; -_ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE; -_ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE; -_ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE; -_ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE; -#endif - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* ITT symbols */ -__itt_*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_memcpy_largest_cachelinesize; -get_memcpy_largest_cache_size; -get_mem_ops_method; -init_mem_ops_method; -irc__get_msg; -irc__print; -override_mem_ops_method; -set_memcpy_largest_cachelinesize; -set_memcpy_largest_cache_size; - -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/lin64-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/lin64-tbb-export.def deleted file mode 100644 index 159aec610a..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/lin64-tbb-export.def +++ /dev/null @@ -1,357 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "tbb/tbb_config.h" - -{ -global: - -/* cache_aligned_allocator.cpp */ -_ZN3tbb8internal12NFS_AllocateEmmPv; -_ZN3tbb8internal15NFS_GetLineSizeEv; -_ZN3tbb8internal8NFS_FreeEPv; -_ZN3tbb8internal23allocate_via_handler_v3Em; -_ZN3tbb8internal25deallocate_via_handler_v3EPv; -_ZN3tbb8internal17is_malloc_used_v3Ev; - -/* task.cpp v3 */ -_ZN3tbb4task13note_affinityEt; -_ZN3tbb4task22internal_set_ref_countEi; -_ZN3tbb4task28internal_decrement_ref_countEv; -_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE; -_ZN3tbb4task4selfEv; -_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE; -_ZNK3tbb4task26is_owned_by_current_threadEv; -_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE; -_ZN3tbb8internal19allocate_root_proxy8allocateEm; -_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj; -_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE; -_ZNK3tbb8internal20allocate_child_proxy8allocateEm; -_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE; -_ZNK3tbb8internal27allocate_continuation_proxy8allocateEm; -_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE; -_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm; -_ZTIN3tbb4taskE; -_ZTSN3tbb4taskE; -_ZTVN3tbb4taskE; -_ZN3tbb19task_scheduler_init19default_num_threadsEv; -_ZN3tbb19task_scheduler_init10initializeEim; -_ZN3tbb19task_scheduler_init10initializeEi; -_ZN3tbb19task_scheduler_init9terminateEv; -_ZN3tbb8internal26task_scheduler_observer_v37observeEb; -_ZN3tbb10empty_task7executeEv; -_ZN3tbb10empty_taskD0Ev; -_ZN3tbb10empty_taskD1Ev; -_ZTIN3tbb10empty_taskE; -_ZTSN3tbb10empty_taskE; -_ZTVN3tbb10empty_taskE; - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -_ZN3tbb4task7destroyERS0_; -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm; -_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE; -_ZNK3tbb18task_group_context28is_group_execution_cancelledEv; -_ZN3tbb18task_group_context22cancel_group_executionEv; -_ZN3tbb18task_group_context26register_pending_exceptionEv; -_ZN3tbb18task_group_context5resetEv; -_ZN3tbb18task_group_context4initEv; -_ZN3tbb18task_group_contextD1Ev; -_ZN3tbb18task_group_contextD2Ev; -_ZNK3tbb18captured_exception4nameEv; -_ZNK3tbb18captured_exception4whatEv; -_ZN3tbb18captured_exception10throw_selfEv; -_ZN3tbb18captured_exception3setEPKcS2_; -_ZN3tbb18captured_exception4moveEv; -_ZN3tbb18captured_exception5clearEv; -_ZN3tbb18captured_exception7destroyEv; -_ZN3tbb18captured_exception8allocateEPKcS2_; -_ZN3tbb18captured_exceptionD0Ev; -_ZN3tbb18captured_exceptionD1Ev; -_ZTIN3tbb18captured_exceptionE; -_ZTSN3tbb18captured_exceptionE; -_ZTVN3tbb18captured_exceptionE; -_ZN3tbb13tbb_exceptionD2Ev; -_ZTIN3tbb13tbb_exceptionE; -_ZTSN3tbb13tbb_exceptionE; -_ZTVN3tbb13tbb_exceptionE; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev; -_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE; -_ZN3tbb14bad_last_allocD0Ev; -_ZN3tbb14bad_last_allocD1Ev; -_ZNK3tbb14bad_last_alloc4whatEv; -_ZTIN3tbb14bad_last_allocE; -_ZTSN3tbb14bad_last_allocE; -_ZTVN3tbb14bad_last_allocE; -_ZN3tbb12missing_waitD0Ev; -_ZN3tbb12missing_waitD1Ev; -_ZNK3tbb12missing_wait4whatEv; -_ZTIN3tbb12missing_waitE; -_ZTSN3tbb12missing_waitE; -_ZTVN3tbb12missing_waitE; -_ZN3tbb27invalid_multiple_schedulingD0Ev; -_ZN3tbb27invalid_multiple_schedulingD1Ev; 
-_ZNK3tbb27invalid_multiple_scheduling4whatEv; -_ZTIN3tbb27invalid_multiple_schedulingE; -_ZTSN3tbb27invalid_multiple_schedulingE; -_ZTVN3tbb27invalid_multiple_schedulingE; -_ZN3tbb13improper_lockD0Ev; -_ZN3tbb13improper_lockD1Ev; -_ZNK3tbb13improper_lock4whatEv; -_ZTIN3tbb13improper_lockE; -_ZTSN3tbb13improper_lockE; -_ZTVN3tbb13improper_lockE; - -/* tbb_misc.cpp */ -_ZN3tbb17assertion_failureEPKciS1_S1_; -_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E; -_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv; -_ZN3tbb8internal13handle_perrorEiPKc; -_ZN3tbb8internal15runtime_warningEPKcz; -TBB_runtime_interface_version; - -/* itt_notify.cpp */ -_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv; -_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_; -_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc; -_ZN3tbb8internal19itt_load_pointer_v3EPKv; - -/* pipeline.cpp */ -_ZTIN3tbb6filterE; -_ZTSN3tbb6filterE; -_ZTVN3tbb6filterE; -_ZN3tbb6filterD2Ev; -_ZN3tbb8pipeline10add_filterERNS_6filterE; -_ZN3tbb8pipeline12inject_tokenERNS_4taskE; -_ZN3tbb8pipeline13remove_filterERNS_6filterE; -_ZN3tbb8pipeline3runEm; -#if __TBB_TASK_GROUP_CONTEXT -_ZN3tbb8pipeline3runEmRNS_18task_group_contextE; -#endif -_ZN3tbb8pipeline5clearEv; -_ZN3tbb19thread_bound_filter12process_itemEv; -_ZN3tbb19thread_bound_filter16try_process_itemEv; -_ZTIN3tbb8pipelineE; -_ZTSN3tbb8pipelineE; -_ZTVN3tbb8pipelineE; -_ZN3tbb8pipelineC1Ev; -_ZN3tbb8pipelineC2Ev; -_ZN3tbb8pipelineD0Ev; -_ZN3tbb8pipelineD1Ev; -_ZN3tbb8pipelineD2Ev; - -/* queuing_rw_mutex.cpp */ -_ZN3tbb16queuing_rw_mutex18internal_constructEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b; -_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b; - -/* reader_writer_lock.cpp */ -_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_; -_ZN3tbb10interface518reader_writer_lock13try_lock_readEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_; -_ZN3tbb10interface518reader_writer_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock18internal_constructEv; -_ZN3tbb10interface518reader_writer_lock4lockEv; -_ZN3tbb10interface518reader_writer_lock6unlockEv; -_ZN3tbb10interface518reader_writer_lock8try_lockEv; -_ZN3tbb10interface518reader_writer_lock9lock_readEv; - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_; -_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_; -_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_; -_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_; -#endif - -/* spin_rw_mutex v3 */ -_ZN3tbb16spin_rw_mutex_v318internal_constructEv; -_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv; -_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv; -_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv; 
-_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv; - -/* spin_mutex.cpp */ -_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb10spin_mutex18internal_constructEv; - -/* mutex.cpp */ -_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb5mutex11scoped_lock16internal_releaseEv; -_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb5mutex16internal_destroyEv; -_ZN3tbb5mutex18internal_constructEv; - -/* recursive_mutex.cpp */ -_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb15recursive_mutex16internal_destroyEv; -_ZN3tbb15recursive_mutex18internal_constructEv; - -/* QueuingMutex.cpp */ -_ZN3tbb13queuing_mutex18internal_constructEv; -_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_; -_ZN3tbb13queuing_mutex11scoped_lock7releaseEv; -_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_; - -/* critical_section.cpp */ -_ZN3tbb8internal19critical_section_v418internal_constructEv; - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv; - -/* concurrent_queue.cpp v2 */ -_ZN3tbb8internal21concurrent_queue_base12internal_popEPv; -_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv; -_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm; -_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv; -_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv; -_ZN3tbb8internal21concurrent_queue_baseC2Em; -_ZN3tbb8internal21concurrent_queue_baseD2Ev; -_ZTIN3tbb8internal21concurrent_queue_baseE; -_ZTSN3tbb8internal21concurrent_queue_baseE; -_ZTVN3tbb8internal21concurrent_queue_baseE; -_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_; -_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv; -_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE; -_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev; -_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv; -#endif - -/* concurrent_queue v3 */ -/* constructors */ -_ZN3tbb8internal24concurrent_queue_base_v3C2Em; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em; -/* destructors */ -_ZN3tbb8internal24concurrent_queue_base_v3D2Ev; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev; -/* typeinfo */ -_ZTIN3tbb8internal24concurrent_queue_base_v3E; -_ZTSN3tbb8internal24concurrent_queue_base_v3E; -/* vtable */ -_ZTVN3tbb8internal24concurrent_queue_base_v3E; -/* methods */ -_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_; -_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv; -_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv; -_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv; -_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv; -_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm; -_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv; 
-_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv; -_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv; -_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_; - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE; -_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb; -_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_; -_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE; -_ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm; -_ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm; -_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE; -_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv; -#endif - -/* concurrent_vector v3 */ -_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE; -_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE; -_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_; -_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_; -_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm; -_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm; -_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_; -_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv; -_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE; -_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_; -_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm; -_ZN3tbb8internal25concurrent_vector_base_v3D2Ev; -_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE; -_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_; - -/* tbb_thread */ -_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv; -_ZN3tbb8internal13tbb_thread_v36detachEv; -_ZN3tbb8internal16thread_get_id_v3Ev; -_ZN3tbb8internal15free_closure_v3EPv; -_ZN3tbb8internal13tbb_thread_v34joinEv; -_ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_; -_ZN3tbb8internal19allocate_closure_v3Em; -_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_; -_ZN3tbb8internal15thread_yield_v3Ev; -_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE; - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* ITT symbols */ -__itt_*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_msg_buf; -get_text_buf; -message_catalog; -print_buf; -irc__get_msg; -irc__print; - -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/lin64ipf-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/lin64ipf-tbb-export.def deleted file mode 100644 index ba5a0be8ca..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/lin64ipf-tbb-export.def +++ /dev/null @@ -1,401 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -{ -global: - -/* cache_aligned_allocator.cpp */ -_ZN3tbb8internal12NFS_AllocateEmmPv; -_ZN3tbb8internal15NFS_GetLineSizeEv; -_ZN3tbb8internal8NFS_FreeEPv; -_ZN3tbb8internal23allocate_via_handler_v3Em; -_ZN3tbb8internal25deallocate_via_handler_v3EPv; -_ZN3tbb8internal17is_malloc_used_v3Ev; - -/* task.cpp v3 */ -_ZN3tbb4task13note_affinityEt; -_ZN3tbb4task22internal_set_ref_countEi; -_ZN3tbb4task28internal_decrement_ref_countEv; -_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE; -_ZN3tbb4task4selfEv; -_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE; -_ZNK3tbb4task26is_owned_by_current_threadEv; -_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE; -_ZN3tbb8internal19allocate_root_proxy8allocateEm; -_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj; -_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE; -_ZNK3tbb8internal20allocate_child_proxy8allocateEm; -_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE; -_ZNK3tbb8internal27allocate_continuation_proxy8allocateEm; -_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE; -_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm; -_ZTIN3tbb4taskE; -_ZTSN3tbb4taskE; -_ZTVN3tbb4taskE; -_ZN3tbb19task_scheduler_init19default_num_threadsEv; -_ZN3tbb19task_scheduler_init10initializeEim; -_ZN3tbb19task_scheduler_init10initializeEi; -_ZN3tbb19task_scheduler_init9terminateEv; -_ZN3tbb8internal26task_scheduler_observer_v37observeEb; -_ZN3tbb10empty_task7executeEv; -_ZN3tbb10empty_taskD0Ev; -_ZN3tbb10empty_taskD1Ev; -_ZTIN3tbb10empty_taskE; -_ZTSN3tbb10empty_taskE; -_ZTVN3tbb10empty_taskE; - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -_ZN3tbb4task7destroyERS0_; -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm; -_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE; -_ZNK3tbb18task_group_context28is_group_execution_cancelledEv; -_ZN3tbb18task_group_context22cancel_group_executionEv; -_ZN3tbb18task_group_context26register_pending_exceptionEv; -_ZN3tbb18task_group_context5resetEv; -_ZN3tbb18task_group_context4initEv; -_ZN3tbb18task_group_contextD1Ev; -_ZN3tbb18task_group_contextD2Ev; -_ZNK3tbb18captured_exception4nameEv; -_ZNK3tbb18captured_exception4whatEv; -_ZN3tbb18captured_exception10throw_selfEv; -_ZN3tbb18captured_exception3setEPKcS2_; -_ZN3tbb18captured_exception4moveEv; -_ZN3tbb18captured_exception5clearEv; -_ZN3tbb18captured_exception7destroyEv; -_ZN3tbb18captured_exception8allocateEPKcS2_; -_ZN3tbb18captured_exceptionD0Ev; -_ZN3tbb18captured_exceptionD1Ev; -_ZTIN3tbb18captured_exceptionE; -_ZTSN3tbb18captured_exceptionE; -_ZTVN3tbb18captured_exceptionE; 
-_ZN3tbb13tbb_exceptionD2Ev; -_ZTIN3tbb13tbb_exceptionE; -_ZTSN3tbb13tbb_exceptionE; -_ZTVN3tbb13tbb_exceptionE; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev; -_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE; -_ZN3tbb14bad_last_allocD0Ev; -_ZN3tbb14bad_last_allocD1Ev; -_ZNK3tbb14bad_last_alloc4whatEv; -_ZTIN3tbb14bad_last_allocE; -_ZTSN3tbb14bad_last_allocE; -_ZTVN3tbb14bad_last_allocE; -_ZN3tbb12missing_waitD0Ev; -_ZN3tbb12missing_waitD1Ev; -_ZNK3tbb12missing_wait4whatEv; -_ZTIN3tbb12missing_waitE; -_ZTSN3tbb12missing_waitE; -_ZTVN3tbb12missing_waitE; -_ZN3tbb27invalid_multiple_schedulingD0Ev; -_ZN3tbb27invalid_multiple_schedulingD1Ev; -_ZNK3tbb27invalid_multiple_scheduling4whatEv; -_ZTIN3tbb27invalid_multiple_schedulingE; -_ZTSN3tbb27invalid_multiple_schedulingE; -_ZTVN3tbb27invalid_multiple_schedulingE; -_ZN3tbb13improper_lockD0Ev; -_ZN3tbb13improper_lockD1Ev; -_ZNK3tbb13improper_lock4whatEv; -_ZTIN3tbb13improper_lockE; -_ZTSN3tbb13improper_lockE; -_ZTVN3tbb13improper_lockE; - -/* tbb_misc.cpp */ -_ZN3tbb17assertion_failureEPKciS1_S1_; -_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E; -_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv; -_ZN3tbb8internal13handle_perrorEiPKc; -_ZN3tbb8internal15runtime_warningEPKcz; -TBB_runtime_interface_version; - -/* itt_notify.cpp */ -_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv; -_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_; -_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc; -_ZN3tbb8internal19itt_load_pointer_v3EPKv; - -/* pipeline.cpp */ -_ZTIN3tbb6filterE; -_ZTSN3tbb6filterE; -_ZTVN3tbb6filterE; -_ZN3tbb6filterD2Ev; -_ZN3tbb8pipeline10add_filterERNS_6filterE; -_ZN3tbb8pipeline12inject_tokenERNS_4taskE; -_ZN3tbb8pipeline13remove_filterERNS_6filterE; -_ZN3tbb8pipeline3runEm; -#if __TBB_TASK_GROUP_CONTEXT -_ZN3tbb8pipeline3runEmRNS_18task_group_contextE; -#endif -_ZN3tbb8pipeline5clearEv; -_ZN3tbb19thread_bound_filter12process_itemEv; -_ZN3tbb19thread_bound_filter16try_process_itemEv; -_ZTIN3tbb8pipelineE; -_ZTSN3tbb8pipelineE; -_ZTVN3tbb8pipelineE; -_ZN3tbb8pipelineC1Ev; -_ZN3tbb8pipelineC2Ev; -_ZN3tbb8pipelineD0Ev; -_ZN3tbb8pipelineD1Ev; -_ZN3tbb8pipelineD2Ev; - -/* queuing_rw_mutex.cpp */ -_ZN3tbb16queuing_rw_mutex18internal_constructEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b; -_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b; - -/* reader_writer_lock.cpp */ -_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_; -_ZN3tbb10interface518reader_writer_lock13try_lock_readEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_; -_ZN3tbb10interface518reader_writer_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock18internal_constructEv; -_ZN3tbb10interface518reader_writer_lock4lockEv; -_ZN3tbb10interface518reader_writer_lock6unlockEv; -_ZN3tbb10interface518reader_writer_lock8try_lockEv; -_ZN3tbb10interface518reader_writer_lock9lock_readEv; - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_; -_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_; 
-_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_; -_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_; -#endif - -/* spin_rw_mutex v3 */ -_ZN3tbb16spin_rw_mutex_v318internal_constructEv; -_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv; -_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv; -_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv; -_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv; - -/* spin_mutex.cpp */ -_ZN3tbb10spin_mutex18internal_constructEv; -_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_; - -/* mutex.cpp */ -_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb5mutex11scoped_lock16internal_releaseEv; -_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb5mutex16internal_destroyEv; -_ZN3tbb5mutex18internal_constructEv; - -/* recursive_mutex.cpp */ -_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb15recursive_mutex16internal_destroyEv; -_ZN3tbb15recursive_mutex18internal_constructEv; - -/* QueuingMutex.cpp */ -_ZN3tbb13queuing_mutex18internal_constructEv; -_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_; -_ZN3tbb13queuing_mutex11scoped_lock7releaseEv; -_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_; - -/* critical_section.cpp */ -_ZN3tbb8internal19critical_section_v418internal_constructEv; - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv; - -/* concurrent_queue.cpp v2 */ -_ZN3tbb8internal21concurrent_queue_base12internal_popEPv; -_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv; -_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm; -_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv; -_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv; -_ZN3tbb8internal21concurrent_queue_baseC2Em; -_ZN3tbb8internal21concurrent_queue_baseD2Ev; -_ZTIN3tbb8internal21concurrent_queue_baseE; -_ZTSN3tbb8internal21concurrent_queue_baseE; -_ZTVN3tbb8internal21concurrent_queue_baseE; -_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_; -_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv; -_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE; -_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev; -_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv; -#endif - -/* concurrent_queue v3 */ -/* constructors */ -_ZN3tbb8internal24concurrent_queue_base_v3C2Em; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em; -/* destructors */ -_ZN3tbb8internal24concurrent_queue_base_v3D2Ev; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev; -/* typeinfo */ -_ZTIN3tbb8internal24concurrent_queue_base_v3E; 
-_ZTSN3tbb8internal24concurrent_queue_base_v3E; -/* vtable */ -_ZTVN3tbb8internal24concurrent_queue_base_v3E; -/* methods */ -_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_; -_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv; -_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv; -_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv; -_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv; -_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm; -_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv; -_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv; -_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv; -_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_; - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE; -_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb; -_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_; -_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE; -_ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm; -_ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm; -_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE; -_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv; -#endif - -/* concurrent_vector v3 */ -_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE; -_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE; -_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_; -_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_; -_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm; -_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm; -_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_; -_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv; -_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE; -_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_; -_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm; -_ZN3tbb8internal25concurrent_vector_base_v3D2Ev; -_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE; -_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_; - -/* tbb_thread */ -_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv; -_ZN3tbb8internal13tbb_thread_v36detachEv; -_ZN3tbb8internal16thread_get_id_v3Ev; -_ZN3tbb8internal15free_closure_v3EPv; -_ZN3tbb8internal13tbb_thread_v34joinEv; -_ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_; -_ZN3tbb8internal19allocate_closure_v3Em; -_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_; -_ZN3tbb8internal15thread_yield_v3Ev; -_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE; - -/* asm functions */ -__TBB_machine_fetchadd1__TBB_full_fence; -__TBB_machine_fetchadd2__TBB_full_fence; -__TBB_machine_fetchadd4__TBB_full_fence; -__TBB_machine_fetchadd8__TBB_full_fence; -__TBB_machine_fetchstore1__TBB_full_fence; -__TBB_machine_fetchstore2__TBB_full_fence; -__TBB_machine_fetchstore4__TBB_full_fence; -__TBB_machine_fetchstore8__TBB_full_fence; 
-__TBB_machine_fetchadd1acquire; -__TBB_machine_fetchadd1release; -__TBB_machine_fetchadd2acquire; -__TBB_machine_fetchadd2release; -__TBB_machine_fetchadd4acquire; -__TBB_machine_fetchadd4release; -__TBB_machine_fetchadd8acquire; -__TBB_machine_fetchadd8release; -__TBB_machine_fetchstore1acquire; -__TBB_machine_fetchstore1release; -__TBB_machine_fetchstore2acquire; -__TBB_machine_fetchstore2release; -__TBB_machine_fetchstore4acquire; -__TBB_machine_fetchstore4release; -__TBB_machine_fetchstore8acquire; -__TBB_machine_fetchstore8release; -__TBB_machine_cmpswp1acquire; -__TBB_machine_cmpswp1release; -__TBB_machine_cmpswp1__TBB_full_fence; -__TBB_machine_cmpswp2acquire; -__TBB_machine_cmpswp2release; -__TBB_machine_cmpswp2__TBB_full_fence; -__TBB_machine_cmpswp4acquire; -__TBB_machine_cmpswp4release; -__TBB_machine_cmpswp4__TBB_full_fence; -__TBB_machine_cmpswp8acquire; -__TBB_machine_cmpswp8release; -__TBB_machine_cmpswp8__TBB_full_fence; -__TBB_machine_lg; -__TBB_machine_lockbyte; -__TBB_machine_pause; -__TBB_machine_trylockbyte; - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* ITT symbols */ -__itt_*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -?0_memcopyA; -?0_memcopyDu; -?0_memcpyD; -?1__memcpy; -?1__memmove; -?1__serial_memmove; -memcpy; -memset; - -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/mac32-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/mac32-tbb-export.def deleted file mode 100644 index 8725d9dd9f..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/mac32-tbb-export.def +++ /dev/null @@ -1,343 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. 
- -# cache_aligned_allocator.cpp -__ZN3tbb8internal12NFS_AllocateEmmPv -__ZN3tbb8internal15NFS_GetLineSizeEv -__ZN3tbb8internal8NFS_FreeEPv -__ZN3tbb8internal23allocate_via_handler_v3Em -__ZN3tbb8internal25deallocate_via_handler_v3EPv -__ZN3tbb8internal17is_malloc_used_v3Ev - -# task.cpp v3 -__ZN3tbb4task13note_affinityEt -__ZN3tbb4task22internal_set_ref_countEi -__ZN3tbb4task28internal_decrement_ref_countEv -__ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE -__ZN3tbb4task4selfEv -__ZN3tbb10interface58internal9task_base7destroyERNS_4taskE -__ZNK3tbb4task26is_owned_by_current_threadEv -__ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE -__ZN3tbb8internal19allocate_root_proxy8allocateEm -__ZN3tbb8internal28affinity_partitioner_base_v36resizeEj -__ZN3tbb8internal36get_initial_auto_partitioner_divisorEv -__ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE -__ZNK3tbb8internal20allocate_child_proxy8allocateEm -__ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE -__ZNK3tbb8internal27allocate_continuation_proxy8allocateEm -__ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE -__ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm -__ZTIN3tbb4taskE -__ZTSN3tbb4taskE -__ZTVN3tbb4taskE -__ZN3tbb19task_scheduler_init19default_num_threadsEv -__ZN3tbb19task_scheduler_init10initializeEim -__ZN3tbb19task_scheduler_init10initializeEi -__ZN3tbb19task_scheduler_init9terminateEv -__ZN3tbb8internal26task_scheduler_observer_v37observeEb -__ZN3tbb10empty_task7executeEv -__ZN3tbb10empty_taskD0Ev -__ZN3tbb10empty_taskD1Ev -__ZTIN3tbb10empty_taskE -__ZTSN3tbb10empty_taskE -__ZTVN3tbb10empty_taskE - -#if !TBB_NO_LEGACY -# task_v2.cpp -__ZN3tbb4task7destroyERS0_ -#endif - -# Exception handling in task scheduler -__ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm -__ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE -__ZNK3tbb18task_group_context28is_group_execution_cancelledEv -__ZN3tbb18task_group_context22cancel_group_executionEv -__ZN3tbb18task_group_context26register_pending_exceptionEv -__ZN3tbb18task_group_context5resetEv -__ZN3tbb18task_group_context4initEv -__ZN3tbb18task_group_contextD1Ev -__ZN3tbb18task_group_contextD2Ev -__ZNK3tbb18captured_exception4nameEv -__ZNK3tbb18captured_exception4whatEv -__ZN3tbb18captured_exception10throw_selfEv -__ZN3tbb18captured_exception3setEPKcS2_ -__ZN3tbb18captured_exception4moveEv -__ZN3tbb18captured_exception5clearEv -__ZN3tbb18captured_exception7destroyEv -__ZN3tbb18captured_exception8allocateEPKcS2_ -__ZN3tbb18captured_exceptionD0Ev -__ZN3tbb18captured_exceptionD1Ev -__ZTIN3tbb18captured_exceptionE -__ZTSN3tbb18captured_exceptionE -__ZTVN3tbb18captured_exceptionE -__ZTIN3tbb13tbb_exceptionE -__ZTSN3tbb13tbb_exceptionE -__ZTVN3tbb13tbb_exceptionE - -# Symbols for exceptions thrown from TBB -__ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev -__ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE -__ZNSt13runtime_errorD1Ev -__ZTISt13runtime_error -__ZTSSt13runtime_error -__ZNSt16invalid_argumentD1Ev -__ZTISt16invalid_argument -__ZTSSt16invalid_argument -__ZNSt11range_errorD1Ev -__ZTISt11range_error -__ZTSSt11range_error -__ZNSt12length_errorD1Ev -__ZTISt12length_error -__ZTSSt12length_error -__ZNSt12out_of_rangeD1Ev -__ZTISt12out_of_range -__ZTSSt12out_of_range -__ZN3tbb14bad_last_allocD0Ev -__ZN3tbb14bad_last_allocD1Ev -__ZNK3tbb14bad_last_alloc4whatEv -__ZTIN3tbb14bad_last_allocE -__ZTSN3tbb14bad_last_allocE -__ZTVN3tbb14bad_last_allocE -__ZN3tbb12missing_waitD0Ev 
-__ZN3tbb12missing_waitD1Ev -__ZNK3tbb12missing_wait4whatEv -__ZTIN3tbb12missing_waitE -__ZTSN3tbb12missing_waitE -__ZTVN3tbb12missing_waitE -__ZN3tbb27invalid_multiple_schedulingD0Ev -__ZN3tbb27invalid_multiple_schedulingD1Ev -__ZNK3tbb27invalid_multiple_scheduling4whatEv -__ZTIN3tbb27invalid_multiple_schedulingE -__ZTSN3tbb27invalid_multiple_schedulingE -__ZTVN3tbb27invalid_multiple_schedulingE -__ZN3tbb13improper_lockD0Ev -__ZN3tbb13improper_lockD1Ev -__ZNK3tbb13improper_lock4whatEv -__ZTIN3tbb13improper_lockE -__ZTSN3tbb13improper_lockE -__ZTVN3tbb13improper_lockE - -# tbb_misc.cpp -__ZN3tbb17assertion_failureEPKciS1_S1_ -__ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E -__ZN3tbb8internal13handle_perrorEiPKc -__ZN3tbb8internal15runtime_warningEPKcz -#ifndef __POWERPC__ -___TBB_machine_store8_slow_perf_warning -___TBB_machine_store8_slow -#endif -_TBB_runtime_interface_version - -# itt_notify.cpp -__ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv -__ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ -__ZN3tbb8internal19itt_load_pointer_v3EPKv -__ZN3tbb8internal20itt_set_sync_name_v3EPvPKc - -# pipeline.cpp -__ZTIN3tbb6filterE -__ZTSN3tbb6filterE -__ZTVN3tbb6filterE -__ZN3tbb6filterD2Ev -__ZN3tbb8pipeline10add_filterERNS_6filterE -__ZN3tbb8pipeline12inject_tokenERNS_4taskE -__ZN3tbb8pipeline13remove_filterERNS_6filterE -__ZN3tbb8pipeline3runEm -__ZN3tbb8pipeline3runEmRNS_18task_group_contextE -__ZN3tbb8pipeline5clearEv -__ZN3tbb19thread_bound_filter12process_itemEv -__ZN3tbb19thread_bound_filter16try_process_itemEv -__ZN3tbb8pipelineC1Ev -__ZN3tbb8pipelineC2Ev -__ZN3tbb8pipelineD0Ev -__ZN3tbb8pipelineD1Ev -__ZN3tbb8pipelineD2Ev -__ZTIN3tbb8pipelineE -__ZTSN3tbb8pipelineE -__ZTVN3tbb8pipelineE - -# queuing_rw_mutex.cpp -__ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv -__ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv -__ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b -__ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv -__ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b -__ZN3tbb16queuing_rw_mutex18internal_constructEv - -# reader_writer_lock.cpp -__ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv -__ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ -__ZN3tbb10interface518reader_writer_lock13try_lock_readEv -__ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv -__ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ -__ZN3tbb10interface518reader_writer_lock16internal_destroyEv -__ZN3tbb10interface518reader_writer_lock18internal_constructEv -__ZN3tbb10interface518reader_writer_lock4lockEv -__ZN3tbb10interface518reader_writer_lock6unlockEv -__ZN3tbb10interface518reader_writer_lock8try_lockEv -__ZN3tbb10interface518reader_writer_lock9lock_readEv - -#if !TBB_NO_LEGACY -# spin_rw_mutex.cpp v2 -__ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ -__ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ -__ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ -__ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ -__ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ -__ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ -__ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ -__ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ -__ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ -#endif - -# spin_rw_mutex v3 -__ZN3tbb16spin_rw_mutex_v316internal_upgradeEv -__ZN3tbb16spin_rw_mutex_v318internal_downgradeEv 
-__ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv -__ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv -__ZN3tbb16spin_rw_mutex_v323internal_release_readerEv -__ZN3tbb16spin_rw_mutex_v323internal_release_writerEv -__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv -__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv -__ZN3tbb16spin_rw_mutex_v318internal_constructEv - -# spin_mutex.cpp -__ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ -__ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv -__ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ -__ZN3tbb10spin_mutex18internal_constructEv - -# mutex.cpp -__ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ -__ZN3tbb5mutex11scoped_lock16internal_releaseEv -__ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ -__ZN3tbb5mutex16internal_destroyEv -__ZN3tbb5mutex18internal_constructEv - -# recursive_mutex.cpp -__ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ -__ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv -__ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ -__ZN3tbb15recursive_mutex16internal_destroyEv -__ZN3tbb15recursive_mutex18internal_constructEv - -# queuing_mutex.cpp -__ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ -__ZN3tbb13queuing_mutex11scoped_lock7releaseEv -__ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ -__ZN3tbb13queuing_mutex18internal_constructEv - -# critical_section.cpp -__ZN3tbb8internal19critical_section_v418internal_constructEv - -#if !TBB_NO_LEGACY -# concurrent_hash_map -__ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv - -# concurrent_queue.cpp v2 -__ZN3tbb8internal21concurrent_queue_base12internal_popEPv -__ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv -__ZN3tbb8internal21concurrent_queue_base21internal_set_capacityEim -__ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv -__ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv -__ZN3tbb8internal21concurrent_queue_baseC2Em -__ZN3tbb8internal21concurrent_queue_baseD2Ev -__ZTIN3tbb8internal21concurrent_queue_baseE -__ZTSN3tbb8internal21concurrent_queue_baseE -__ZTVN3tbb8internal21concurrent_queue_baseE -__ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ -__ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv -__ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE -__ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev -__ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv -#endif - -# concurrent_queue v3 -# constructors -__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E -__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em -__ZN3tbb8internal24concurrent_queue_base_v3C2Em -# destructors -__ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev -__ZN3tbb8internal24concurrent_queue_base_v3D2Ev -# typeinfo -__ZTIN3tbb8internal24concurrent_queue_base_v3E -__ZTSN3tbb8internal24concurrent_queue_base_v3E -#vtable -__ZTVN3tbb8internal24concurrent_queue_base_v3E -# methods -__ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv -__ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ -__ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv -__ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv -__ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv -__ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv 
-__ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityEim -__ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv -__ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv -__ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv -__ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv -__ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ - -#if !TBB_NO_LEGACY -# concurrent_vector.cpp v2 -__ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE -__ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb -__ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ -__ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE -__ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm -__ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm -__ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE -__ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv -#endif - -# concurrent_vector v3 -__ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE -__ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE -__ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ -__ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ -__ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm -__ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm -__ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ -__ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv -__ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE -__ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ -__ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm -__ZN3tbb8internal25concurrent_vector_base_v3D2Ev -__ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE -__ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ - -# tbb_thread -__ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ -__ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv -__ZN3tbb8internal13tbb_thread_v34joinEv -__ZN3tbb8internal13tbb_thread_v36detachEv -__ZN3tbb8internal15free_closure_v3EPv -__ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE -__ZN3tbb8internal15thread_yield_v3Ev -__ZN3tbb8internal16thread_get_id_v3Ev -__ZN3tbb8internal19allocate_closure_v3Em -__ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/mac64-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/mac64-tbb-export.def deleted file mode 100644 index 9d8615c52e..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/mac64-tbb-export.def +++ /dev/null @@ -1,339 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -# cache_aligned_allocator.cpp -__ZN3tbb8internal12NFS_AllocateEmmPv -__ZN3tbb8internal15NFS_GetLineSizeEv -__ZN3tbb8internal8NFS_FreeEPv -__ZN3tbb8internal23allocate_via_handler_v3Em -__ZN3tbb8internal25deallocate_via_handler_v3EPv -__ZN3tbb8internal17is_malloc_used_v3Ev - -# task.cpp v3 -__ZN3tbb4task13note_affinityEt -__ZN3tbb4task22internal_set_ref_countEi -__ZN3tbb4task28internal_decrement_ref_countEv -__ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE -__ZN3tbb4task4selfEv -__ZN3tbb10interface58internal9task_base7destroyERNS_4taskE -__ZNK3tbb4task26is_owned_by_current_threadEv -__ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE -__ZN3tbb8internal19allocate_root_proxy8allocateEm -__ZN3tbb8internal28affinity_partitioner_base_v36resizeEj -__ZN3tbb8internal36get_initial_auto_partitioner_divisorEv -__ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE -__ZNK3tbb8internal20allocate_child_proxy8allocateEm -__ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE -__ZNK3tbb8internal27allocate_continuation_proxy8allocateEm -__ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE -__ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEm -__ZTIN3tbb4taskE -__ZTSN3tbb4taskE -__ZTVN3tbb4taskE -__ZN3tbb19task_scheduler_init19default_num_threadsEv -__ZN3tbb19task_scheduler_init10initializeEim -__ZN3tbb19task_scheduler_init10initializeEi -__ZN3tbb19task_scheduler_init9terminateEv -__ZN3tbb8internal26task_scheduler_observer_v37observeEb -__ZN3tbb10empty_task7executeEv -__ZN3tbb10empty_taskD0Ev -__ZN3tbb10empty_taskD1Ev -__ZTIN3tbb10empty_taskE -__ZTSN3tbb10empty_taskE -__ZTVN3tbb10empty_taskE - -#if !TBB_NO_LEGACY -# task_v2.cpp -__ZN3tbb4task7destroyERS0_ -#endif - -# Exception handling in task scheduler -__ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEm -__ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE -__ZNK3tbb18task_group_context28is_group_execution_cancelledEv -__ZN3tbb18task_group_context22cancel_group_executionEv -__ZN3tbb18task_group_context26register_pending_exceptionEv -__ZN3tbb18task_group_context5resetEv -__ZN3tbb18task_group_context4initEv -__ZN3tbb18task_group_contextD1Ev -__ZN3tbb18task_group_contextD2Ev -__ZNK3tbb18captured_exception4nameEv -__ZNK3tbb18captured_exception4whatEv -__ZN3tbb18captured_exception10throw_selfEv -__ZN3tbb18captured_exception3setEPKcS2_ -__ZN3tbb18captured_exception4moveEv -__ZN3tbb18captured_exception5clearEv -__ZN3tbb18captured_exception7destroyEv -__ZN3tbb18captured_exception8allocateEPKcS2_ -__ZN3tbb18captured_exceptionD0Ev -__ZN3tbb18captured_exceptionD1Ev -__ZTIN3tbb18captured_exceptionE -__ZTSN3tbb18captured_exceptionE -__ZTVN3tbb18captured_exceptionE -__ZTIN3tbb13tbb_exceptionE -__ZTSN3tbb13tbb_exceptionE 
-__ZTVN3tbb13tbb_exceptionE - -# Symbols for exceptions thrown from TBB -__ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev -__ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE -__ZNSt13runtime_errorD1Ev -__ZTISt13runtime_error -__ZTSSt13runtime_error -__ZNSt16invalid_argumentD1Ev -__ZTISt16invalid_argument -__ZTSSt16invalid_argument -__ZNSt11range_errorD1Ev -__ZTISt11range_error -__ZTSSt11range_error -__ZNSt12length_errorD1Ev -__ZTISt12length_error -__ZTSSt12length_error -__ZNSt12out_of_rangeD1Ev -__ZTISt12out_of_range -__ZTSSt12out_of_range -__ZN3tbb14bad_last_allocD0Ev -__ZN3tbb14bad_last_allocD1Ev -__ZNK3tbb14bad_last_alloc4whatEv -__ZTIN3tbb14bad_last_allocE -__ZTSN3tbb14bad_last_allocE -__ZTVN3tbb14bad_last_allocE -__ZN3tbb12missing_waitD0Ev -__ZN3tbb12missing_waitD1Ev -__ZNK3tbb12missing_wait4whatEv -__ZTIN3tbb12missing_waitE -__ZTSN3tbb12missing_waitE -__ZTVN3tbb12missing_waitE -__ZN3tbb27invalid_multiple_schedulingD0Ev -__ZN3tbb27invalid_multiple_schedulingD1Ev -__ZNK3tbb27invalid_multiple_scheduling4whatEv -__ZTIN3tbb27invalid_multiple_schedulingE -__ZTSN3tbb27invalid_multiple_schedulingE -__ZTVN3tbb27invalid_multiple_schedulingE -__ZN3tbb13improper_lockD0Ev -__ZN3tbb13improper_lockD1Ev -__ZNK3tbb13improper_lock4whatEv -__ZTIN3tbb13improper_lockE -__ZTSN3tbb13improper_lockE -__ZTVN3tbb13improper_lockE - -# tbb_misc.cpp -__ZN3tbb17assertion_failureEPKciS1_S1_ -__ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E -__ZN3tbb8internal13handle_perrorEiPKc -__ZN3tbb8internal15runtime_warningEPKcz -_TBB_runtime_interface_version - -# itt_notify.cpp -__ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv -__ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_ -__ZN3tbb8internal19itt_load_pointer_v3EPKv -__ZN3tbb8internal20itt_set_sync_name_v3EPvPKc - -# pipeline.cpp -__ZTIN3tbb6filterE -__ZTSN3tbb6filterE -__ZTVN3tbb6filterE -__ZN3tbb6filterD2Ev -__ZN3tbb8pipeline10add_filterERNS_6filterE -__ZN3tbb8pipeline12inject_tokenERNS_4taskE -__ZN3tbb8pipeline13remove_filterERNS_6filterE -__ZN3tbb8pipeline3runEm -__ZN3tbb8pipeline3runEmRNS_18task_group_contextE -__ZN3tbb8pipeline5clearEv -__ZN3tbb19thread_bound_filter12process_itemEv -__ZN3tbb19thread_bound_filter16try_process_itemEv -__ZN3tbb8pipelineC1Ev -__ZN3tbb8pipelineC2Ev -__ZN3tbb8pipelineD0Ev -__ZN3tbb8pipelineD1Ev -__ZN3tbb8pipelineD2Ev -__ZTIN3tbb8pipelineE -__ZTSN3tbb8pipelineE -__ZTVN3tbb8pipelineE - -# queuing_rw_mutex.cpp -__ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv -__ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv -__ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b -__ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv -__ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b -__ZN3tbb16queuing_rw_mutex18internal_constructEv - -# reader_writer_lock.cpp -__ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv -__ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_ -__ZN3tbb10interface518reader_writer_lock13try_lock_readEv -__ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv -__ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_ -__ZN3tbb10interface518reader_writer_lock16internal_destroyEv -__ZN3tbb10interface518reader_writer_lock18internal_constructEv -__ZN3tbb10interface518reader_writer_lock4lockEv -__ZN3tbb10interface518reader_writer_lock6unlockEv -__ZN3tbb10interface518reader_writer_lock8try_lockEv -__ZN3tbb10interface518reader_writer_lock9lock_readEv - -#if !TBB_NO_LEGACY -# spin_rw_mutex.cpp 
v2 -__ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_ -__ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_ -__ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_ -__ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_ -__ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_ -__ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_ -__ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_ -__ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_ -__ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_ -#endif - -# spin_rw_mutex v3 -__ZN3tbb16spin_rw_mutex_v316internal_upgradeEv -__ZN3tbb16spin_rw_mutex_v318internal_downgradeEv -__ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv -__ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv -__ZN3tbb16spin_rw_mutex_v323internal_release_readerEv -__ZN3tbb16spin_rw_mutex_v323internal_release_writerEv -__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv -__ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv -__ZN3tbb16spin_rw_mutex_v318internal_constructEv - -# spin_mutex.cpp -__ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_ -__ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv -__ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_ -__ZN3tbb10spin_mutex18internal_constructEv - -# mutex.cpp -__ZN3tbb5mutex11scoped_lock16internal_acquireERS0_ -__ZN3tbb5mutex11scoped_lock16internal_releaseEv -__ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_ -__ZN3tbb5mutex16internal_destroyEv -__ZN3tbb5mutex18internal_constructEv - -# recursive_mutex.cpp -__ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_ -__ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv -__ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_ -__ZN3tbb15recursive_mutex16internal_destroyEv -__ZN3tbb15recursive_mutex18internal_constructEv - -# queuing_mutex.cpp -__ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_ -__ZN3tbb13queuing_mutex11scoped_lock7releaseEv -__ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_ -__ZN3tbb13queuing_mutex18internal_constructEv - -# critical_section.cpp -__ZN3tbb8internal19critical_section_v418internal_constructEv - -#if !TBB_NO_LEGACY -# concurrent_hash_map -__ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv - -# concurrent_queue.cpp v2 -__ZN3tbb8internal21concurrent_queue_base12internal_popEPv -__ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv -__ZN3tbb8internal21concurrent_queue_base21internal_set_capacityElm -__ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv -__ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv -__ZN3tbb8internal21concurrent_queue_baseC2Em -__ZN3tbb8internal21concurrent_queue_baseD2Ev -__ZTIN3tbb8internal21concurrent_queue_baseE -__ZTSN3tbb8internal21concurrent_queue_baseE -__ZTVN3tbb8internal21concurrent_queue_baseE -__ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_ -__ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv -__ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE -__ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev -__ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv -#endif - -# concurrent_queue v3 -# constructors -__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E -__ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Em -__ZN3tbb8internal24concurrent_queue_base_v3C2Em -# destructors -__ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev -__ZN3tbb8internal24concurrent_queue_base_v3D2Ev -# 
typeinfo -__ZTIN3tbb8internal24concurrent_queue_base_v3E -__ZTSN3tbb8internal24concurrent_queue_base_v3E -#vtable -__ZTVN3tbb8internal24concurrent_queue_base_v3E -# methods -__ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_ -__ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv -__ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv -__ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv -__ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv -__ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv -__ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv -__ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityElm -__ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv -__ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv -__ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv -__ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_ - -#if !TBB_NO_LEGACY -# concurrent_vector.cpp v2 -__ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_mPFvPvPKvmE -__ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvmEb -__ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ -__ZN3tbb8internal22concurrent_vector_base16internal_grow_byEmmPFvPvmE -__ZN3tbb8internal22concurrent_vector_base16internal_reserveEmmm -__ZN3tbb8internal22concurrent_vector_base18internal_push_backEmRm -__ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEmmPFvPvmE -__ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv -#endif - -# concurrent_vector v3 -__ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_mPFvPvPKvmE -__ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvmE -__ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_mPFvPvmEPFvS4_PKvmESA_ -__ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEmmPFvPvPKvmES4_ -__ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEmmm -__ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEmRm -__ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEmmPFvPvPKvmES4_ -__ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv -__ZN3tbb8internal25concurrent_vector_base_v316internal_compactEmPvPFvS2_mEPFvS2_PKvmE -__ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_ -__ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEm -__ZN3tbb8internal25concurrent_vector_base_v3D2Ev -__ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEmmmPKvPFvPvmEPFvS4_S3_mE -__ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEmmPFvPvPKvmES4_ - -# tbb_thread -__ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv -__ZN3tbb8internal13tbb_thread_v36detachEv -__ZN3tbb8internal16thread_get_id_v3Ev -__ZN3tbb8internal15free_closure_v3EPv -__ZN3tbb8internal13tbb_thread_v34joinEv -__ZN3tbb8internal13tbb_thread_v314internal_startEPFPvS2_ES2_ -__ZN3tbb8internal19allocate_closure_v3Em -__ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_ -__ZN3tbb8internal15thread_yield_v3Ev -__ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/mailbox.h b/deal.II/bundled/tbb30_104oss/src/tbb/mailbox.h deleted file mode 100644 index 1edd9e9bf3..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/mailbox.h +++ /dev/null @@ -1,191 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. 
- - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_mailbox_H -#define _TBB_mailbox_H - -#include "tbb/tbb_stddef.h" -#include "tbb/cache_aligned_allocator.h" - -#include "scheduler_common.h" - -namespace tbb { -namespace internal { - -class mail_outbox; - -struct task_proxy : public task { - static const intptr_t pool_bit = 1; - static const intptr_t mailbox_bit = 2; - /* All but two low-order bits represent a (task*). - Two low-order bits mean: - 1 = proxy is/was/will be in task pool - 2 = proxy is/was/will be in mailbox */ - intptr_t task_and_tag; - - //! Pointer to next task_proxy in a mailbox - task_proxy* next_in_mailbox; - - //! Mailbox to which this was mailed. - mail_outbox* outbox; -}; - -//! Internal representation of mail_outbox, without padding. -class unpadded_mail_outbox { -protected: - //! Pointer to first task_proxy in mailbox, or NULL if box is empty. - task_proxy* my_first; - - //! Pointer to pointer that will point to next item in the queue. Never NULL. - task_proxy** my_last; - - //! Owner of mailbox is not executing a task, and has drained its own task pool. - bool my_is_idle; -}; - -//! Class representing where mail is put. -/** Padded to occupy a cache line. */ -class mail_outbox: unpadded_mail_outbox { - char pad[NFS_MaxLineSize-sizeof(unpadded_mail_outbox)]; - - task_proxy* internal_pop() { - //! No fence on load of my_first, because if it is NULL, there's nothing further to read from another thread. - task_proxy* first = my_first; - if( first ) { - // There is a first item in the mailbox. See if there is a second. - if( task_proxy* second = __TBB_load_with_acquire(first->next_in_mailbox) ) { - // There are at least two items, so first item can be popped easily. - __TBB_store_with_release( my_first, second ); - } else { - // There is only one item. Some care is required to pop it. - my_first = NULL; - if( (task_proxy**)__TBB_CompareAndSwapW(&my_last, (intptr_t)&my_first, - (intptr_t)&first->next_in_mailbox)==&first->next_in_mailbox ) - { - // Successfully transitioned mailbox from having one item to having none. - __TBB_ASSERT(!first->next_in_mailbox,NULL); - } else { - // Some other thread updated my_last but has not filled in result->next_in_mailbox - // Wait until first item points to second item. 
- atomic_backoff backoff; - while( !(second=const_cast<volatile task_proxy*>(first)->next_in_mailbox) ) - backoff.pause(); - my_first = second; - } - } - } - return first; - } -public: - friend class mail_inbox; - - //! Push task_proxy onto the mailbox queue of another thread. - /** Implementation is wait-free. */ - void push( task_proxy& t ) { - __TBB_ASSERT(&t, NULL); - t.next_in_mailbox = NULL; - task_proxy** link = (task_proxy**)__TBB_FetchAndStoreW(&my_last,(intptr_t)&t.next_in_mailbox); - // No release fence required for the next store, because there are no memory operations - // between the previous fully fenced atomic operation and the store. - *link = &t; - } - - //! Construct *this as a mailbox from zeroed memory. - /** Raise assertion if *this is not previously zeored, or sizeof(this) is wrong. - This method is provided instead of a full constructor since we know the objecxt - will be constructed in zeroed memory. */ - void construct() { - __TBB_ASSERT( sizeof(*this)==NFS_MaxLineSize, NULL ); - __TBB_ASSERT( !my_first, NULL ); - __TBB_ASSERT( !my_last, NULL ); - __TBB_ASSERT( !my_is_idle, NULL ); - my_last=&my_first; - } - - //! Drain the mailbox - intptr_t drain() { - intptr_t k = 0; - // No fences here because other threads have already quit. - for( ; task_proxy* t = my_first; ++k ) { - my_first = t->next_in_mailbox; - NFS_Free((char*)t - task_prefix_reservation_size); - } - return k; - } - - //! True if thread that owns this mailbox is looking for work. - bool recipient_is_idle() { - return my_is_idle; - } -}; // class mail_outbox - -//! Class representing source of mail. -class mail_inbox { - //! Corresponding sink where mail that we receive will be put. - mail_outbox* my_putter; -public: - //! Construct unattached inbox - mail_inbox() : my_putter(NULL) {} - - //! Attach inbox to a corresponding outbox. - void attach( mail_outbox& putter ) { - __TBB_ASSERT(!my_putter,"already attached"); - my_putter = &putter; - } - //! Detach inbox from its outbox - void detach() { - __TBB_ASSERT(my_putter,"not attached"); - my_putter = NULL; - } - //! Get next piece of mail, or NULL if mailbox is empty. - task_proxy* pop() { - return my_putter->internal_pop(); - } - //! Indicate whether thread that reads this mailbox is idle. - /** Raises assertion failure if mailbox is redundantly marked as not idle. */ - void set_is_idle( bool value ) { - if( my_putter ) { - __TBB_ASSERT( my_putter->my_is_idle || value, "attempt to redundantly mark mailbox as not idle" ); - my_putter->my_is_idle = value; - } - } - //! Indicate whether thread that reads this mailbox is idle. - bool is_idle_state ( bool value ) const { - return !my_putter || my_putter->my_is_idle == value; - } - -#if DO_ITT_NOTIFY - //! Get pointer to corresponding outbox used for ITT_NOTIFY calls. - void* outbox() const {return my_putter;} -#endif /* DO_ITT_NOTIFY */ -}; // class mail_inbox - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_mailbox_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/market.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/market.cpp deleted file mode 100644 index 124f9acadb..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/market.cpp +++ /dev/null @@ -1,304 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. 
- - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/tbb_stddef.h" - -#if __TBB_ARENA_PER_MASTER - -#include "market.h" -#include "tbb_main.h" -#include "governor.h" -#include "scheduler.h" -#include "itt_notify.h" - -namespace tbb { -namespace internal { - -//------------------------------------------------------------------------ -// market -//------------------------------------------------------------------------ - -market::market ( unsigned max_num_workers, size_t stack_size ) - : my_ref_count(1) - , my_stack_size(stack_size) - , my_max_num_workers(max_num_workers) -{ - my_next_arena = my_arenas.begin(); - - // Once created RML server will start initializing workers that will need - // global market instance to get worker stack size - my_server = governor::create_rml_server( *this ); - __TBB_ASSERT( my_server, "Failed to create RML server" ); -} - - -market& market::global_market ( unsigned max_num_workers, size_t stack_size ) { - global_market_mutex_type::scoped_lock lock( theMarketMutex ); - market *m = theMarket; - if ( m ) { - ++m->my_ref_count; - if ( m->my_stack_size < stack_size ) - runtime_warning( "Newer master request for larger stack cannot be satisfied\n" ); - } - else { - max_num_workers = max( governor::default_num_threads() - 1, max_num_workers ); - // at least 1 worker is required to support starvation resistant tasks - if( max_num_workers==0 ) max_num_workers = 1; - // Create the global market instance - size_t size = sizeof(market); -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( __TBB_offsetof(market, my_workers) + sizeof(generic_scheduler*) == sizeof(market), - "my_workers must be the last data field of the market class"); - size += sizeof(generic_scheduler*) * (max_num_workers - 1); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - __TBB_InitOnce::add_ref(); - void* storage = NFS_Allocate(size, 1, NULL); - memset( storage, 0, size ); - // Initialize and publish global market - m = new (storage) market( max_num_workers, stack_size ); - theMarket = m; - } - return *m; -} - -void market::destroy () { -#if __TBB_COUNT_TASK_NODES - if ( my_task_node_count ) - runtime_warning( "Leaked %ld task objects\n", (intptr_t)my_task_node_count ); -#endif /* __TBB_COUNT_TASK_NODES */ - this->~market(); - NFS_Free( this ); - __TBB_InitOnce::remove_ref(); -} - -void market::release () { - __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" 
); - bool do_release = false; - { - global_market_mutex_type::scoped_lock lock(theMarketMutex); - if ( --my_ref_count == 0 ) { - do_release = true; - theMarket = NULL; - } - } - if( do_release ) - my_server->request_close_connection(); -} - -arena& market::create_arena ( unsigned max_num_workers, size_t stack_size ) { - market &m = global_market( max_num_workers, stack_size ); // increases market's ref count - arena& a = arena::allocate_arena( m, min(max_num_workers, m.my_max_num_workers) ); - // Add newly created arena into the existing market's list. - spin_mutex::scoped_lock lock(m.my_arenas_list_mutex); - m.my_arenas.push_front( a ); - if ( m.my_arenas.size() == 1 ) - m.my_next_arena = m.my_arenas.begin(); - return a; -} - -void market::detach_arena ( arena& a ) { - __TBB_ASSERT( theMarket == this, "Global market instance was destroyed prematurely?" ); - spin_mutex::scoped_lock lock(my_arenas_list_mutex); - __TBB_ASSERT( my_next_arena != my_arenas.end(), NULL ); - if ( &*my_next_arena == &a ) - if ( ++my_next_arena == my_arenas.end() && my_arenas.size() > 1 ) - my_next_arena = my_arenas.begin(); - my_arenas.remove( a ); -} - -arena* market::arena_in_need () { - spin_mutex::scoped_lock lock(my_arenas_list_mutex); - if ( my_arenas.empty() ) - return NULL; - __TBB_ASSERT( my_next_arena != my_arenas.end(), NULL ); - arena_list_type::iterator it = my_next_arena; - do { - arena& a = *it; - if ( ++it == my_arenas.end() ) - it = my_arenas.begin(); - if ( a.num_workers_active() < a.my_num_workers_allotted ) { - ++a.my_num_threads_active; - my_next_arena = it; - return &a; - } - } while ( it != my_next_arena ); - return NULL; -} - -void market::update_allotment ( int max_workers ) { - unsigned carry = 0; - spin_mutex::scoped_lock lock(my_arenas_list_mutex); - arena_list_type::iterator it = my_arenas.begin(); - int total_demand = my_total_demand; - max_workers = min(max_workers, total_demand); - if ( total_demand > 0 ) { - for ( ; it != my_arenas.end(); ++it ) { - arena& a = *it; - int tmp = a.my_num_workers_requested * max_workers + carry; - int allotted = tmp / total_demand; - carry = tmp % total_demand; - a.my_num_workers_allotted = min( allotted, (int)a.my_max_num_workers ); - } - } - else { - for ( ; it != my_arenas.end(); ++it ) { - it->my_num_workers_allotted = 0; - } - } -} - -/** The balancing algorithm may be liable to data races. However the aberrations - caused by the races are not fatal and generally only temporarily affect fairness - of the workers distribution among arenas. **/ -void market::adjust_demand ( arena& a, int delta ) { - __TBB_ASSERT( theMarket, "market instance was destroyed prematurely?" ); - a.my_num_workers_requested += delta; - my_total_demand += delta; - update_allotment( my_max_num_workers ); - // Must be called outside of any locks - my_server->adjust_job_count_estimate( delta ); - GATHER_STATISTIC( governor::local_scheduler_if_initialized() ? 
++governor::local_scheduler_if_initialized()->my_counters.gate_switches : 0 ); -} - -void market::process( job& j ) { - generic_scheduler& s = static_cast<generic_scheduler&>(j); - while ( arena *a = arena_in_need() ) - a->process(s); - GATHER_STATISTIC( ++s.my_counters.market_roundtrips ); -} - -void market::cleanup( job& j ) { - __TBB_ASSERT( theMarket != this, NULL ); - generic_scheduler& s = static_cast<generic_scheduler&>(j); - generic_scheduler* mine = governor::local_scheduler_if_initialized(); - __TBB_ASSERT( !mine || mine->arena_index!=0, NULL ); - if( mine!=&s ) { - governor::assume_scheduler( &s ); - generic_scheduler::cleanup_worker( &s, mine!=NULL ); - governor::assume_scheduler( mine ); - } else { - generic_scheduler::cleanup_worker( &s, true ); - } -} - -void market::acknowledge_close_connection() { - destroy(); -} - -::rml::job* market::create_one_job() { - unsigned index = ++my_num_workers; - __TBB_ASSERT( index > 0, NULL ); - ITT_THREAD_SET_NAME(_T("TBB Worker Thread")); - // index serves as a hint decreasing conflicts between workers when they migrate between arenas - generic_scheduler* s = generic_scheduler::create_worker( *this, index ); -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( !my_workers[index - 1], NULL ); - my_workers[index - 1] = s; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - governor::sign_on(s); - return s; -} - -#if __TBB_TASK_GROUP_CONTEXT -/** Propagates cancellation down the tree of dependent contexts by walking each - thread's local list of contexts **/ -void market::propagate_cancellation ( task_group_context& ctx ) { - __TBB_ASSERT ( ctx.my_cancellation_requested, "No cancellation request in the context" ); - // The whole propagation algorithm is under the lock in order to ensure correctness - // in case of parallel cancellations at the different levels of the context tree. - // See the note 1 at the bottom of this file. - global_market_mutex_type::scoped_lock lock(theMarketMutex); - // Advance global cancellation epoch - __TBB_FetchAndAddWrelease(&global_cancel_count, 1); - // Propagate to all workers and masters and sync up their local epochs with the global one - unsigned num_workers = my_num_workers; - for ( unsigned i = 0; i < num_workers; ++i ) { - generic_scheduler *s = my_workers[i]; - // If the worker is only about to be registered, skip it. - if ( s ) - s->propagate_cancellation(); - } - arena_list_type::iterator it = my_arenas.begin(); - for ( ; it != my_arenas.end(); ++it ) { - generic_scheduler *s = it->slot[0].my_scheduler; - // If the master is under construction, skip it. - if ( s ) - s->propagate_cancellation(); - } -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#if __TBB_COUNT_TASK_NODES -intptr_t market::workers_task_node_count() { - intptr_t result = 0; - spin_mutex::scoped_lock lock(my_arenas_list_mutex); - for ( arena_list_type::iterator it = my_arenas.begin(); it != my_arenas.end(); ++it ) - result += it->workers_task_node_count(); - return result; -} -#endif /* __TBB_COUNT_TASK_NODES */ - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_ARENA_PER_MASTER */ - -/* - Notes: - -1. Consider parallel cancellations at the different levels of the context tree: - - Ctx1 <- Cancelled by Thread1 |- Thread2 started processing - | | - Ctx2 |- Thread1 started processing - | T1 |- Thread2 finishes and syncs up local counters - Ctx3 <- Cancelled by Thread2 | - | |- Ctx5 is bound to Ctx2 - Ctx4 | - T2 |- Thread1 reaches Ctx2 - - Thread-propagator of each cancellation increments global counter. 
However the thread - propagating the cancellation from the outermost context (Thread1) may be the last - to finish. Which means that the local counters may be synchronized earlier (by Thread2, - at Time1) than it propagated cancellation into Ctx2 (at time Time2). If a new context - (Ctx5) is created and bound to Ctx2 between Time1 and Time2, checking its parent only - (Ctx2) may result in cancellation request being lost. - - This issue is solved by doing the whole propagation under the lock (the_scheduler_list_mutex). - - If we need more concurrency while processing parallel cancellations, we could try - the following modification of the propagation algorithm: - - advance global counter and remember it - for each thread: - scan thread's list of contexts - for each thread: - sync up its local counter only if the global counter has not been changed - - However this version of the algorithm requires more analysis and verification. -*/ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/market.h b/deal.II/bundled/tbb30_104oss/src/tbb/market.h deleted file mode 100644 index 0d70573f0c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/market.h +++ /dev/null @@ -1,210 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_market_H -#define _TBB_market_H - -#include "tbb/tbb_stddef.h" - -#if __TBB_ARENA_PER_MASTER - -#include "tbb/atomic.h" -#include "tbb/spin_mutex.h" -#include "../rml/include/rml_tbb.h" - -#include "intrusive_list.h" - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (push) - #pragma warning (disable: 4244) -#endif - -namespace tbb { - -class task_group_context; - -namespace internal { - -class arena; -class generic_scheduler; - -//------------------------------------------------------------------------ -// Class market -//------------------------------------------------------------------------ - -class market : no_copy, rml::tbb_client { - friend void ITT_DoUnsafeOneTimeInitialization (); - - typedef intrusive_list<arena> arena_list_type; - - //! Currently active global market - static market* theMarket; - - typedef spin_mutex global_market_mutex_type; - - //! 
Mutex guarding creation/destruction of theMarket, insertions/deletions in my_arenas, and cancellation propagation - static global_market_mutex_type theMarketMutex; - - //! Reference count controlling market object lifetime - intptr_t my_ref_count; - - //! List of active arenas - arena_list_type my_arenas; - - //! The first arena to be checked when idle worker seeks for an arena to enter - /** The check happens in round-robin fashion. **/ - arena_list_type::iterator my_next_arena; - - //! Lightweight mutex guarding accounting operations with arenas list - spin_mutex my_arenas_list_mutex; - - //! Number of workers that were requested by all arenas - atomic<int> my_total_demand; - - //! Pointer to the RML server object that services this TBB instance. - rml::tbb_server* my_server; - - //! Stack size of worker threads - size_t my_stack_size; - - //! Number of workers requested from the underlying resource manager - unsigned my_max_num_workers; - -#if __TBB_COUNT_TASK_NODES - //! Net number of nodes that have been allocated from heap. - /** Updated each time a scheduler or arena is destroyed. */ - atomic<intptr_t> my_task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - - //! Number of workers that have been delivered by RML - atomic<unsigned> my_num_workers; - - //! Constructor - market ( unsigned max_num_workers, size_t stack_size ); - - //! Factory method creating new market object - static market& global_market ( unsigned max_num_workers, size_t stack_size ); - - //! Destroys and deallocates market object created by market::create() - void destroy (); - - //! Returns next arena that needs more workers, or NULL. - arena* arena_in_need (); - - //! Recalculates the number of workers assigned to each arena. - /** The actual number of workers servicing a particular arena may temporarily - deviate from the calculated value. **/ - void update_allotment ( int max_workers ); - - //! Returns number of masters doing computational (CPU-intensive) work - int num_active_masters () { return 1; } // APM TODO: replace with a real mechanism - - // // // - // Implementation of rml::tbb_client interface methods - - /*override*/ version_type version () const { return 0; } - - /*override*/ unsigned max_job_count () const { return my_max_num_workers; } - - /*override*/ size_t min_stack_size () const { return worker_stack_size(); } - - /*override*/ policy_type policy () const { return throughput; } - - /*override*/ job* create_one_job (); - - /*override*/ void cleanup( job& j ); - - /*override*/ void acknowledge_close_connection (); - - /*override*/ void process( job& j ); - -public: - //! Creates an arena object - /** If necessary, also creates global market instance, and boosts its ref count. - Each call to create_arena() must be matched by the call to arena::free_arena(). **/ - static arena& create_arena ( unsigned max_num_workers, size_t stack_size ); - - //! Removes the arena from the market's list - void detach_arena ( arena& ); - - //! Decrements market's refcount and destroys it in the end - void release (); - - //! Request that arena's need in workers should be adjusted. - /** Concurrent invocations are possible only on behalf of different arenas. **/ - void adjust_demand ( arena&, int delta ); - - //! Returns the requested stack size of worker threads. - size_t worker_stack_size () const { return my_stack_size; } - -#if __TBB_COUNT_TASK_NODES - //! Returns the number of task objects "living" in worker threads - intptr_t workers_task_node_count(); - - //! 
Net number of nodes that have been allocated from heap. - /** Updated each time a scheduler or arena is destroyed. */ - void update_task_node_count( intptr_t delta ) { my_task_node_count += delta; } -#endif /* __TBB_COUNT_TASK_NODES */ - -#if __TBB_TASK_GROUP_CONTEXT - //! Propagates cancellation request to all descendants of the context. - void propagate_cancellation ( task_group_context& ctx ); - - //! Array of pointers to the registered workers - /** Used by cancellation propagation mechanism. - Must be the last data member of the class market. **/ - generic_scheduler* my_workers[1]; -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if __TBB_ARENA_PER_MASTER && ( _WIN32||_WIN64 ) - //! register master with the resource manager - void register_master( ::rml::server::execution_resource_t& rsc_handle ) { - __TBB_ASSERT( my_server, "RML server not defined?" ); - // the server may ignore registration and set master_exec_resource to NULL. - my_server->register_master( rsc_handle ); - } - - //! unregister master with the resource manager - void unregister_master( ::rml::server::execution_resource_t& rsc_handle ) const { - my_server->unregister_master( rsc_handle ); - } -#endif /* !__TBB_ARENA_PER_MASTER && ( _WIN32||_WIN64 ) */ - -}; // class market - -} // namespace internal -} // namespace tbb - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (pop) -#endif // warning 4244 is back - -#endif /* __TBB_ARENA_PER_MASTER */ - -#endif /* _TBB_market_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/mutex.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/mutex.cpp deleted file mode 100644 index 7ade57e72c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/mutex.cpp +++ /dev/null @@ -1,148 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/mutex.h" -#include "itt_notify.h" - -namespace tbb { - void mutex::scoped_lock::internal_acquire( mutex& m ) { - -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - case HELD: - EnterCriticalSection( &m.impl ); - // If a thread comes here, and another thread holds the lock, it will block - // in EnterCriticalSection. 
When it returns from EnterCriticalSection, - // m.state must be set to INITIALIZED. If the same thread tries to acquire a lock it - // aleady holds, the the lock is in HELD state, thus will cause the assertion to fail. - __TBB_ASSERT(m.state!=HELD, "mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex"); - m.state = HELD; - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_lock(&m.impl); - __TBB_ASSERT_EX(!error_code,"mutex::scoped_lock: pthread_mutex_lock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = &m; - } - -void mutex::scoped_lock::internal_release() { - __TBB_ASSERT( my_mutex, "mutex::scoped_lock: not holding a mutex" ); -#if _WIN32||_WIN64 - switch( my_mutex->state ) { - case INITIALIZED: - __TBB_ASSERT(false,"mutex::scoped_lock: try to release the lock without acquisition"); - break; - case HELD: - my_mutex->state = INITIALIZED; - LeaveCriticalSection(&my_mutex->impl); - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_unlock(&my_mutex->impl); - __TBB_ASSERT_EX(!error_code, "mutex::scoped_lock: pthread_mutex_unlock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = NULL; -} - -bool mutex::scoped_lock::internal_try_acquire( mutex& m ) { -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - case HELD: - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex::scoped_lock: illegal mutex state"); - break; - } -#endif /* _WIN32||_WIN64 */ - - bool result; -#if _WIN32||_WIN64 - result = TryEnterCriticalSection(&m.impl)!=0; - if( result ) { - __TBB_ASSERT(m.state!=HELD, "mutex::scoped_lock: deadlock caused by attempt to reacquire held mutex"); - m.state = HELD; - } -#else - result = pthread_mutex_trylock(&m.impl)==0; -#endif /* _WIN32||_WIN64 */ - if( result ) - my_mutex = &m; - return result; -} - -void mutex::internal_construct() { -#if _WIN32||_WIN64 - InitializeCriticalSection(&impl); - state = INITIALIZED; -#else - int error_code = pthread_mutex_init(&impl,NULL); - if( error_code ) - tbb::internal::handle_perror(error_code,"mutex: pthread_mutex_init failed"); -#endif /* _WIN32||_WIN64*/ - ITT_SYNC_CREATE(&impl, _T("tbb::mutex"), _T("")); -} - -void mutex::internal_destroy() { -#if _WIN32||_WIN64 - switch( state ) { - case INITIALIZED: - DeleteCriticalSection(&impl); - break; - case DESTROYED: - __TBB_ASSERT(false,"mutex: already destroyed"); - break; - default: - __TBB_ASSERT(false,"mutex: illegal state for destruction"); - break; - } - state = DESTROYED; -#else - int error_code = pthread_mutex_destroy(&impl); - __TBB_ASSERT_EX(!error_code,"mutex: pthread_mutex_destroy failed"); -#endif /* _WIN32||_WIN64 */ -} - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.cpp deleted file mode 100644 index ddb8321330..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.cpp +++ /dev/null @@ -1,237 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/tbb_config.h" - -#if __TBB_SCHEDULER_OBSERVER - -#include "tbb/spin_rw_mutex.h" -#include "tbb/aligned_space.h" - -#include "observer_proxy.h" -#include "tbb_main.h" -#include "governor.h" -#include "scheduler.h" - -namespace tbb { -namespace internal { - -typedef spin_rw_mutex::scoped_lock task_scheduler_observer_mutex_scoped_lock; - -/** aligned_space used here to shut up warnings when mutex destructor is called while threads are still using it. */ -static aligned_space<spin_rw_mutex,1> the_task_scheduler_observer_mutex; -static observer_proxy* global_first_observer_proxy; -observer_proxy* global_last_observer_proxy; - - -#if TBB_USE_ASSERT -static atomic<int> observer_proxy_count; - -struct check_observer_proxy_count { - ~check_observer_proxy_count() { - if( observer_proxy_count!=0 ) { - runtime_warning( "Leaked %ld observer_proxy objects\n", long(observer_proxy_count) ); - } - } -}; - -static check_observer_proxy_count the_check_observer_proxy_count; -#endif /* TBB_USE_ASSERT */ - -observer_proxy::observer_proxy( task_scheduler_observer_v3& tso ) : next(NULL), observer(&tso) { -#if TBB_USE_ASSERT - ++observer_proxy_count; -#endif /* TBB_USE_ASSERT */ - // 1 for observer - gc_ref_count = 1; - { - // Append to the global list - task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/true); - observer_proxy* p = global_last_observer_proxy; - prev = p; - if( p ) - p->next=this; - else - global_first_observer_proxy = this; - global_last_observer_proxy = this; - } -} - -void observer_proxy::remove_from_list() { - // Take myself off the global list. - if( next ) - next->prev = prev; - else - global_last_observer_proxy = prev; - if( prev ) - prev->next = next; - else - global_first_observer_proxy = next; -#if TBB_USE_ASSERT - poison_pointer(prev); - poison_pointer(next); - gc_ref_count = -666; -#endif /* TBB_USE_ASSERT */ -} - -void observer_proxy::remove_ref_slow() { - int r = gc_ref_count; - while(r>1) { - __TBB_ASSERT( r!=0, NULL ); - int r_old = gc_ref_count.compare_and_swap(r-1,r); - if( r_old==r ) { - // Successfully decremented count. 
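// The fast path of remove_ref_slow() above is the classic "decrement only
// while the count stays above one" idiom. A standalone sketch of the same
// idea with std::atomic (illustrative names, not part of TBB):
#include <atomic>

bool release_reference_fast(std::atomic<int>& ref_count) {
    int r = ref_count.load();
    while (r > 1) {
        // On failure compare_exchange_weak refreshes r with the current value,
        // so the loop retries against up-to-date data.
        if (ref_count.compare_exchange_weak(r, r - 1))
            return true;     // dropped one reference; the object stays alive
    }
    // Count is 1 (or 0): the caller must take the list lock, do the final
    // decrement there, and unlink/delete the object if it reaches zero.
    return false;
}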
- return; - } - r = r_old; - } - __TBB_ASSERT( r==1, NULL ); - // Reference count might go to zero - { - task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/true); - r = --gc_ref_count; - if( !r ) { - remove_from_list(); - } - } - if( !r ) { - __TBB_ASSERT( gc_ref_count == -666, NULL ); -#if TBB_USE_ASSERT - --observer_proxy_count; -#endif /* TBB_USE_ASSERT */ - delete this; - } -} - -observer_proxy* observer_proxy::process_list( observer_proxy* local_last, bool is_worker, bool is_entry ) { - // Pointer p marches though the list. - // If is_entry, start with our previous list position, otherwise start at beginning of list. - observer_proxy* p = is_entry ? local_last : NULL; - for(;;) { - task_scheduler_observer* tso=NULL; - // Hold lock on list only long enough to advance to next proxy in list. - { - task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/false); - do { - if( local_last && local_last->observer ) { - // 2 = 1 for observer and 1 for local_last - __TBB_ASSERT( local_last->gc_ref_count>=2, NULL ); - // Can decrement count quickly, because it cannot become zero here. - --local_last->gc_ref_count; - local_last = NULL; - } else { - // Use slow form of decrementing the reference count, after lock is released. - } - if( p ) { - // We were already processing the list. - if( observer_proxy* q = p->next ) { - // Step to next item in list. - p=q; - } else { - // At end of list. - if( is_entry ) { - // Remember current position in the list, so we can start at on the next call. - ++p->gc_ref_count; - } else { - // Finishin running off the end of the list - p=NULL; - } - goto done; - } - } else { - // Starting pass through the list - p = global_first_observer_proxy; - if( !p ) - goto done; - } - tso = p->observer; - } while( !tso ); - ++p->gc_ref_count; - ++tso->my_busy_count; - } - __TBB_ASSERT( !local_last || p!=local_last, NULL ); - if( local_last ) - local_last->remove_ref_slow(); - // Do not hold any locks on the list while calling user's code. - __TBB_TRY { - if( is_entry ) - tso->on_scheduler_entry( is_worker ); - else - tso->on_scheduler_exit( is_worker ); - } __TBB_CATCH(...) { - // Suppress exception, because user routines are supposed to be observing, not changing - // behavior of a master or worker thread. -#if TBB_USE_ASSERT - runtime_warning( "%s threw exception\n", is_entry ? "on_scheduler_entry" : "on_scheduler_exit"); -#endif /* __TBB_USE_ASSERT */ - } - intptr_t bc = --tso->my_busy_count; - __TBB_ASSERT_EX( bc>=0, "my_busy_count underflowed" ); - local_last = p; - } -done: - // Return new value to be used as local_last next time. - if( local_last ) - local_last->remove_ref_slow(); - __TBB_ASSERT( !p || is_entry, NULL ); - return p; -} - -void task_scheduler_observer_v3::observe( bool state ) { - if( state ) { - if( !my_proxy ) { - if( !__TBB_InitOnce::initialization_done() ) - DoOneTimeInitializations(); - my_busy_count = 0; - my_proxy = new observer_proxy(*this); - if( generic_scheduler* s = governor::local_scheduler_if_initialized() ) { - // Notify newly created observer of its own thread. - // Any other pending observers are notified too. 
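// A minimal sketch of the public interface these proxies implement, to the
// best of my understanding of tbb/task_scheduler_observer.h: derive from
// task_scheduler_observer, override the entry/exit callbacks, and call
// observe(true) to register (which allocates an observer_proxy as above).
#include "tbb/task_scheduler_observer.h"
#include <cstdio>

class thread_logger : public tbb::task_scheduler_observer {
public:
    thread_logger()  { observe(true);  }   // register with the scheduler
    ~thread_logger() { observe(false); }   // unregister before destruction
    /*override*/ void on_scheduler_entry(bool is_worker) {
        std::printf("%s thread joined the scheduler\n", is_worker ? "worker" : "master");
    }
    /*override*/ void on_scheduler_exit(bool is_worker) {
        std::printf("%s thread left the scheduler\n", is_worker ? "worker" : "master");
    }
};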
- s->notify_entry_observers(); - } - } - } else { - if( observer_proxy* proxy = my_proxy ) { - my_proxy = NULL; - __TBB_ASSERT( proxy->gc_ref_count>=1, "reference for observer missing" ); - { - task_scheduler_observer_mutex_scoped_lock lock(the_task_scheduler_observer_mutex.begin()[0],/*is_writer=*/true); - proxy->observer = NULL; - } - proxy->remove_ref_slow(); - while( my_busy_count ) { - __TBB_Yield(); - } - } - } -} - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_SCHEDULER_OBSERVER */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.h b/deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.h deleted file mode 100644 index 04e36b57b9..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/observer_proxy.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_observer_proxy_H -#define _TBB_observer_proxy_H - -#if __TBB_SCHEDULER_OBSERVER - -#include "tbb/task_scheduler_observer.h" - -namespace tbb { -namespace internal { - -class observer_proxy { - friend class task_scheduler_observer_v3; - //! Reference count used for garbage collection. - /** 1 for reference from my task_scheduler_observer. - 1 for each local_last_observer_proxy that points to me. - No accounting for predecessor in the global list. - No accounting for global_last_observer_proxy that points to me. */ - atomic<int> gc_ref_count; - //! Pointer to next task_scheduler_observer - /** Valid even when *this has been removed from the global list. */ - observer_proxy* next; - //! Pointer to previous task_scheduler_observer in global list. - observer_proxy* prev; - //! Associated observer - task_scheduler_observer* observer; - //! Account for removing reference from p. No effect if p is NULL. 
- void remove_ref_slow(); - void remove_from_list(); - observer_proxy( task_scheduler_observer_v3& wo ); -public: - static observer_proxy* process_list( observer_proxy* local_last, bool is_worker, bool is_entry ); -}; - -extern observer_proxy* global_last_observer_proxy; - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#endif /* _TBB_observer_proxy_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/pipeline.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/pipeline.cpp deleted file mode 100644 index 29ec1fbc4c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/pipeline.cpp +++ /dev/null @@ -1,748 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/pipeline.h" -#include "tbb/spin_mutex.h" -#include "tbb/cache_aligned_allocator.h" -#include "itt_notify.h" -#include "semaphore.h" - - -namespace tbb { - -namespace internal { - -//! This structure is used to store task information in a input buffer -struct task_info { - void* my_object; - //! Invalid unless a task went through an ordered stage. - Token my_token; - //! False until my_token is set. - bool my_token_ready; - //! True if my_object is valid. - bool is_valid; - //! Set to initial state (no object, no token) - void reset() { - my_object = NULL; - my_token = 0; - my_token_ready = false; - is_valid = false; - } -}; -//! A buffer of input items for a filter. -/** Each item is a task_info, inserted into a position in the buffer corresponding to a Token. */ -class input_buffer { - friend class tbb::internal::pipeline_root_task; - friend class tbb::filter; - friend class tbb::thread_bound_filter; - friend class tbb::internal::stage_task; - friend class tbb::pipeline; - - typedef Token size_type; - - //! Array of deferred tasks that cannot yet start executing. - task_info* array; - - //! for thread-bound filter, semaphore for waiting, NULL otherwise. - semaphore* my_sem; - - //! Size of array - /** Always 0 or a power of 2 */ - size_type array_size; - - //! Lowest token that can start executing. - /** All prior Token have already been seen. */ - Token low_token; - - //! Serializes updates. - spin_mutex array_mutex; - - //! Resize "array". - /** Caller is responsible to acquiring a lock on "array_mutex". 
*/ - void grow( size_type minimum_size ); - - //! Initial size for "array" - /** Must be a power of 2 */ - static const size_type initial_buffer_size = 4; - - //! Used for out of order buffer, and for assigning my_token if is_ordered and my_token not already assigned - Token high_token; - - //! True for ordered filter, false otherwise. - bool is_ordered; - - //! True for thread-bound filter, false otherwise. - bool is_bound; - - void create_sema(size_t initial_tokens) { __TBB_ASSERT(!my_sem,NULL); my_sem = new internal::semaphore(initial_tokens); } - void free_sema() { __TBB_ASSERT(my_sem,NULL); delete my_sem; } - void sema_P() { __TBB_ASSERT(my_sem,NULL); my_sem->P(); } - void sema_V() { __TBB_ASSERT(my_sem,NULL); my_sem->V(); } -public: - //! Construct empty buffer. - input_buffer( bool is_ordered_, bool is_bound_ ) : - array(NULL), my_sem(NULL), array_size(0), - low_token(0), high_token(0), - is_ordered(is_ordered_), is_bound(is_bound_) { - grow(initial_buffer_size); - __TBB_ASSERT( array, NULL ); - if(is_bound) create_sema(0); - } - - //! Destroy the buffer. - ~input_buffer() { - __TBB_ASSERT( array, NULL ); - cache_aligned_allocator<task_info>().deallocate(array,array_size); - poison_pointer( array ); - if(my_sem) { - free_sema(); - my_sem = NULL; - } - } - - //! Put a token into the buffer. - /** If task information was placed into buffer, returns true; - otherwise returns false, informing the caller to create and spawn a task. - If input buffer owned by thread-bound filter and the item at - low_token was not valid, issue a V() - If the input_buffer is owned by a successor to a thread-bound filter, - the force_put parameter should be true to ensure the token is inserted - in the buffer. - */ - bool put_token( task_info& info_, bool force_put = false ) { - { - info_.is_valid = true; - spin_mutex::scoped_lock lock( array_mutex ); - Token token; - bool was_empty = !array[low_token&(array_size-1)].is_valid; - if( is_ordered ) { - if( !info_.my_token_ready ) { - info_.my_token = high_token++; - info_.my_token_ready = true; - } - token = info_.my_token; - } else - token = high_token++; - __TBB_ASSERT( (tokendiff_t)(token-low_token)>=0, NULL ); - if( token!=low_token || is_bound || force_put ) { - // Trying to put token that is beyond low_token. - // Need to wait until low_token catches up before dispatching. - if( token-low_token>=array_size ) - grow( token-low_token+1 ); - ITT_NOTIFY( sync_releasing, this ); - array[token&(array_size-1)] = info_; - if(was_empty && is_bound) { - sema_V(); - } - return true; - } - } - return false; - } - - //! Note that processing of a token is finished. - /** Fires up processing of the next token, if processing was deferred. */ - // Using template to avoid explicit dependency on stage_task - // this is only called for serial filters, and is the reason for the - // advance parameter in return_item (we're incrementing low_token here.) - // Non-TBF serial stages don't advance the token at the start because the presence - // of the current token in the buffer keeps another stage from being spawned. 
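// A stripped-down sketch (types and names invented, not TBB code) of the
// token-addressed buffer idea above: capacity stays a power of two, so
// "token & (capacity-1)" picks the slot, and growing doubles the capacity
// while re-hashing live slots under the new mask. Locking is omitted.
#include <cstddef>
#include <vector>

struct slot { int value; bool is_valid; };   // stand-in for task_info

class token_ring {
    std::vector<slot> array;     // size is always a power of two
    unsigned long low_token;     // oldest token that may still be pending
public:
    token_ring() : array(4), low_token(0) {}   // value-init clears is_valid

    void grow(std::size_t minimum_size) {
        std::size_t old_size = array.size(), new_size = old_size;
        while (new_size < minimum_size) new_size *= 2;
        std::vector<slot> new_array(new_size);
        // Re-insert under the new mask, starting from the oldest live token.
        unsigned long t = low_token;
        for (std::size_t i = 0; i < old_size; ++i, ++t)
            new_array[t & (new_size - 1)] = array[t & (old_size - 1)];
        array.swap(new_array);
    }

    void put(unsigned long token, int value) {
        if (token - low_token >= array.size())
            grow(token - low_token + 1);
        slot& s = array[token & (array.size() - 1)];
        s.value = value;
        s.is_valid = true;
    }
};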
- template<typename StageTask> - void note_done( Token token, StageTask& spawner ) { - task_info wakee; - wakee.reset(); - { - spin_mutex::scoped_lock lock( array_mutex ); - if( !is_ordered || token==low_token ) { - // Wake the next task - task_info& item = array[++low_token & (array_size-1)]; - ITT_NOTIFY( sync_acquired, this ); - wakee = item; - item.is_valid = false; - } - } - if( wakee.is_valid ) - spawner.spawn_stage_task(wakee); - } - -#if __TBB_TASK_GROUP_CONTEXT - //! The method destroys all data in filters to prevent memory leaks - void clear( filter* my_filter ) { - long t=low_token; - for( size_type i=0; i<array_size; ++i, ++t ){ - task_info& temp = array[t&(array_size-1)]; - if (temp.is_valid ) { - my_filter->finalize(temp.my_object); - temp.is_valid = false; - } - } - } -#endif - - //! return an item, invalidate the queued item, but only advance if advance - // advance == true for parallel filters. If the filter is serial, leave the - // item in the buffer to keep another stage from being spawned. - bool return_item(task_info& info, bool advance) { - spin_mutex::scoped_lock lock( array_mutex ); - task_info& item = array[low_token&(array_size-1)]; - ITT_NOTIFY( sync_acquired, this ); - if( item.is_valid ) { - info = item; - item.is_valid = false; - if (advance) low_token++; - return true; - } - return false; - } - - //! true if the current low_token is valid. - bool has_item() { spin_mutex::scoped_lock lock(array_mutex); return array[low_token&(array_size -1)].is_valid; } -}; - -void input_buffer::grow( size_type minimum_size ) { - size_type old_size = array_size; - size_type new_size = old_size ? 2*old_size : initial_buffer_size; - while( new_size<minimum_size ) - new_size*=2; - task_info* new_array = cache_aligned_allocator<task_info>().allocate(new_size); - task_info* old_array = array; - for( size_type i=0; i<new_size; ++i ) - new_array[i].is_valid = false; - long t=low_token; - for( size_type i=0; i<old_size; ++i, ++t ) - new_array[t&(new_size-1)] = old_array[t&(old_size-1)]; - array = new_array; - array_size = new_size; - if( old_array ) - cache_aligned_allocator<task_info>().deallocate(old_array,old_size); -} - -class stage_task: public task, public task_info { -private: - friend class tbb::pipeline; - pipeline& my_pipeline; - filter* my_filter; - //! True if this task has not yet read the input. - bool my_at_start; - -public: - //! Construct stage_task for first stage in a pipeline. - /** Such a stage has not read any input yet. */ - stage_task( pipeline& pipeline ) : - my_pipeline(pipeline), - my_filter(pipeline.filter_list), - my_at_start(true) - { - task_info::reset(); - } - //! Construct stage_task for a subsequent stage in a pipeline. - stage_task( pipeline& pipeline, filter* filter_, const task_info& info ) : - task_info(info), - my_pipeline(pipeline), - my_filter(filter_), - my_at_start(false) - {} - //! Roughly equivalent to the constructor of input stage task - void reset() { - task_info::reset(); - my_filter = my_pipeline.filter_list; - my_at_start = true; - } - //! The virtual task execution method - /*override*/ task* execute(); -#if __TBB_TASK_GROUP_CONTEXT - ~stage_task() - { - if (my_filter && my_object && (my_filter->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4)) { - __TBB_ASSERT(is_cancelled(), "Trying to finalize the task that wasn't cancelled"); - my_filter->finalize(my_object); - my_object = NULL; - } - } -#endif // __TBB_TASK_GROUP_CONTEXT - //! 
Creates and spawns stage_task from task_info - void spawn_stage_task(const task_info& info) - { - stage_task* clone = new (allocate_additional_child_of(*parent())) - stage_task( my_pipeline, my_filter, info ); - spawn(*clone); - } -}; - -task* stage_task::execute() { - __TBB_ASSERT( !my_at_start || !my_object, NULL ); - __TBB_ASSERT( !my_filter->is_bound(), NULL ); - if( my_at_start ) { - if( my_filter->is_serial() ) { - my_object = (*my_filter)(my_object); - if( my_object ) { - if( my_filter->is_ordered() ) { - my_token = my_pipeline.token_counter++; // ideally, with relaxed semantics - my_token_ready = true; - } else if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( my_pipeline.has_thread_bound_filters ) - my_pipeline.token_counter++; // ideally, with relaxed semantics - } - if( !my_filter->next_filter_in_pipeline ) { // we're only filter in pipeline - reset(); - goto process_another_stage; - } else { - ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens ); - if( --my_pipeline.input_tokens>0 ) - spawn( *new( allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) ); - } - } else { - my_pipeline.end_of_input = true; - return NULL; - } - } else /*not is_serial*/ { - if( my_pipeline.end_of_input ) - return NULL; - if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( my_pipeline.has_thread_bound_filters ) - my_pipeline.token_counter++; - } - ITT_NOTIFY( sync_releasing, &my_pipeline.input_tokens ); - if( --my_pipeline.input_tokens>0 ) - spawn( *new( allocate_additional_child_of(*parent()) ) stage_task( my_pipeline ) ); - my_object = (*my_filter)(my_object); - if( !my_object ) { - my_pipeline.end_of_input = true; - if( (my_filter->my_filter_mode & my_filter->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( my_pipeline.has_thread_bound_filters ) - my_pipeline.token_counter--; // fix token_counter - } - return NULL; - } - } - my_at_start = false; - } else { - my_object = (*my_filter)(my_object); - if( my_filter->is_serial() ) - my_filter->my_input_buffer->note_done(my_token, *this); - } - my_filter = my_filter->next_filter_in_pipeline; - if( my_filter ) { - // There is another filter to execute. - // Crank up priority a notch. - add_to_depth(1); - if( my_filter->is_serial() ) { - // The next filter must execute tokens in order - if( my_filter->my_input_buffer->put_token(*this) ){ - // Can't proceed with the same item - if( my_filter->is_bound() ) { - // Find the next non-thread-bound filter - do { - my_filter = my_filter->next_filter_in_pipeline; - } while( my_filter && my_filter->is_bound() ); - // Check if there is an item ready to process - if( my_filter && my_filter->my_input_buffer->return_item(*this, !my_filter->is_serial())) - goto process_another_stage; - } - my_filter = NULL; // To prevent deleting my_object twice if exception occurs - return NULL; - } - } - } else { - // Reached end of the pipe. - size_t ntokens_avail = ++my_pipeline.input_tokens; - if(my_pipeline.filter_list->is_bound() ) { - if(ntokens_avail == 1) { - my_pipeline.filter_list->my_input_buffer->sema_V(); - } - return NULL; - } - if( ntokens_avail>1 // Only recycle if there is one available token - || my_pipeline.end_of_input ) { - return NULL; // No need to recycle for new input - } - ITT_NOTIFY( sync_acquired, &my_pipeline.input_tokens ); - // Recycle as an input stage task. - reset(); - } -process_another_stage: - /* A semi-hackish way to reexecute the same task object immediately without spawning. 
- recycle_as_continuation marks the task for future execution, - and then 'this' pointer is returned to bypass spawning. */ - recycle_as_continuation(); - return this; -} - -class pipeline_root_task: public task { - pipeline& my_pipeline; - bool do_segment_scanning; - - /*override*/ task* execute() { - if( !my_pipeline.end_of_input ) - if( !my_pipeline.filter_list->is_bound() ) - if( my_pipeline.input_tokens > 0 ) { - recycle_as_continuation(); - set_ref_count(1); - return new( allocate_child() ) stage_task( my_pipeline ); - } - if( do_segment_scanning ) { - filter* current_filter = my_pipeline.filter_list->next_segment; - /* first non-thread-bound filter that follows thread-bound one - and may have valid items to process */ - filter* first_suitable_filter = current_filter; - while( current_filter ) { - __TBB_ASSERT( !current_filter->is_bound(), "filter is thread-bound?" ); - __TBB_ASSERT( current_filter->prev_filter_in_pipeline->is_bound(), "previous filter is not thread-bound?" ); - if( !my_pipeline.end_of_input || current_filter->has_more_work()) - { - task_info info; - info.reset(); - if( current_filter->my_input_buffer->return_item(info, !current_filter->is_serial()) ) { - set_ref_count(1); - recycle_as_continuation(); - return new( allocate_child() ) stage_task( my_pipeline, current_filter, info); - } - current_filter = current_filter->next_segment; - if( !current_filter ) { - if( !my_pipeline.end_of_input ) { - recycle_as_continuation(); - return this; - } - current_filter = first_suitable_filter; - __TBB_Yield(); - } - } else { - /* The preceding pipeline segment is empty. - Fast-forward to the next post-TBF segment. */ - first_suitable_filter = first_suitable_filter->next_segment; - current_filter = first_suitable_filter; - } - } /* while( current_filter ) */ - return NULL; - } else { - if( !my_pipeline.end_of_input ) { - recycle_as_continuation(); - return this; - } - return NULL; - } - } -public: - pipeline_root_task( pipeline& pipeline ): my_pipeline(pipeline), do_segment_scanning(false) - { - __TBB_ASSERT( my_pipeline.filter_list, NULL ); - filter* first = my_pipeline.filter_list; - if( (first->my_filter_mode & first->version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - // Scanning the pipeline for segments - filter* head_of_previous_segment = first; - for( filter* subfilter=first->next_filter_in_pipeline; - subfilter!=NULL; - subfilter=subfilter->next_filter_in_pipeline ) - { - if( subfilter->prev_filter_in_pipeline->is_bound() && !subfilter->is_bound() ) { - do_segment_scanning = true; - head_of_previous_segment->next_segment = subfilter; - head_of_previous_segment = subfilter; - } - } - } - } -}; - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - // Suppress compiler warning about constant conditional expression - #pragma warning (disable: 4127) -#endif - -// The class destroys end_counter and clears all input buffers if pipeline was cancelled. 
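// A minimal end-user sketch of the machinery above: three filters (serial
// ordered input, parallel transform, serial ordered output) chained into a
// pipeline and run with a bounded number of in-flight tokens. The item type
// and token count are illustrative choices, not anything mandated by TBB.
#include "tbb/pipeline.h"
#include <cstdio>

class input_filter : public tbb::filter {
    int remaining;
public:
    input_filter() : tbb::filter(serial_in_order), remaining(100) {}
    /*override*/ void* operator()(void*) {
        if (remaining == 0) return NULL;      // NULL signals end of input
        return new int(remaining--);          // item handed to the next stage
    }
};

class square_filter : public tbb::filter {
public:
    square_filter() : tbb::filter(parallel) {}
    /*override*/ void* operator()(void* item) {
        int* p = static_cast<int*>(item);
        *p *= *p;
        return p;
    }
};

class output_filter : public tbb::filter {
public:
    output_filter() : tbb::filter(serial_in_order) {}
    /*override*/ void* operator()(void* item) {
        int* p = static_cast<int*>(item);
        std::printf("%d\n", *p);
        delete p;
        return NULL;
    }
};

void run_example() {
    input_filter in; square_filter sq; output_filter out;
    tbb::pipeline p;
    p.add_filter(in);
    p.add_filter(sq);
    p.add_filter(out);
    p.run(8);     // max_number_of_live_tokens: at most 8 items in flight
    p.clear();
}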
-class pipeline_cleaner: internal::no_copy { - pipeline& my_pipeline; -public: - pipeline_cleaner(pipeline& _pipeline) : - my_pipeline(_pipeline) - {} - ~pipeline_cleaner(){ -#if __TBB_TASK_GROUP_CONTEXT - if (my_pipeline.end_counter->is_cancelled()) // Pipeline was cancelled - my_pipeline.clear_filters(); -#endif - my_pipeline.end_counter = NULL; - } -}; - -} // namespace internal - -void pipeline::inject_token( task& ) { - __TBB_ASSERT(0,"illegal call to inject_token"); -} - -#if __TBB_TASK_GROUP_CONTEXT -void pipeline::clear_filters() { - for( filter* f = filter_list; f; f = f->next_filter_in_pipeline ) { - if ((f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(4)) - if( internal::input_buffer* b = f->my_input_buffer ) - b->clear(f); - } -} -#endif - -pipeline::pipeline() : - filter_list(NULL), - filter_end(NULL), - end_counter(NULL), - end_of_input(false), - has_thread_bound_filters(false) -{ - token_counter = 0; - input_tokens = 0; -} - -pipeline::~pipeline() { - clear(); -} - -void pipeline::clear() { - filter* next; - for( filter* f = filter_list; f; f=next ) { - if( internal::input_buffer* b = f->my_input_buffer ) { - delete b; - f->my_input_buffer = NULL; - } - next=f->next_filter_in_pipeline; - f->next_filter_in_pipeline = filter::not_in_pipeline(); - if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) { - f->prev_filter_in_pipeline = filter::not_in_pipeline(); - f->my_pipeline = NULL; - } - if ( (f->my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) ) - f->next_segment = NULL; - } - filter_list = filter_end = NULL; -} - -void pipeline::add_filter( filter& filter_ ) { -#if TBB_USE_ASSERT - if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) - __TBB_ASSERT( filter_.prev_filter_in_pipeline==filter::not_in_pipeline(), "filter already part of pipeline?" ); - __TBB_ASSERT( filter_.next_filter_in_pipeline==filter::not_in_pipeline(), "filter already part of pipeline?" 
); - __TBB_ASSERT( !end_counter, "invocation of add_filter on running pipeline" ); -#endif - if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(3) ) { - filter_.my_pipeline = this; - filter_.prev_filter_in_pipeline = filter_end; - if ( filter_list == NULL) - filter_list = &filter_; - else - filter_end->next_filter_in_pipeline = &filter_; - filter_.next_filter_in_pipeline = NULL; - filter_end = &filter_; - } - else - { - if( !filter_end ) - filter_end = reinterpret_cast<filter*>(&filter_list); - - *reinterpret_cast<filter**>(filter_end) = &filter_; - filter_end = reinterpret_cast<filter*>(&filter_.next_filter_in_pipeline); - *reinterpret_cast<filter**>(filter_end) = NULL; - } - if( (filter_.my_filter_mode & filter_.version_mask) >= __TBB_PIPELINE_VERSION(5) ) { - if( filter_.is_serial() ) { - if( filter_.is_bound() ) - has_thread_bound_filters = true; - filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), filter_.is_bound() ); - } - else { - if( filter_.prev_filter_in_pipeline && filter_.prev_filter_in_pipeline->is_bound() ) - filter_.my_input_buffer = new internal::input_buffer( false, false ); - } - } else { - if( filter_.is_serial() ) { - filter_.my_input_buffer = new internal::input_buffer( filter_.is_ordered(), false ); - } - } - -} - -void pipeline::remove_filter( filter& filter_ ) { - __TBB_ASSERT( filter_.prev_filter_in_pipeline!=filter::not_in_pipeline(), "filter not part of pipeline" ); - __TBB_ASSERT( filter_.next_filter_in_pipeline!=filter::not_in_pipeline(), "filter not part of pipeline" ); - __TBB_ASSERT( !end_counter, "invocation of remove_filter on running pipeline" ); - if (&filter_ == filter_list) - filter_list = filter_.next_filter_in_pipeline; - else { - __TBB_ASSERT( filter_.prev_filter_in_pipeline, "filter list broken?" ); - filter_.prev_filter_in_pipeline->next_filter_in_pipeline = filter_.next_filter_in_pipeline; - } - if (&filter_ == filter_end) - filter_end = filter_.prev_filter_in_pipeline; - else { - __TBB_ASSERT( filter_.next_filter_in_pipeline, "filter list broken?" ); - filter_.next_filter_in_pipeline->prev_filter_in_pipeline = filter_.prev_filter_in_pipeline; - } - if( internal::input_buffer* b = filter_.my_input_buffer ) { - delete b; - filter_.my_input_buffer = NULL; - } - filter_.next_filter_in_pipeline = filter_.prev_filter_in_pipeline = filter::not_in_pipeline(); - if ( (filter_.my_filter_mode & filter::version_mask) >= __TBB_PIPELINE_VERSION(5) ) - filter_.next_segment = NULL; - filter_.my_pipeline = NULL; -} - -void pipeline::run( size_t max_number_of_live_tokens -#if __TBB_TASK_GROUP_CONTEXT - , tbb::task_group_context& context -#endif - ) { - __TBB_ASSERT( max_number_of_live_tokens>0, "pipeline::run must have at least one token" ); - __TBB_ASSERT( !end_counter, "pipeline already running?" 
); - if( filter_list ) { - internal::pipeline_cleaner my_pipeline_cleaner(*this); - end_of_input = false; - input_tokens = internal::Token(max_number_of_live_tokens); - if(has_thread_bound_filters) { - // release input filter if thread-bound - if(filter_list->is_bound()) { - filter_list->my_input_buffer->sema_V(); - } - } -#if __TBB_TASK_GROUP_CONTEXT - end_counter = new( task::allocate_root(context) ) internal::pipeline_root_task( *this ); -#else - end_counter = new( task::allocate_root() ) internal::pipeline_root_task( *this ); -#endif - // Start execution of tasks - task::spawn_root_and_wait( *end_counter ); - - if(has_thread_bound_filters) { - for(filter* f = filter_list->next_filter_in_pipeline; f; f=f->next_filter_in_pipeline) { - if(f->is_bound()) { - f->my_input_buffer->sema_V(); // wake to end - } - } - } - } -} - -#if __TBB_TASK_GROUP_CONTEXT -void pipeline::run( size_t max_number_of_live_tokens ) { - if( filter_list ) { - // Construct task group context with the exception propagation mode expected - // by the pipeline caller. - uintptr_t ctx_traits = filter_list->my_filter_mode & filter::exact_exception_propagation ? - task_group_context::default_traits : - task_group_context::default_traits & ~task_group_context::exact_exception; - task_group_context context(task_group_context::bound, ctx_traits); - run(max_number_of_live_tokens, context); - } -} -#endif // __TBB_TASK_GROUP_CONTEXT - -bool filter::has_more_work() { - __TBB_ASSERT(my_pipeline, NULL); - __TBB_ASSERT(my_input_buffer, "has_more_work() called for filter with no input buffer"); - return (internal::tokendiff_t)(my_pipeline->token_counter - my_input_buffer->low_token) != 0; -} - -filter::~filter() { - if ( (my_filter_mode & version_mask) >= __TBB_PIPELINE_VERSION(3) ) { - if ( next_filter_in_pipeline != filter::not_in_pipeline() ) - my_pipeline->remove_filter(*this); - else - __TBB_ASSERT( prev_filter_in_pipeline == filter::not_in_pipeline(), "probably filter list is broken" ); - } else { - __TBB_ASSERT( next_filter_in_pipeline==filter::not_in_pipeline(), "cannot destroy filter that is part of pipeline" ); - } -} - -thread_bound_filter::result_type thread_bound_filter::process_item() { - return internal_process_item(true); -} - -thread_bound_filter::result_type thread_bound_filter::try_process_item() { - return internal_process_item(false); -} - -thread_bound_filter::result_type thread_bound_filter::internal_process_item(bool is_blocking) { - internal::task_info info; - info.reset(); - - if(my_pipeline && my_pipeline->end_of_input && !has_more_work()) - return end_of_stream; - - if( !prev_filter_in_pipeline ) { - if( my_pipeline->end_of_input ) - return end_of_stream; - while(my_pipeline->input_tokens == 0) { - if( !is_blocking ) - return item_not_available; - my_input_buffer->sema_P(); - } - info.my_object = (*this)(info.my_object); - if( info.my_object ) { - __TBB_ASSERT(my_pipeline->input_tokens > 0, "Token failed in thread-bound filter"); - my_pipeline->input_tokens--; - if( is_ordered() ) { - info.my_token = my_pipeline->token_counter; - info.my_token_ready = true; - } - my_pipeline->token_counter++; // ideally, with relaxed semantics - } else { - my_pipeline->end_of_input = true; - return end_of_stream; - } - } else { /* this is not an input filter */ - while(!my_input_buffer->has_item()) { - if(!is_blocking) { - return item_not_available; - } - my_input_buffer->sema_P(); - if( my_pipeline->end_of_input && !has_more_work()) { - return end_of_stream; - } - } - if(!my_input_buffer->return_item(info, 
/*advance*/true)) { - __TBB_ASSERT(0,"return_item failed"); - } - info.my_object = (*this)(info.my_object); - } - if( next_filter_in_pipeline ) { - if (!next_filter_in_pipeline->my_input_buffer->put_token(info,/*force_put=*/true) ) { - __TBB_ASSERT(0, "Couldn't put token after thread-bound buffer"); - } - } else { - size_t ntokens_avail = ++(my_pipeline->input_tokens); - if(my_pipeline->filter_list->is_bound()) { - if(ntokens_avail == 1) { - my_pipeline->filter_list->my_input_buffer->sema_V(); - } - } - } - - return success; -} - -} // tbb - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/private_server.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/private_server.cpp deleted file mode 100644 index 4c84024e73..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/private_server.cpp +++ /dev/null @@ -1,388 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "rml_tbb.h" -#include "../server/thread_monitor.h" -#include "tbb/atomic.h" -#include "tbb/cache_aligned_allocator.h" -#include "tbb/spin_mutex.h" -#include "tbb/tbb_thread.h" - -using rml::internal::thread_monitor; - -namespace tbb { -namespace internal { -namespace rml { - -class private_server; - -class private_worker: no_copy { - //! State in finite-state machine that controls the worker. - /** State diagram: - init --------------------\ - | | - V V - starting --> normal --> quit - | - V - plugged - */ - enum state_t { - //! *this is initialized - st_init, - //! *this has associated thread that is starting up. - st_starting, - //! Associated thread is doing normal life sequence. - st_normal, - //! Associated thread has ended normal life sequence and promises to never touch *this again. - st_quit, - //! Associated thread should skip normal life sequence, because private_server is shutting down. - st_plugged - }; - atomic<state_t> my_state; - - //! Associated server - private_server& my_server; - - //! Associated client - tbb_client& my_client; - - //! index used for avoiding the 64K aliasing problem - const size_t my_index; - - //! Monitor for sleeping when there is no work to do. 
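// Sketch of how user code drives the process_item() protocol implemented
// above: a thread_bound_filter is serviced by a thread the application owns,
// while pipeline::run() executes on a different thread because run() blocks.
// The filter "f" is assumed to already be part of a running pipeline.
#include "tbb/pipeline.h"

void service_thread_bound_filter(tbb::thread_bound_filter& f) {
    // process_item() waits on the filter's semaphore until an item arrives and
    // returns end_of_stream once the pipeline has no more work for this stage.
    while (f.process_item() != tbb::thread_bound_filter::end_of_stream)
        continue;
}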
- /** The invariant that holds for sleeping workers is: - "my_slack<=0 && my_state==st_normal && I am on server's list of asleep threads" */ - thread_monitor my_thread_monitor; - - //! Link for list of workers that are sleeping or have no associated thread. - private_worker* my_next; - - friend class private_server; - - //! Actions executed by the associated thread - void run(); - - //! Wake up associated thread (or launch a thread if there is none) - void wake_or_launch(); - - //! Called by a thread (usually not the associated thread) to commence termination. - void start_shutdown(); - - static __RML_DECL_THREAD_ROUTINE thread_routine( void* arg ); - -protected: - private_worker( private_server& server, tbb_client& client, const size_t i ) : - my_server(server), - my_client(client), - my_index(i) - { - my_state = st_init; - } - -}; - -static const size_t cache_line_size = tbb::internal::NFS_MaxLineSize; - - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warnings about uninstantiatble class - #pragma warning(push) - #pragma warning(disable:4510 4610) -#endif -class padded_private_worker: public private_worker { - char pad[cache_line_size - sizeof(private_worker)%cache_line_size]; -public: - padded_private_worker( private_server& server, tbb_client& client, const size_t i ) : private_worker(server,client,i) {} -}; -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif - -class private_server: public tbb_server, no_copy { - tbb_client& my_client; - //! Maximum number of threads to be creatd. - /** Threads are created lazily, so maximum might not actually be reached. */ - const tbb_client::size_type my_n_thread; - - //! Stack size for each thread. */ - const size_t my_stack_size; - - //! Number of jobs that could use their associated thread minus number of active threads. - /** If negative, indicates oversubscription. - If positive, indicates that more threads should run. - Can be lowered asynchronously, but must be raised only while holding my_asleep_list_mutex, - because raising it impacts the invariant for sleeping threads. */ - atomic<int> my_slack; - - //! Counter used to determine when to delete this. - atomic<int> my_ref_count; - - padded_private_worker* my_thread_array; - - //! List of workers that are asleep or committed to sleeping until notified by another thread. - tbb::atomic<private_worker*> my_asleep_list_root; - - //! Protects my_asleep_list_root - tbb::spin_mutex my_asleep_list_mutex; - -#if TBB_USE_ASSERT - atomic<int> my_net_slack_requests; -#endif /* TBB_USE_ASSERT */ - - //! Wake up to two sleeping workers, if there are any sleeping. - /** The call is used to propagate a chain reaction where each thread wakes up two threads, - which in turn each wake up two threads, etc. */ - void propagate_chain_reaction() { - // First test of a double-check idiom. Second test is inside wake_some(0). - if( my_asleep_list_root ) - wake_some(0); - } - - //! Try to add t to list of sleeping workers - bool try_insert_in_asleep_list( private_worker& t ); - - //! Equivalent of adding additional_slack to my_slack and waking up to 2 threads if my_slack permits. 
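// Sketch of the cache-line padding idiom used by padded_private_worker above:
// pad each per-thread object to a whole number of cache lines so neighbouring
// workers never share a line (false sharing). The 64-byte figure is an
// assumption for illustration; TBB queries NFS_MaxLineSize instead.
#include <cstddef>

static const std::size_t cache_line_size = 64;

struct worker_data {            // stand-in for the hot per-thread fields
    long items_processed;
};

struct padded_worker_data : worker_data {
    char pad[cache_line_size - sizeof(worker_data) % cache_line_size];
};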
- void wake_some( int additional_slack ); - - virtual ~private_server(); - - void remove_server_ref() { - if( --my_ref_count==0 ) { - my_client.acknowledge_close_connection(); - this->~private_server(); - tbb::cache_aligned_allocator<private_server>().deallocate( this, 1 ); - } - } - - friend class private_worker; -public: - private_server( tbb_client& client ); - - /*override*/ version_type version() const { - return 0; - } - - /*override*/ void request_close_connection( bool /*exiting*/ ) { - for( size_t i=0; i<my_n_thread; ++i ) - my_thread_array[i].start_shutdown(); - remove_server_ref(); - } - - /*override*/ void yield() {__TBB_Yield();} - - /*override*/ void independent_thread_number_changed( int ) {__TBB_ASSERT(false,NULL);} - - /*override*/ unsigned default_concurrency() const {return tbb::tbb_thread::hardware_concurrency()-1;} - - /*override*/ void adjust_job_count_estimate( int delta ); - -#if _WIN32||_WIN64 - /*override*/ void register_master ( ::rml::server::execution_resource_t& ) {} - /*override*/ void unregister_master ( ::rml::server::execution_resource_t ) {} -#endif /* _WIN32||_WIN64 */ -}; - -//------------------------------------------------------------------------ -// Methods of private_worker -//------------------------------------------------------------------------ -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warnings about an initialized variable 'sink_for_alloca' not referenced - #pragma warning(push) - #pragma warning(disable:4189) -#endif -#if __MINGW32__ && __GNUC__==4 &&__GNUC_MINOR__>=2 && !__MINGW64__ -// ensure that stack is properly aligned for TBB threads -__attribute__((force_align_arg_pointer)) -#endif -__RML_DECL_THREAD_ROUTINE private_worker::thread_routine( void* arg ) { - private_worker* self = static_cast<private_worker*>(arg); - AVOID_64K_ALIASING( self->my_index ); -#if _XBOX - int HWThreadIndex = __TBB_XBOX360_GetHardwareThreadIndex(i); - XSetThreadProcessor(GetCurrentThread(), HWThreadIndex); -#endif - self->run(); - return 0; -} -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif - -void private_worker::start_shutdown() { - state_t s; - // Transition from st_starting or st_normal to st_plugged or st_quit - do { - s = my_state; - __TBB_ASSERT( s==st_init||s==st_starting||s==st_normal, NULL ); - } while( my_state.compare_and_swap( s==st_starting? st_plugged : st_quit, s )!=s ); - if( s==st_normal ) { - // May have invalidated invariant for sleeping, so wake up the thread. - // Note that the notify() here occurs without maintaining invariants for my_slack. - // It does not matter, because my_state==st_quit overrides checking of my_slack. - my_thread_monitor.notify(); - } else if( s==st_init ) { - // Perform action that otherwise would be performed by associated thread when it quits. 
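// The loop above is a compare-and-swap state transition that is retried until
// the state it observed is the state it actually consumed. The same idiom as
// a standalone sketch with std::atomic (invented names, not TBB code):
#include <atomic>

enum worker_state { st_init, st_starting, st_normal, st_quit, st_plugged };

// Returns the state that was observed when the transition finally succeeded,
// so the caller can decide whether a wake-up or a reference release is needed.
worker_state request_shutdown(std::atomic<worker_state>& state) {
    worker_state s = state.load();
    for (;;) {
        worker_state target = (s == st_starting) ? st_plugged : st_quit;
        // compare_exchange_weak reloads s on failure, so target is recomputed
        // from fresh data on every retry.
        if (state.compare_exchange_weak(s, target))
            return s;
    }
}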
- my_server.remove_server_ref(); - } -} - -void private_worker::run() { - my_server.propagate_chain_reaction(); - state_t s = my_state.compare_and_swap( st_normal, st_starting ); - if( s==st_starting ) { - ::rml::job& j = *my_client.create_one_job(); - while( my_state==st_normal ) { - if( my_server.my_slack>=0 ) { - my_client.process(j); - } else { - thread_monitor::cookie c; - // Prepare to wait - my_thread_monitor.prepare_wait(c); - // Check/set the invariant for sleeping - if( my_state==st_normal && my_server.try_insert_in_asleep_list(*this) ) { - my_thread_monitor.commit_wait(c); - my_server.propagate_chain_reaction(); - } else { - // Invariant broken - my_thread_monitor.cancel_wait(); - } - } - } - my_client.cleanup(j); - } else { - // Server is already shutting down. - __TBB_ASSERT( s==st_plugged, NULL ); - } - ++my_server.my_slack; - my_server.remove_server_ref(); -} - -inline void private_worker::wake_or_launch() { - if( my_state==st_init && my_state.compare_and_swap( st_starting, st_init )==st_init ) - thread_monitor::launch( thread_routine, this, my_server.my_stack_size ); - else - my_thread_monitor.notify(); -} - -//------------------------------------------------------------------------ -// Methods of private_server -//------------------------------------------------------------------------ -private_server::private_server( tbb_client& client ) : - my_client(client), - my_n_thread(client.max_job_count()), - my_stack_size(client.min_stack_size()), - my_thread_array(NULL) -{ - my_ref_count = my_n_thread+1; - my_slack = 0; -#if TBB_USE_ASSERT - my_net_slack_requests = 0; -#endif /* TBB_USE_ASSERT */ - my_asleep_list_root = NULL; - my_thread_array = tbb::cache_aligned_allocator<padded_private_worker>().allocate( my_n_thread ); - memset( my_thread_array, 0, sizeof(private_worker)*my_n_thread ); - for( size_t i=0; i<my_n_thread; ++i ) { - private_worker* t = new( &my_thread_array[i] ) padded_private_worker( *this, client, i ); - t->my_next = my_asleep_list_root; - my_asleep_list_root = t; - } -} - -private_server::~private_server() { - __TBB_ASSERT( my_net_slack_requests==0, NULL ); - for( size_t i=my_n_thread; i--; ) - my_thread_array[i].~padded_private_worker(); - tbb::cache_aligned_allocator<padded_private_worker>().deallocate( my_thread_array, my_n_thread ); - tbb::internal::poison_pointer( my_thread_array ); -} - -inline bool private_server::try_insert_in_asleep_list( private_worker& t ) { - tbb::spin_mutex::scoped_lock lock(my_asleep_list_mutex); - // Contribute to slack under lock so that if another takes that unit of slack, - // it sees us sleeping on the list and wakes us up. - int k = ++my_slack; - if( k<=0 ) { - t.my_next = my_asleep_list_root; - my_asleep_list_root = &t; - return true; - } else { - --my_slack; - return false; - } -} - -void private_server::wake_some( int additional_slack ) { - __TBB_ASSERT( additional_slack>=0, NULL ); - private_worker* wakee[2]; - private_worker**w = wakee; - { - tbb::spin_mutex::scoped_lock lock(my_asleep_list_mutex); - while( my_asleep_list_root && w<wakee+2 ) { - if( additional_slack>0 ) { - --additional_slack; - } else { - // Try to claim unit of slack - int old; - do { - old = my_slack; - if( old<=0 ) goto done; - } while( my_slack.compare_and_swap(old-1,old)!=old ); - } - // Pop sleeping worker to combine with claimed unit of slack - my_asleep_list_root = (*w++ = my_asleep_list_root)->my_next; - } - if( additional_slack ) { - // Contribute our unused slack to my_slack. 
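// wake_some() above claims units of "slack" with a compare-and-swap that only
// succeeds while the counter is positive. The claiming step as a standalone
// sketch with std::atomic (illustrative names, not TBB code):
#include <atomic>

bool try_claim_slack(std::atomic<int>& slack) {
    int old = slack.load();
    while (old > 0) {
        // On failure old is refreshed, so the >0 test is re-evaluated.
        if (slack.compare_exchange_weak(old, old - 1))
            return true;     // claimed one unit; pair it with a sleeping worker
    }
    return false;            // no slack available: do not wake anybody
}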
- my_slack += additional_slack; - } - } -done: - while( w>wakee ) - (*--w)->wake_or_launch(); -} - -void private_server::adjust_job_count_estimate( int delta ) { -#if TBB_USE_ASSERT - my_net_slack_requests+=delta; -#endif /* TBB_USE_ASSERT */ - if( delta<0 ) { - my_slack+=delta; - } else if( delta>0 ) { - wake_some( delta ); - } -} - -//! Factory method called from task.cpp to create a private_server. -tbb_server* make_private_server( tbb_client& client ) { - return new( tbb::cache_aligned_allocator<private_server>().allocate(1) ) private_server(client); -} - -} // namespace rml -} // namespace internal -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/queuing_mutex.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/queuing_mutex.cpp deleted file mode 100644 index 4edf3718ae..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/queuing_mutex.cpp +++ /dev/null @@ -1,117 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/tbb_machine.h" -#include "tbb/tbb_stddef.h" -#include "tbb_misc.h" -#include "tbb/queuing_mutex.h" -#include "itt_notify.h" - - -namespace tbb { - -using namespace internal; - -//! A method to acquire queuing_mutex lock -void queuing_mutex::scoped_lock::acquire( queuing_mutex& m ) -{ - __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex"); - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - mutex = &m; - next = NULL; - going = 0; - - // The fetch_and_store must have release semantics, because we are - // "sending" the fields initialized above to other processors. - scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this); - if( pred ) { - ITT_NOTIFY(sync_prepare, mutex); - __TBB_ASSERT( !pred->next, "the predecessor has another successor!"); - pred->next = this; - spin_wait_while_eq( going, 0ul ); - } - ITT_NOTIFY(sync_acquired, mutex); - - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. - __TBB_load_with_acquire(going); -} - -//! 
A method to acquire queuing_mutex if it is free -bool queuing_mutex::scoped_lock::try_acquire( queuing_mutex& m ) -{ - __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex"); - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - next = NULL; - going = 0; - - if( m.q_tail ) return false; - // The CAS must have release semantics, because we are - // "sending" the fields initialized above to other processors. - scoped_lock* pred = m.q_tail.compare_and_swap<tbb::release>(this, NULL); - - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. - // try_acquire should always have acquire semantic, even if failed. - __TBB_load_with_acquire(going); - if( !pred ) { - mutex = &m; - ITT_NOTIFY(sync_acquired, mutex); - return true; - } else return false; -} - -//! A method to release queuing_mutex lock -void queuing_mutex::scoped_lock::release( ) -{ - __TBB_ASSERT(this->mutex!=NULL, "no lock acquired"); - - ITT_NOTIFY(sync_releasing, mutex); - if( !next ) { - if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) { - // this was the only item in the queue, and the queue is now empty. - goto done; - } - // Someone in the queue - spin_wait_while_eq( next, (scoped_lock*)0 ); - } - __TBB_ASSERT(next,NULL); - __TBB_store_with_release(next->going, 1); -done: - initialize(); -} - -void queuing_mutex::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::queuing_mutex"), _T("")); -} - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/queuing_rw_mutex.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/queuing_rw_mutex.cpp deleted file mode 100644 index d02f3586b5..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/queuing_rw_mutex.cpp +++ /dev/null @@ -1,505 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -/** Before making any changes in the implementation, please emulate algorithmic changes - with SPIN tool using <TBB directory>/tools/spin_models/ReaderWriterMutex.pml. - There could be some code looking as "can be restructured" but its structure does matter! 
*/ - -#include "tbb/tbb_machine.h" -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_machine.h" -#include "tbb/queuing_rw_mutex.h" -#include "itt_notify.h" - - -namespace tbb { - -using namespace internal; - -//! Flag bits in a state_t that specify information about a locking request. -enum state_t_flags { - STATE_NONE = 0, - STATE_WRITER = 1, - STATE_READER = 1<<1, - STATE_READER_UNBLOCKNEXT = 1<<2, - STATE_COMBINED_WAITINGREADER = STATE_READER | STATE_READER_UNBLOCKNEXT, - STATE_ACTIVEREADER = 1<<3, - STATE_COMBINED_READER = STATE_COMBINED_WAITINGREADER | STATE_ACTIVEREADER, - STATE_UPGRADE_REQUESTED = 1<<4, - STATE_UPGRADE_WAITING = 1<<5, - STATE_UPGRADE_LOSER = 1<<6, - STATE_COMBINED_UPGRADING = STATE_UPGRADE_WAITING | STATE_UPGRADE_LOSER -}; - -const unsigned char RELEASED = 0; -const unsigned char ACQUIRED = 1; - -template<typename T> -inline atomic<T>& as_atomic( T& t ) { - return *(atomic<T>*)&t; -} - -inline bool queuing_rw_mutex::scoped_lock::try_acquire_internal_lock() -{ - return as_atomic(internal_lock).compare_and_swap<tbb::acquire>(ACQUIRED,RELEASED) == RELEASED; -} - -inline void queuing_rw_mutex::scoped_lock::acquire_internal_lock() -{ - // Usually, we would use the test-test-and-set idiom here, with exponential backoff. - // But so far, experiments indicate there is no value in doing so here. - while( !try_acquire_internal_lock() ) { - __TBB_Pause(1); - } -} - -inline void queuing_rw_mutex::scoped_lock::release_internal_lock() -{ - __TBB_store_with_release(internal_lock,RELEASED); -} - -inline void queuing_rw_mutex::scoped_lock::wait_for_release_of_internal_lock() -{ - spin_wait_until_eq(internal_lock, RELEASED); -} - -inline void queuing_rw_mutex::scoped_lock::unblock_or_wait_on_internal_lock( uintptr_t flag ) { - if( flag ) - wait_for_release_of_internal_lock(); - else - release_internal_lock(); -} - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (push) - #pragma warning (disable: 4311 4312) -#endif - -//! A view of a T* with additional functionality for twiddling low-order bits. -template<typename T> -class tricky_atomic_pointer: no_copy { -public: - typedef typename atomic_rep<sizeof(T*)>::word word; - - template<memory_semantics M> - static T* fetch_and_add( T* volatile * location, word addend ) { - return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_add(location, addend) ); - } - template<memory_semantics M> - static T* fetch_and_store( T* volatile * location, T* value ) { - return reinterpret_cast<T*>( atomic_traits<sizeof(T*),M>::fetch_and_store(location, reinterpret_cast<word>(value)) ); - } - template<memory_semantics M> - static T* compare_and_swap( T* volatile * location, T* value, T* comparand ) { - return reinterpret_cast<T*>( - atomic_traits<sizeof(T*),M>::compare_and_swap(location, reinterpret_cast<word>(value), - reinterpret_cast<word>(comparand)) - ); - } - - T* & ref; - tricky_atomic_pointer( T*& original ) : ref(original) {}; - tricky_atomic_pointer( T* volatile & original ) : ref(original) {}; - T* operator&( word operand2 ) const { - return reinterpret_cast<T*>( reinterpret_cast<word>(ref) & operand2 ); - } - T* operator|( word operand2 ) const { - return reinterpret_cast<T*>( reinterpret_cast<word>(ref) | operand2 ); - } -}; - -typedef tricky_atomic_pointer<queuing_rw_mutex::scoped_lock> tricky_pointer; - -#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - #pragma warning (pop) -#endif - -//! 
Mask for low order bit of a pointer. -static const tricky_pointer::word FLAG = 0x1; - -inline -uintptr_t get_flag( queuing_rw_mutex::scoped_lock* ptr ) { - return uintptr_t(tricky_pointer(ptr)&FLAG); -} - -//------------------------------------------------------------------------ -// Methods of queuing_rw_mutex::scoped_lock -//------------------------------------------------------------------------ - -void queuing_rw_mutex::scoped_lock::acquire( queuing_rw_mutex& m, bool write ) -{ - __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex"); - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - mutex = &m; - prev = NULL; - next = NULL; - going = 0; - state = state_t(write ? STATE_WRITER : STATE_READER); - internal_lock = RELEASED; - - queuing_rw_mutex::scoped_lock* pred = m.q_tail.fetch_and_store<tbb::release>(this); - - if( write ) { // Acquiring for write - - if( pred ) { - ITT_NOTIFY(sync_prepare, mutex); - pred = tricky_pointer(pred) & ~FLAG; - __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" ); - __TBB_ASSERT( !pred->next, "the predecessor has another successor!"); - // ensure release semantics on IPF - __TBB_store_with_release(pred->next,this); - spin_wait_until_eq(going, 1); - } - - } else { // Acquiring for read -#if DO_ITT_NOTIFY - bool sync_prepare_done = false; -#endif - if( pred ) { - unsigned short pred_state; - __TBB_ASSERT( !this->prev, "the predecessor is already set" ); - if( tricky_pointer(pred)&FLAG ) { - /* this is only possible if pred is an upgrading reader and it signals us to wait */ - pred_state = STATE_UPGRADE_WAITING; - pred = tricky_pointer(pred) & ~FLAG; - } else { - // Load pred->state now, because once pred->next becomes - // non-NULL, we must assume that *pred might be destroyed. - pred_state = pred->state.compare_and_swap<tbb::acquire>(STATE_READER_UNBLOCKNEXT, STATE_READER); - } - this->prev = pred; - __TBB_ASSERT( !( tricky_pointer(pred) & FLAG ), "use of corrupted pointer!" ); - __TBB_ASSERT( !pred->next, "the predecessor has another successor!"); - // ensure release semantics on IPF - __TBB_store_with_release(pred->next,this); - if( pred_state != STATE_ACTIVEREADER ) { -#if DO_ITT_NOTIFY - sync_prepare_done = true; - ITT_NOTIFY(sync_prepare, mutex); -#endif - spin_wait_until_eq(going, 1); - } - } - unsigned short old_state = state.compare_and_swap<tbb::acquire>(STATE_ACTIVEREADER, STATE_READER); - if( old_state!=STATE_READER ) { -#if DO_ITT_NOTIFY - if( !sync_prepare_done ) - ITT_NOTIFY(sync_prepare, mutex); -#endif - // Failed to become active reader -> need to unblock the next waiting reader first - __TBB_ASSERT( state==STATE_READER_UNBLOCKNEXT, "unexpected state" ); - spin_wait_while_eq(next, (scoped_lock*)NULL); - /* state should be changed before unblocking the next otherwise it might finish - and another thread can get our old state and left blocked */ - state = STATE_ACTIVEREADER; - // ensure release semantics on IPF - __TBB_store_with_release(next->going,1); - } - } - - ITT_NOTIFY(sync_acquired, mutex); - - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. 
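The tricky_atomic_pointer/FLAG machinery above relies on the pointed-to scoped_lock objects being at least 2-byte aligned, so bit 0 of any pointer to one is always zero and can carry an extra "in use"/"wait" flag. A minimal sketch of that low-order-bit tagging trick (illustrative helpers, not the TBB internals):

#include <cstdint>

template <typename T>
T* set_flag(T* p)   { return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(p) | 1u); }

template <typename T>
T* clear_flag(T* p) { return reinterpret_cast<T*>(reinterpret_cast<std::uintptr_t>(p) & ~std::uintptr_t(1)); }

template <typename T>
bool has_flag(const T* p) { return (reinterpret_cast<std::uintptr_t>(p) & 1u) != 0; }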
- __TBB_load_with_acquire(going); -} - -bool queuing_rw_mutex::scoped_lock::try_acquire( queuing_rw_mutex& m, bool write ) -{ - __TBB_ASSERT( !this->mutex, "scoped_lock is already holding a mutex"); - - // Must set all fields before the fetch_and_store, because once the - // fetch_and_store executes, *this becomes accessible to other threads. - prev = NULL; - next = NULL; - going = 0; - state = state_t(write ? STATE_WRITER : STATE_ACTIVEREADER); - internal_lock = RELEASED; - - if( m.q_tail ) return false; - // The CAS must have release semantics, because we are - // "sending" the fields initialized above to other processors. - queuing_rw_mutex::scoped_lock* pred = m.q_tail.compare_and_swap<tbb::release>(this, NULL); - - // Force acquire so that user's critical section receives correct values - // from processor that was previously in the user's critical section. - // try_acquire should always have acquire semantic, even if failed. - __TBB_load_with_acquire(going); - - if( !pred ) { - mutex = &m; - ITT_NOTIFY(sync_acquired, mutex); - return true; - } else return false; - -} - -void queuing_rw_mutex::scoped_lock::release( ) -{ - __TBB_ASSERT(this->mutex!=NULL, "no lock acquired"); - - ITT_NOTIFY(sync_releasing, mutex); - - if( state == STATE_WRITER ) { // Acquired for write - - // The logic below is the same as "writerUnlock", but restructured to remove "return" in the middle of routine. - // In the statement below, acquire semantics of reading 'next' is required - // so that following operations with fields of 'next' are safe. - scoped_lock* n = __TBB_load_with_acquire(next); - if( !n ) { - if( this == mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) { - // this was the only item in the queue, and the queue is now empty. - goto done; - } - spin_wait_while_eq( next, (scoped_lock*)NULL ); - n = next; - } - n->going = 2; // protect next queue node from being destroyed too early - if( n->state==STATE_UPGRADE_WAITING ) { - // the next waiting for upgrade means this writer was upgraded before. - acquire_internal_lock(); - queuing_rw_mutex::scoped_lock* tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL); - n->state = STATE_UPGRADE_LOSER; - __TBB_store_with_release(n->going,1); - unblock_or_wait_on_internal_lock(get_flag(tmp)); - } else { - __TBB_ASSERT( state & (STATE_COMBINED_WAITINGREADER | STATE_WRITER), "unexpected state" ); - __TBB_ASSERT( !( tricky_pointer(n->prev) & FLAG ), "use of corrupted pointer!" ); - n->prev = NULL; - // ensure release semantics on IPF - __TBB_store_with_release(n->going,1); - } - - } else { // Acquired for read - - queuing_rw_mutex::scoped_lock *tmp = NULL; -retry: - // Addition to the original paper: Mark this->prev as in use - queuing_rw_mutex::scoped_lock *pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG); - - if( pred ) { - if( !(pred->try_acquire_internal_lock()) ) - { - // Failed to acquire the lock on pred. The predecessor either unlinks or upgrades. - // In the second case, it could or could not know my "in use" flag - need to check - tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG ); - if( !(tricky_pointer(tmp)&FLAG) ) { - // Wait for the predecessor to change this->prev (e.g. 
during unlink) - spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG ); - // Now owner of pred is waiting for _us_ to release its lock - pred->release_internal_lock(); - } - else ; // The "in use" flag is back -> the predecessor didn't get it and will release itself; nothing to do - - tmp = NULL; - goto retry; - } - __TBB_ASSERT(pred && pred->internal_lock==ACQUIRED, "predecessor's lock is not acquired"); - this->prev = pred; - acquire_internal_lock(); - - __TBB_store_with_release(pred->next,reinterpret_cast<scoped_lock *>(NULL)); - - if( !next && this != mutex->q_tail.compare_and_swap<tbb::release>(pred, this) ) { - spin_wait_while_eq( next, (void*)NULL ); - } - __TBB_ASSERT( !get_flag(next), "use of corrupted pointer" ); - - // ensure acquire semantics of reading 'next' - if( __TBB_load_with_acquire(next) ) { // I->next != nil - // Equivalent to I->next->prev = I->prev but protected against (prev[n]&FLAG)!=0 - tmp = tricky_pointer::fetch_and_store<tbb::release>(&(next->prev), pred); - // I->prev->next = I->next; - __TBB_ASSERT(this->prev==pred, NULL); - __TBB_store_with_release(pred->next,next); - } - // Safe to release in the order opposite to acquiring which makes the code simplier - pred->release_internal_lock(); - - } else { // No predecessor when we looked - acquire_internal_lock(); // "exclusiveLock(&I->EL)" - // ensure acquire semantics of reading 'next' - scoped_lock* n = __TBB_load_with_acquire(next); - if( !n ) { - if( this != mutex->q_tail.compare_and_swap<tbb::release>(NULL, this) ) { - spin_wait_while_eq( next, (scoped_lock*)NULL ); - n = next; - } else { - goto unlock_self; - } - } - n->going = 2; // protect next queue node from being destroyed too early - tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), NULL); - // ensure release semantics on IPF - __TBB_store_with_release(n->going,1); - } -unlock_self: - unblock_or_wait_on_internal_lock(get_flag(tmp)); - } -done: - spin_wait_while_eq( going, 2 ); - - initialize(); -} - -bool queuing_rw_mutex::scoped_lock::downgrade_to_reader() -{ - __TBB_ASSERT( state==STATE_WRITER, "no sense to downgrade a reader" ); - - ITT_NOTIFY(sync_releasing, mutex); - - // ensure acquire semantics of reading 'next' - if( ! __TBB_load_with_acquire(next) ) { - state = STATE_READER; - if( this==mutex->q_tail ) { - unsigned short old_state = state.compare_and_swap<tbb::release>(STATE_ACTIVEREADER, STATE_READER); - if( old_state==STATE_READER ) { - goto downgrade_done; - } - } - /* wait for the next to register */ - spin_wait_while_eq( next, (void*)NULL ); - } - __TBB_ASSERT( next, "still no successor at this point!" ); - if( next->state & STATE_COMBINED_WAITINGREADER ) - __TBB_store_with_release(next->going,1); - else if( next->state==STATE_UPGRADE_WAITING ) - // the next waiting for upgrade means this writer was upgraded before. - next->state = STATE_UPGRADE_LOSER; - state = STATE_ACTIVEREADER; - -downgrade_done: - return true; -} - -bool queuing_rw_mutex::scoped_lock::upgrade_to_writer() -{ - __TBB_ASSERT( state==STATE_ACTIVEREADER, "only active reader can be upgraded" ); - - queuing_rw_mutex::scoped_lock * tmp; - queuing_rw_mutex::scoped_lock * me = this; - - ITT_NOTIFY(sync_releasing, mutex); - state = STATE_UPGRADE_REQUESTED; -requested: - __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" 
); - acquire_internal_lock(); - if( this != mutex->q_tail.compare_and_swap<tbb::release>(tricky_pointer(me)|FLAG, this) ) { - spin_wait_while_eq( next, (void*)NULL ); - queuing_rw_mutex::scoped_lock * n; - n = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->next), FLAG); - unsigned short n_state = n->state; - /* the next reader can be blocked by our state. the best thing to do is to unblock it */ - if( n_state & STATE_COMBINED_WAITINGREADER ) - __TBB_store_with_release(n->going,1); - tmp = tricky_pointer::fetch_and_store<tbb::release>(&(n->prev), this); - unblock_or_wait_on_internal_lock(get_flag(tmp)); - if( n_state & (STATE_COMBINED_READER | STATE_UPGRADE_REQUESTED) ) { - // save n|FLAG for simplicity of following comparisons - tmp = tricky_pointer(n)|FLAG; - atomic_backoff backoff; - while(next==tmp) { - if( state & STATE_COMBINED_UPGRADING ) { - if( __TBB_load_with_acquire(next)==tmp ) - next = n; - goto waiting; - } - backoff.pause(); - } - __TBB_ASSERT(next!=(tricky_pointer(n)|FLAG), NULL); - goto requested; - } else { - __TBB_ASSERT( n_state & (STATE_WRITER | STATE_UPGRADE_WAITING), "unexpected state"); - __TBB_ASSERT( (tricky_pointer(n)|FLAG)==next, NULL); - next = n; - } - } else { - /* We are in the tail; whoever comes next is blocked by q_tail&FLAG */ - release_internal_lock(); - } // if( this != mutex->q_tail... ) - state.compare_and_swap<tbb::acquire>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED); - -waiting: - __TBB_ASSERT( !( tricky_pointer(next) & FLAG ), "use of corrupted pointer!" ); - __TBB_ASSERT( state & STATE_COMBINED_UPGRADING, "wrong state at upgrade waiting_retry" ); - __TBB_ASSERT( me==this, NULL ); - ITT_NOTIFY(sync_prepare, mutex); - /* if noone was blocked by the "corrupted" q_tail, turn it back */ - mutex->q_tail.compare_and_swap<tbb::release>( this, tricky_pointer(me)|FLAG ); - queuing_rw_mutex::scoped_lock * pred; - pred = tricky_pointer::fetch_and_add<tbb::acquire>(&(this->prev), FLAG); - if( pred ) { - bool success = pred->try_acquire_internal_lock(); - pred->state.compare_and_swap<tbb::release>(STATE_UPGRADE_WAITING, STATE_UPGRADE_REQUESTED); - if( !success ) { - tmp = tricky_pointer::compare_and_swap<tbb::release>(&(this->prev), pred, tricky_pointer(pred)|FLAG ); - if( tricky_pointer(tmp)&FLAG ) { - spin_wait_while_eq(this->prev, pred); - pred = this->prev; - } else { - spin_wait_while_eq( this->prev, tricky_pointer(pred)|FLAG ); - pred->release_internal_lock(); - } - } else { - this->prev = pred; - pred->release_internal_lock(); - spin_wait_while_eq(this->prev, pred); - pred = this->prev; - } - if( pred ) - goto waiting; - } else { - // restore the corrupted prev field for possible further use (e.g. if downgrade back to reader) - this->prev = pred; - } - __TBB_ASSERT( !pred && !this->prev, NULL ); - - // additional lifetime issue prevention checks - // wait for the successor to finish working with my fields - wait_for_release_of_internal_lock(); - // now wait for the predecessor to finish working with my fields - spin_wait_while_eq( going, 2 ); - // there is an acquire semantics statement in the end of spin_wait_while_eq. 
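The spin_wait_while_eq/spin_wait_until_eq helpers and atomic_backoff used throughout this file implement a spin wait with exponential backoff that eventually yields the time slice. A rough stand-alone equivalent in C++11 (assumed names; the real pause would be a CPU pause instruction rather than a dummy loop):

#include <atomic>
#include <thread>

class backoff {
    int count_ = 1;
public:
    void pause() {
        if (count_ <= 16) {
            for (volatile int i = 0; i < count_; ++i)
                ;                        // stand-in for a hardware pause instruction
            count_ *= 2;                 // spin a little longer next time
        } else {
            std::this_thread::yield();   // after a while, give up the time slice
        }
    }
};

template <typename T, typename U>
void spin_wait_while_eq(const std::atomic<T>& location, U value) {
    backoff b;
    while (location.load(std::memory_order_acquire) == value)
        b.pause();
}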
- - bool result = ( state != STATE_UPGRADE_LOSER ); - state = STATE_WRITER; - going = 1; - - ITT_NOTIFY(sync_acquired, mutex); - return result; -} - -void queuing_rw_mutex::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::queuing_rw_mutex"), _T("")); -} - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/reader_writer_lock.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/reader_writer_lock.cpp deleted file mode 100644 index 2f4e3f6e68..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/reader_writer_lock.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/reader_writer_lock.h" -#include "tbb/tbb_machine.h" -#include "tbb/tbb_exception.h" -#include "itt_notify.h" - -namespace tbb { -namespace interface5 { - -const unsigned WFLAG1 = 0x1; // writer interested or active -const unsigned WFLAG2 = 0x2; // writers interested, no entering readers -const unsigned RFLAG = 0x4; // reader interested but not active -const unsigned RC_INCR = 0x8; // to adjust reader count - - -// Perform an atomic bitwise-OR on the operand with the addend, and return -// the previous value of the operand. -inline unsigned fetch_and_or(atomic<unsigned>& operand, unsigned addend) { - tbb::internal::atomic_backoff backoff; - for (;;) { - unsigned old = operand; - unsigned result = operand.compare_and_swap(old|addend, old); - if (result==old) return old; - backoff.pause(); - } -} - -// Perform an atomic bitwise-AND on the operand with the addend, and return -// the previous value of the operand. -inline unsigned fetch_and_and(atomic<unsigned>& operand, unsigned addend) { - tbb::internal::atomic_backoff backoff; - for (;;) { - unsigned old = operand; - unsigned result = operand.compare_and_swap(old&addend, old); - if (result==old) return old; - backoff.pause(); - } -} - -//! Spin WHILE the value at the location is greater than or equal to a given value -/** T and U should be comparable types. */ -template<typename T, typename U> -void spin_wait_while_geq( const volatile T& location, U value ) { - tbb::internal::atomic_backoff backoff; - while( location>=value ) backoff.pause(); -} - -//! Spin UNTIL (location & value) is true. -/** T and U should be comparable types. 
*/ -template<typename T, typename U> -void spin_wait_until_and( const volatile T& location, U value ) { - tbb::internal::atomic_backoff backoff; - while( !(location & value) ) backoff.pause(); -} - - -void reader_writer_lock::internal_construct() { - reader_head = NULL; - writer_head = NULL; - writer_tail = NULL; - rdr_count_and_flags = 0; - my_current_writer = tbb_thread::id(); -#if TBB_USE_THREADING_TOOLS - ITT_SYNC_CREATE(this, _T("tbb::reader_writer_lock"), _T("")); -#endif /* TBB_USE_THREADING_TOOLS */ -} - -void reader_writer_lock::internal_destroy() { - __TBB_ASSERT(rdr_count_and_flags==0, "reader_writer_lock destroyed with pending readers/writers."); - __TBB_ASSERT(reader_head==NULL, "reader_writer_lock destroyed with pending readers."); - __TBB_ASSERT(writer_tail==NULL, "reader_writer_lock destroyed with pending writers."); - __TBB_ASSERT(writer_head==NULL, "reader_writer_lock destroyed with pending/active writers."); -} - -// Acquires the reader_writer_lock for write. If the lock is currently held in write -// mode by another context, the writer will block by spinning on a local variable. -// Throws exception improper_lock if the context tries to acquire a -// reader_writer_lock that it already has write ownership of. -void reader_writer_lock::lock() { - if (is_current_writer()) { // recursive lock attempt - // we don't support recursive writer locks; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { - scoped_lock *a_writer_lock = new scoped_lock(); - (void) start_write(a_writer_lock); - } -} - -// Tries to acquire the reader_writer_lock for write. This function does not block. -// Return Value: True or false, depending on whether the lock is acquired or not. -// If the lock is already held by this acquiring context, try_lock() returns false. 
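The fetch_and_or/fetch_and_and helpers above are the classic load-then-compare_and_swap retry loop that returns the previous value. With C++11 atomics the same operation is available directly as fetch_or/fetch_and; an illustrative equivalent of the loop:

#include <atomic>

inline unsigned fetch_and_or(std::atomic<unsigned>& operand, unsigned bits) {
    unsigned old = operand.load(std::memory_order_relaxed);
    // Retry until no other thread changed the word between our load and the CAS;
    // compare_exchange_weak reloads 'old' with the current value on failure.
    while (!operand.compare_exchange_weak(old, old | bits,
                                          std::memory_order_acq_rel,
                                          std::memory_order_relaxed)) {
    }
    return old;   // previous value, as in the helper above
    // (equivalently: return operand.fetch_or(bits);)
}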
-bool reader_writer_lock::try_lock() { - if (is_current_writer()) { // recursive lock attempt - return false; - } - else { - scoped_lock *a_writer_lock = new scoped_lock(); - a_writer_lock->status = waiting_nonblocking; - return start_write(a_writer_lock); - } -} - -bool reader_writer_lock::start_write(scoped_lock *I) { - tbb_thread::id id = this_tbb_thread::get_id(); - scoped_lock *pred = NULL; - if (I->status == waiting_nonblocking) { - if ((pred = writer_tail.compare_and_swap(I, NULL)) != NULL) { - delete I; - return false; - } - } - else { - ITT_NOTIFY(sync_prepare, this); - pred = writer_tail.fetch_and_store(I); - } - if (pred) - pred->next = I; - else { - set_next_writer(I); - if (I->status == waiting_nonblocking) { - if (I->next) { // potentially more writers - set_next_writer(I->next); - } - else { // no more writers - writer_head.fetch_and_store(NULL); - if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added - spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added - __TBB_ASSERT(I->next, "There should be a node following the last writer."); - set_next_writer(I->next); - } - } - delete I; - return false; - } - } - spin_wait_while_eq(I->status, waiting); - ITT_NOTIFY(sync_acquired, this); - my_current_writer = id; - return true; -} - -void reader_writer_lock::set_next_writer(scoped_lock *W) { - writer_head = W; - if (W->status == waiting_nonblocking) { - if (rdr_count_and_flags.compare_and_swap(WFLAG1+WFLAG2, 0) == 0) { - W->status = active; - } - } - else { - if (fetch_and_or(rdr_count_and_flags, WFLAG1) & RFLAG) { // reader present - spin_wait_until_and(rdr_count_and_flags, WFLAG2); // block until readers set WFLAG2 - } - else { // no reader in timing window - __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2); - } - spin_wait_while_geq(rdr_count_and_flags, RC_INCR); // block until readers finish - W->status = active; - } -} - -// Acquires the reader_writer_lock for read. If the lock is currently held by a writer, -// this reader will block and wait until the writers are done. -// Throws exception improper_lock when the context tries to acquire a reader_writer_lock -// that it already has write ownership of. -void reader_writer_lock::lock_read() { - if (is_current_writer()) { // recursive lock attempt - // we don't support writer->reader downgrade; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { - scoped_lock_read a_reader_lock; - start_read(&a_reader_lock); - } -} - -// Tries to acquire the reader_writer_lock for read. This function does not block. -// Return Value: True or false, depending on whether the lock is acquired or not. 
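The rdr_count_and_flags word manipulated above packs the writer/reader interest flags (WFLAG1, WFLAG2, RFLAG) into the low three bits and keeps the reader count in the remaining bits in units of RC_INCR. A non-blocking reader entry over such a packed word looks roughly like the following sketch (illustrative type and names, not the reader_writer_lock API itself):

#include <atomic>

const unsigned WFLAG1  = 0x1;   // writer interested or active
const unsigned WFLAG2  = 0x2;   // writers interested, no entering readers
const unsigned RFLAG   = 0x4;   // reader interested but not active
const unsigned RC_INCR = 0x8;   // one reader

struct rw_state {
    std::atomic<unsigned> word{0};

    bool try_lock_read() {
        // Optimistically count ourselves in...
        unsigned prev = word.fetch_add(RC_INCR, std::memory_order_acquire);
        if (prev & (WFLAG1 | WFLAG2)) {
            // ...and back out if a writer is present or pending.
            word.fetch_sub(RC_INCR, std::memory_order_release);
            return false;
        }
        return true;
    }

    void unlock_read() { word.fetch_sub(RC_INCR, std::memory_order_release); }
};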
-bool reader_writer_lock::try_lock_read() { - if (is_current_writer()) { // recursive lock attempt - return false; - } - else { - if (rdr_count_and_flags.fetch_and_add(RC_INCR) & (WFLAG1+WFLAG2)) { // writers present - rdr_count_and_flags -= RC_INCR; - return false; - } - else { // no writers - ITT_NOTIFY(sync_acquired, this); - return true; - } - } -} - -void reader_writer_lock::start_read(scoped_lock_read *I) { - ITT_NOTIFY(sync_prepare, this); - I->next = reader_head.fetch_and_store(I); - if (!I->next) { // first arriving reader in my group; set RFLAG, test writer flags - // unblock and/or update statuses of non-blocking readers - if (!(fetch_and_or(rdr_count_and_flags, RFLAG) & (WFLAG1+WFLAG2))) { // no writers - unblock_readers(); - } - } - __TBB_ASSERT(I->status == waiting || I->status == active, "Lock requests should be waiting or active before blocking."); - spin_wait_while_eq(I->status, waiting); // block - if (I->next) { - __TBB_ASSERT(I->next->status == waiting, NULL); - rdr_count_and_flags += RC_INCR; - I->next->status = active; // wake successor - } - ITT_NOTIFY(sync_acquired, this); -} - -void reader_writer_lock::unblock_readers() { - // clear rdr interest flag, increment rdr count - __TBB_ASSERT(rdr_count_and_flags&RFLAG, NULL); - rdr_count_and_flags += RC_INCR-RFLAG; - __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, NULL); - // indicate clear of window - if (rdr_count_and_flags & WFLAG1 && !(rdr_count_and_flags & WFLAG2)) { - __TBB_AtomicOR(&rdr_count_and_flags, WFLAG2); - } - // unblock waiting readers - scoped_lock_read *head = reader_head.fetch_and_store(NULL); - __TBB_ASSERT(head, NULL); - __TBB_ASSERT(head->status == waiting, NULL); - head->status = active; -} - -// Releases the reader_writer_lock -void reader_writer_lock::unlock() { - if( my_current_writer!=tbb_thread::id() ) { - // A writer owns the lock - __TBB_ASSERT(is_current_writer(), "caller of reader_writer_lock::unlock() does not own the lock."); - __TBB_ASSERT(writer_head, NULL); - __TBB_ASSERT(writer_head->status==active, NULL); - scoped_lock *a_writer_lock = writer_head; - end_write(a_writer_lock); - __TBB_ASSERT(a_writer_lock != writer_head, "Internal error: About to turn writer_head into dangling reference."); - delete a_writer_lock; - } else { - end_read(); - } -} - -void reader_writer_lock::end_write(scoped_lock *I) { - __TBB_ASSERT(I==writer_head, "Internal error: can't unlock a thread that is not holding the lock."); - my_current_writer = tbb_thread::id(); - ITT_NOTIFY(sync_releasing, this); - if (I->next) { // potentially more writers - writer_head = I->next; - writer_head->status = active; - } - else { // No more writers; clear writer flag, test reader interest flag - __TBB_ASSERT(writer_head, NULL); - if (fetch_and_and(rdr_count_and_flags, ~(WFLAG1+WFLAG2)) & RFLAG) { - unblock_readers(); - } - writer_head.fetch_and_store(NULL); - if (I != writer_tail.compare_and_swap(NULL, I)) { // an incoming writer is in the process of being added - spin_wait_while_eq(I->next, (scoped_lock *)NULL); // wait for new writer to be added - __TBB_ASSERT(I->next, "There should be a node following the last writer."); - set_next_writer(I->next); - } - } -} - -void reader_writer_lock::end_read() { - ITT_NOTIFY(sync_releasing, this); - __TBB_ASSERT(rdr_count_and_flags >= RC_INCR, "unlock() called but no readers hold the lock."); - rdr_count_and_flags -= RC_INCR; -} - -inline bool reader_writer_lock::is_current_writer() { - return my_current_writer==this_tbb_thread::get_id(); -} - -// Construct with a blocking attempt to 
acquire a write lock on the passed reader_writer_lock -void reader_writer_lock::scoped_lock::internal_construct (reader_writer_lock& lock) { - mutex = &lock; - next = NULL; - status = waiting; - if (mutex->is_current_writer()) { // recursive lock attempt - // we don't support recursive writer locks; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { // this thread holds no locks - (void) mutex->start_write(this); - } -} - -inline reader_writer_lock::scoped_lock::scoped_lock() : mutex(NULL), next(NULL) { - status = waiting; -} - -// Construct with a blocking attempt to acquire a write lock on the passed reader_writer_lock -void reader_writer_lock::scoped_lock_read::internal_construct (reader_writer_lock& lock) { - mutex = &lock; - next = NULL; - status = waiting; - if (mutex->is_current_writer()) { // recursive lock attempt - // we don't support writer->reader downgrade; throw exception - tbb::internal::throw_exception(tbb::internal::eid_improper_lock); - } - else { // this thread holds no locks - mutex->start_read(this); - } -} - -inline reader_writer_lock::scoped_lock_read::scoped_lock_read() : mutex(NULL), next(NULL) { - status = waiting; -} - -void reader_writer_lock::scoped_lock::internal_destroy() { - if (mutex) { - __TBB_ASSERT(mutex->is_current_writer(), "~scoped_lock() destroyed by thread different than thread that holds lock."); - mutex->end_write(this); - } - status = invalid; -} - -void reader_writer_lock::scoped_lock_read::internal_destroy() { - if (mutex) - mutex->end_read(); - status = invalid; -} - -} // namespace interface5 -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/recursive_mutex.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/recursive_mutex.cpp deleted file mode 100644 index 7083020e8c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/recursive_mutex.cpp +++ /dev/null @@ -1,143 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "tbb/recursive_mutex.h" -#include "itt_notify.h" - -namespace tbb { - -void recursive_mutex::scoped_lock::internal_acquire( recursive_mutex& m ) { -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - // since we cannot look into the internal of the CriticalSection object - // we won't know how many times the lock has been acquired, and thus - // we won't know when we may safely set the state back to INITIALIZED - // if we change the state to HELD as in mutex.cpp. thus, we won't change - // the state for recursive_mutex - EnterCriticalSection( &m.impl ); - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_lock(&m.impl); - __TBB_ASSERT_EX(!error_code,"recursive_mutex::scoped_lock: pthread_mutex_lock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = &m; -} - -void recursive_mutex::scoped_lock::internal_release() { - __TBB_ASSERT( my_mutex, "recursive_mutex::scoped_lock: not holding a mutex" ); -#if _WIN32||_WIN64 - switch( my_mutex->state ) { - case INITIALIZED: - LeaveCriticalSection( &my_mutex->impl ); - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state"); - break; - } -#else - int error_code = pthread_mutex_unlock(&my_mutex->impl); - __TBB_ASSERT_EX(!error_code, "recursive_mutex::scoped_lock: pthread_mutex_unlock failed"); -#endif /* _WIN32||_WIN64 */ - my_mutex = NULL; -} - -bool recursive_mutex::scoped_lock::internal_try_acquire( recursive_mutex& m ) { -#if _WIN32||_WIN64 - switch( m.state ) { - case INITIALIZED: - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: mutex already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex::scoped_lock: illegal mutex state"); - break; - } -#endif /* _WIN32||_WIN64 */ - bool result; -#if _WIN32||_WIN64 - result = TryEnterCriticalSection(&m.impl)!=0; -#else - result = pthread_mutex_trylock(&m.impl)==0; -#endif /* _WIN32||_WIN64 */ - if( result ) - my_mutex = &m; - return result; -} - -void recursive_mutex::internal_construct() { -#if _WIN32||_WIN64 - InitializeCriticalSection(&impl); - state = INITIALIZED; -#else - pthread_mutexattr_t mtx_attr; - int error_code = pthread_mutexattr_init( &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed"); - - pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE ); - error_code = pthread_mutex_init( &impl, &mtx_attr ); - if( error_code ) - tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed"); - pthread_mutexattr_destroy( &mtx_attr ); -#endif /* _WIN32||_WIN64*/ - ITT_SYNC_CREATE(&impl, _T("tbb::recursive_mutex"), _T("")); -} - -void recursive_mutex::internal_destroy() { -#if _WIN32||_WIN64 - switch( state ) { - case INITIALIZED: - DeleteCriticalSection(&impl); - break; - case DESTROYED: - __TBB_ASSERT(false,"recursive_mutex: already destroyed"); - break; - default: - __TBB_ASSERT(false,"recursive_mutex: illegal state for destruction"); - break; - } - state = DESTROYED; -#else - int error_code = pthread_mutex_destroy(&impl); - __TBB_ASSERT_EX(!error_code,"recursive_mutex: pthread_mutex_destroy failed"); -#endif /* _WIN32||_WIN64 */ -} - -} // namespace tbb diff --git 
a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/scheduler.cpp deleted file mode 100644 index c0b4f18ff5..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler.cpp +++ /dev/null @@ -1,1176 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/tbb_machine.h" - -#include "custom_scheduler.h" -#include "scheduler_utility.h" -#include "governor.h" -#include "market.h" -#include "arena.h" -#include "mailbox.h" -#include "observer_proxy.h" -#include "itt_notify.h" - -namespace tbb { -namespace internal { - -/** Defined in tbb_main.cpp **/ -extern generic_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index ); - -inline generic_scheduler* allocate_scheduler ( arena* a, size_t index ) { - return AllocateSchedulerPtr(a, index); -} - -#if __TBB_TASK_GROUP_CONTEXT -#if !__TBB_ARENA_PER_MASTER -//! Head of the list of master thread schedulers. -static scheduler_list_node_t the_scheduler_list_head; - -//! Mutex protecting access to the list of schedulers. -static mutex the_scheduler_list_mutex; -#endif /* !__TBB_ARENA_PER_MASTER */ - -//! Counter that is incremented whenever new cancellation signal is sent to a task group. -/** Together with generic_scheduler::local_cancel_count forms cross-thread signaling - mechanism that allows to avoid locking at the hot path of normal execution flow. - - When a descendant task group context is being registered or unregistered, - the global and local counters are compared. If they differ, it means that - a cancellation signal is being propagated, and registration/deregistration - routines take slower branch that may block (at most one thread of the pool - can be blocked at any moment). Otherwise the control path is lock-free and fast. **/ -uintptr_t global_cancel_count = 0; - -//! Context to be associated with dummy tasks of worker threads schedulers. -/** It is never used for its direct purpose, and is introduced solely for the sake - of avoiding one extra conditional branch in the end of wait_for_all method. 
**/ -static task_group_context dummy_context(task_group_context::isolated); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -void Scheduler_OneTimeInitialization ( bool itt_present ) { - AllocateSchedulerPtr = itt_present ? &custom_scheduler<DefaultSchedulerTraits>::allocate_scheduler : - &custom_scheduler<IntelSchedulerTraits>::allocate_scheduler; -#if __TBB_TASK_GROUP_CONTEXT && !__TBB_ARENA_PER_MASTER - ITT_SYNC_CREATE(&the_scheduler_list_mutex, SyncType_GlobalLock, SyncObj_SchedulersList); - the_scheduler_list_head.my_next = &the_scheduler_list_head; - the_scheduler_list_head.my_prev = &the_scheduler_list_head; -#endif /* __TBB_TASK_GROUP_CONTEXT && !__TBB_ARENA_PER_MASTER */ -} - -//------------------------------------------------------------------------ -// scheduler interface -//------------------------------------------------------------------------ - -// A pure virtual destructor should still have a body -// so the one for tbb::internal::scheduler::~scheduler() is provided here -scheduler::~scheduler( ) {} - -//------------------------------------------------------------------------ -// generic_scheduler -//------------------------------------------------------------------------ - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Suppress overzealous compiler warning about using 'this' in base initializer list. - #pragma warning(push) - #pragma warning(disable:4355) -#endif - -generic_scheduler::generic_scheduler( arena* a, size_t index ) : - my_stealing_threshold(0), - arena_index(index), - task_pool_size(0), - my_arena_slot(&dummy_slot), -#if __TBB_ARENA_PER_MASTER - my_market(NULL), -#endif /* __TBB_ARENA_PER_MASTER */ - my_arena(a), - random( unsigned(this-(generic_scheduler*)NULL) ), - free_list(NULL), - innermost_running_task(NULL), - dummy_task(NULL), - ref_count(1), - my_affinity_id(0), - is_registered(false), - is_auto_initialized(false), -#if __TBB_SCHEDULER_OBSERVER - local_last_observer_proxy(NULL), -#endif /* __TBB_SCHEDULER_OBSERVER */ -#if __TBB_COUNT_TASK_NODES - task_node_count(0), -#endif /* __TBB_COUNT_TASK_NODES */ - small_task_count(1), // Extra 1 is a guard reference - return_list(NULL), -#if __TBB_TASK_GROUP_CONTEXT - local_ctx_list_update(0), - nonlocal_ctx_list_update(0) -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT - ,my_cilk_state(cs_none) -#endif /* __TBB_SURVIVE_THREAD_SWITCH && TBB_USE_ASSERT */ -{ - dummy_slot.task_pool = allocate_task_pool( min_task_pool_size ); - dummy_slot.head = dummy_slot.tail = 0; - dummy_task = &allocate_task( sizeof(task), __TBB_CONTEXT_ARG(NULL, NULL) ); -#if __TBB_TASK_GROUP_CONTEXT - context_list_head.my_prev = &context_list_head; - context_list_head.my_next = &context_list_head; - ITT_SYNC_CREATE(&context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - dummy_task->prefix().ref_count = 2; - ITT_SYNC_CREATE(&dummy_task->prefix().ref_count, SyncType_Scheduler, SyncObj_WorkerLifeCycleMgmt); - ITT_SYNC_CREATE(&return_list, SyncType_Scheduler, SyncObj_TaskReturnList); - assert_task_pool_valid(); -#if __TBB_SURVIVE_THREAD_SWITCH - my_cilk_unwatch_thunk.routine = NULL; -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ -} - -#if _MSC_VER && !defined(__INTEL_COMPILER) - #pragma warning(pop) -#endif // warning 4355 is back - -#if TBB_USE_ASSERT > 1 -bool generic_scheduler::assert_task_pool_valid() const { - acquire_task_pool(); - task** tp = dummy_slot.task_pool; - __TBB_ASSERT( task_pool_size >= min_task_pool_size, NULL ); - __TBB_ASSERT( 
my_arena_slot->head <= my_arena_slot->tail, NULL ); - for ( size_t i = 0; i < my_arena_slot->head; ++i ) - __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" ); - for ( size_t i = my_arena_slot->head; i < my_arena_slot->tail; ++i ) { - __TBB_ASSERT( (uintptr_t)tp[i] + 1 > 1u, "nil or invalid task pointer in the deque" ); - __TBB_ASSERT( tp[i]->prefix().state == task::ready || - tp[i]->prefix().extra_state == es_task_proxy, "task in the deque has invalid state" ); - } - for ( size_t i = my_arena_slot->tail; i < task_pool_size; ++i ) - __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" ); - release_task_pool(); -} -#endif /* TBB_USE_ASSERT > 1 */ - -#if __TBB_TASK_GROUP_CONTEXT -void generic_scheduler::propagate_cancellation () { - spin_mutex::scoped_lock lock(context_list_mutex); - // Acquire fence is necessary to ensure that the subsequent node->my_next load - // returned the correct value in case it was just inserted in another thread. - // The fence also ensures visibility of the correct my_parent value. - context_list_node_t *node = __TBB_load_with_acquire(context_list_head.my_next); - while ( node != &context_list_head ) { - task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node); - // The absence of acquire fence while reading my_cancellation_requested may result - // in repeated traversals of the same parents chain if another group (precedent or - // descendant) belonging to the tree being canceled sends cancellation request of - // its own around the same time. - if ( !ctx.my_cancellation_requested ) - ctx.propagate_cancellation_from_ancestors(); - node = node->my_next; - __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Walked into a destroyed context while propagating cancellation" ); - } - // Sync up local cancelation epoch with the global one. Release fence prevents - // reordering of possible store to my_cancellation_requested after the sync point. - __TBB_store_with_release(local_cancel_count, global_cancel_count); -} - -#if !__TBB_ARENA_PER_MASTER -/** Propagates cancellation down the tree of dependent contexts by walking each - thread's local list of contexts **/ -void generic_scheduler::propagate_cancellation ( task_group_context& ctx ) { - __TBB_ASSERT ( ctx.my_cancellation_requested, "No cancellation request in the context" ); - // The whole propagation algorithm is under the lock in order to ensure correctness - // in case of parallel cancellations at the different levels of the context tree. - // See the note 2 at the bottom of the file. - mutex::scoped_lock lock(the_scheduler_list_mutex); - // Advance global cancellation state - __TBB_FetchAndAddWrelease(&global_cancel_count, 1); - // First propagate to workers using arena to access their context lists - size_t num_workers = my_arena->prefix().number_of_workers; - for ( size_t i = 0; i < num_workers; ++i ) { - // No fence is necessary here since the context list of worker's scheduler - // can contain anything of interest only after the first stealing was done - // by that worker. And doing it applies the necessary fence - generic_scheduler *s = my_arena->prefix().worker_list[i].scheduler; - // If the worker is in the middle of its startup sequence, skip it. 
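The global_cancel_count/local_cancel_count pair used above is an epoch scheme: the global counter is bumped whenever a cancellation starts to propagate, each scheduler keeps a local snapshot, and comparing the two lets a thread decide, without taking a lock on the hot path, whether a propagation might be in flight. A minimal sketch of the idea (names here are illustrative):

#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> global_epoch{0};

struct local_view {
    std::uint64_t local_epoch = 0;

    bool maybe_concurrent_update() const {
        // Cheap hot-path check: fall back to the slow (locked) path only
        // when the epochs differ.
        return local_epoch != global_epoch.load(std::memory_order_acquire);
    }

    void sync_after_slow_path() {
        local_epoch = global_epoch.load(std::memory_order_acquire);
    }
};

inline void announce_update() {
    global_epoch.fetch_add(1, std::memory_order_release);
}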
- if ( s ) - s->propagate_cancellation(); - } - // Then propagate to masters using the global list of master's schedulers - scheduler_list_node_t *node = the_scheduler_list_head.my_next; - while ( node != &the_scheduler_list_head ) { - __TBB_get_object_ref(generic_scheduler, my_node, node).propagate_cancellation(); - node = node->my_next; - } -} -#endif /* !__TBB_ARENA_PER_MASTER */ -#endif /* __TBB_TASK_GROUP_CONTEXT */ - - -void generic_scheduler::init_stack_info () { - // Stacks are growing top-down. Highest address is called "stack base", - // and the lowest is "stack limit". -#if __TBB_ARENA_PER_MASTER - __TBB_ASSERT( !my_stealing_threshold, "Stealing threshold has already been calculated" ); - size_t stack_size = my_market->worker_stack_size(); -#else /* !__TBB_ARENA_PER_MASTER */ - size_t stack_size = my_arena->prefix().stack_size; -#endif /* !__TBB_ARENA_PER_MASTER */ -#if USE_WINTHREAD -#if defined(_MSC_VER)&&_MSC_VER<1400 && !_WIN64 - NT_TIB *pteb = (NT_TIB*)__TBB_machine_get_current_teb(); -#else - NT_TIB *pteb = (NT_TIB*)NtCurrentTeb(); -#endif - __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" ); - __TBB_ASSERT( stack_size >0, "stack_size not initialized?" ); - // When a thread is created with the attribute STACK_SIZE_PARAM_IS_A_RESERVATION, stack limit - // in the TIB points to the committed part of the stack only. This renders the expression - // "(uintptr_t)pteb->StackBase / 2 + (uintptr_t)pteb->StackLimit / 2" virtually useless. - // Thus for worker threads we use the explicit stack size we used while creating them. - // And for master threads we rely on the following fact and assumption: - // - the default stack size of a master thread on Windows is 1M; - // - if it was explicitly set by the application it is at least as large as the size of a worker stack. - if ( is_worker() || stack_size < MByte ) - my_stealing_threshold = (uintptr_t)pteb->StackBase - stack_size / 2; - else - my_stealing_threshold = (uintptr_t)pteb->StackBase - MByte / 2; -#else /* USE_PTHREAD */ - // There is no portable way to get stack base address in Posix, so we use - // non-portable method (on all modern Linux) or the simplified approach - // based on the common sense assumptions. The most important assumption - // is that the main thread's stack size is not less than that of other threads. - void *stack_base = &stack_size; -#if __TBB_ipf - void *rsb_base = __TBB_get_bsp(); -#endif -#if __linux__ - size_t np_stack_size = 0; - void *stack_limit = NULL; - pthread_attr_t attr_stack, np_attr_stack; - if( 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) { - if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) { - if ( 0 == pthread_attr_init(&attr_stack) ) { - if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) - { - stack_base = (char*)stack_limit + np_stack_size; - if ( np_stack_size < stack_size ) { - // We are in a secondary thread. Use reliable data. -#if __TBB_ipf - // IA64 stack is split into RSE backup and memory parts - rsb_base = stack_limit; - stack_size = np_stack_size/2; -#else - stack_size = np_stack_size; -#endif /* !__TBB_ipf */ - } - // We are either in the main thread or this thread stack - // is bigger that that of the main one. As we cannot discern - // these cases we fall back to the default (heuristic) values. 
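The Linux branch of init_stack_info() above queries the current thread's stack region with the non-portable pthread_getattr_np/pthread_attr_getstack pair and then places the stealing threshold half-way down the stack. A stand-alone sketch of that query (glibc/Linux only; helper name is an assumption):

#define _GNU_SOURCE
#include <pthread.h>
#include <cstddef>

// On success, *base points just past the highest stack address (stacks grow
// downward) and *size is the usable stack size.
bool current_stack_region(void** base, std::size_t* size) {
    pthread_attr_t attr;
    if (pthread_getattr_np(pthread_self(), &attr) != 0)
        return false;
    void* limit = nullptr;       // lowest usable stack address
    std::size_t sz = 0;
    bool ok = pthread_attr_getstack(&attr, &limit, &sz) == 0;
    if (ok) {
        *base = static_cast<char*>(limit) + sz;
        *size = sz;
    }
    pthread_attr_destroy(&attr);
    return ok;
}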
- } - pthread_attr_destroy(&attr_stack); - } - } - pthread_attr_destroy(&np_attr_stack); - } -#endif /* __linux__ */ - __TBB_ASSERT( stack_size>0, "stack size must be positive" ); - my_stealing_threshold = (uintptr_t)((char*)stack_base - stack_size/2); -#if __TBB_ipf - my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2); -#endif -#endif /* USE_PTHREAD */ -} - -/** The function uses synchronization scheme similar to the one in the destructor - of task_group_context augmented with interlocked state change of each context - object. The purpose of this algo is to prevent threads doing nonlocal context - destruction from accessing destroyed owner-scheduler instance still pointed to - by the context object. **/ -void generic_scheduler::cleanup_local_context_list () { - // Detach contexts remaining in the local list - bool wait_for_concurrent_destroyers_to_leave = false; - uintptr_t local_count_snapshot = local_cancel_count; - local_ctx_list_update = 1; - { - // This is just a definition. Actual lock is acquired only in case of conflict. - spin_mutex::scoped_lock lock; - // Full fence prevents reordering of store to local_ctx_list_update with - // load from nonlocal_ctx_list_update. - __TBB_full_memory_fence(); - // Check for the conflict with concurrent destroyer or cancelation propagator - if ( nonlocal_ctx_list_update || local_count_snapshot != global_cancel_count ) - lock.acquire(context_list_mutex); - // No acquire fence is necessary for loading context_list_head.my_next, - // as the list can be updated by this thread only. - context_list_node_t *node = context_list_head.my_next; - while ( node != &context_list_head ) { - task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node); - __TBB_ASSERT( ctx.my_kind != task_group_context::binding_required, "Only a context bound to a root task can be detached" ); - node = node->my_next; - __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Walked into a destroyed context while detaching contexts from the local list" ); - // On 64-bit systems my_kind can be a 32-bit value padded with 32 uninitialized bits. - // So the cast below is necessary to throw off the higher bytes containing garbage - if ( (task_group_context::kind_type)(uintptr_t)__TBB_FetchAndStoreW(&ctx.my_kind, task_group_context::detached) == task_group_context::dying ) - wait_for_concurrent_destroyers_to_leave = true; - } - } - __TBB_store_with_release( local_ctx_list_update, 0 ); - // Wait until other threads referencing this scheduler object finish with it - if ( wait_for_concurrent_destroyers_to_leave ) - spin_wait_until_eq( nonlocal_ctx_list_update, 0u ); -} - -void generic_scheduler::free_scheduler() { - if( in_arena() ) { - acquire_task_pool(); - leave_arena(); - } -#if __TBB_TASK_GROUP_CONTEXT - cleanup_local_context_list(); -#if !__TBB_ARENA_PER_MASTER - task_group_context* default_context = dummy_task->prefix().context; - if ( default_context != &dummy_context) { - // Only master thread's dummy task has a dynamically allocated context - default_context->task_group_context::~task_group_context(); - NFS_Free(default_context); - { - mutex::scoped_lock lock(the_scheduler_list_mutex); - my_node.my_next->my_prev = my_node.my_prev; - my_node.my_prev->my_next = my_node.my_next; - } - } -#endif /* !__TBB_ARENA_PER_MASTER */ -#endif /* __TBB_TASK_GROUP_CONTEXT */ - free_task<small_local_task>( *dummy_task ); - - // k accounts for a guard reference and each task that we deallocate. 
- intptr_t k = 1; - for(;;) { - while( task* t = free_list ) { - free_list = t->prefix().next; - deallocate_task(*t); - ++k; - } - if( return_list==plugged_return_list() ) - break; - free_list = (task*)__TBB_FetchAndStoreW( &return_list, (intptr_t)plugged_return_list() ); - } -#if __TBB_COUNT_TASK_NODES -#if __TBB_ARENA_PER_MASTER - my_market->update_task_node_count( task_node_count ); -#else /* !__TBB_ARENA_PER_MASTER */ - my_arena->prefix().task_node_count += task_node_count; -#endif /* !__TBB_ARENA_PER_MASTER */ -#endif /* __TBB_COUNT_TASK_NODES */ -#if !__TBB_ARENA_PER_MASTER && __TBB_STATISTICS - dump_statistics(my_counters, arena_index < my_arena->prefix().number_of_workers ? arena_index + 1 : 0 ); -#endif /* !__TBB_ARENA_PER_MASTER && __TBB_STATISTICS */ - free_task_pool( dummy_slot.task_pool ); - dummy_slot.task_pool = NULL; - // Update small_task_count last. Doing so sooner might cause another thread to free *this. - __TBB_ASSERT( small_task_count>=k, "small_task_count corrupted" ); - governor::sign_off(this); - if( __TBB_FetchAndAddW( &small_task_count, -k )==k ) - NFS_Free( this ); -} - -task& generic_scheduler::allocate_task( size_t number_of_bytes, - __TBB_CONTEXT_ARG(task* parent, task_group_context* context) ) { - GATHER_STATISTIC(++my_counters.active_tasks); - task* t = free_list; - if( number_of_bytes<=quick_task_size ) { - if( t ) { - GATHER_STATISTIC(--my_counters.free_list_length); - __TBB_ASSERT( t->state()==task::freed, "free list of tasks is corrupted" ); - free_list = t->prefix().next; - } else if( return_list ) { - // No fence required for read of return_list above, because __TBB_FetchAndStoreW has a fence. - t = (task*)__TBB_FetchAndStoreW( &return_list, 0 ); - __TBB_ASSERT( t, "another thread emptied the return_list" ); - __TBB_ASSERT( t->prefix().origin==this, "task returned to wrong return_list" ); - ITT_NOTIFY( sync_acquired, &return_list ); - free_list = t->prefix().next; - } else { - t = (task*)((char*)NFS_Allocate( task_prefix_reservation_size+quick_task_size, 1, NULL ) + task_prefix_reservation_size ); -#if __TBB_COUNT_TASK_NODES - ++task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - t->prefix().origin = this; - ++small_task_count; - } - } else { - GATHER_STATISTIC(++my_counters.big_tasks); - t = (task*)((char*)NFS_Allocate( task_prefix_reservation_size+number_of_bytes, 1, NULL ) + task_prefix_reservation_size ); -#if __TBB_COUNT_TASK_NODES - ++task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - t->prefix().origin = NULL; - } - task_prefix& p = t->prefix(); -#if __TBB_TASK_GROUP_CONTEXT - p.context = context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - p.owner = this; - p.ref_count = 0; - // Assign some not outrageously out-of-place value for a while - p.depth = 0; - p.parent = parent; - // In TBB 2.1 and later, the constructor for task sets extra_state to indicate the version of the tbb/task.h header. - // In TBB 2.0 and earlier, the constructor leaves extra_state as zero. 
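The small-task recycling above combines a plain, owner-only free_list with a lock-free return_list: other threads push freed tasks onto return_list with a CAS (see free_nonlocal_small_task below), and the owner drains it in one atomic exchange when its local list runs dry. A sketch of that scheme with assumed node/field names:

#include <atomic>

struct node { node* next = nullptr; };

struct recycler {
    node*              free_list   = nullptr;   // touched only by the owning thread
    std::atomic<node*> return_list{nullptr};    // pushed to by other threads

    // Called by any other thread to hand a node back to the owner (Treiber-style push).
    void return_node(node* n) {
        node* old = return_list.load(std::memory_order_relaxed);
        do {
            n->next = old;
        } while (!return_list.compare_exchange_weak(old, n,
                                                    std::memory_order_release,
                                                    std::memory_order_relaxed));
    }

    // Called by the owner: reuse a local node, refilling from the return list
    // only when the local list is empty.
    node* get_node() {
        if (!free_list)
            free_list = return_list.exchange(nullptr, std::memory_order_acquire);
        if (!free_list)
            return nullptr;          // caller falls back to a fresh allocation
        node* n = free_list;
        free_list = n->next;
        return n;
    }
};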
- p.extra_state = 0; - p.affinity = 0; - p.state = task::allocated; - return *t; -} - -void generic_scheduler::free_nonlocal_small_task( task& t ) { - __TBB_ASSERT( t.state()==task::freed, NULL ); - generic_scheduler& s = *static_cast<generic_scheduler*>(t.prefix().origin); - __TBB_ASSERT( &s!=this, NULL ); - for(;;) { - task* old = s.return_list; - if( old==plugged_return_list() ) - break; - // Atomically insert t at head of s.return_list - t.prefix().next = old; - ITT_NOTIFY( sync_releasing, &s.return_list ); - if( __TBB_CompareAndSwapW( &s.return_list, (intptr_t)&t, (intptr_t)old )==(intptr_t)old ) { - GATHER_STATISTIC(++my_counters.free_list_length); - return; - } - } - deallocate_task(t); - if( __TBB_FetchAndDecrementWrelease( &s.small_task_count )==1 ) { - // We freed the last task allocated by scheduler s, so it's our responsibility - // to free the scheduler. - NFS_Free( &s ); - } -} - -task** generic_scheduler::allocate_task_pool( size_t n ) { - __TBB_ASSERT( n > task_pool_size, "Cannot shrink the task pool" ); - size_t byte_size = ((n * sizeof(task*) + NFS_MaxLineSize - 1) / NFS_MaxLineSize) * NFS_MaxLineSize; - task_pool_size = byte_size / sizeof(task*); - task** new_pool = (task**)NFS_Allocate( byte_size, 1, NULL ); - // No need to clear the fresh deque since valid items are designated by the head and tail members. -#if TBB_USE_ASSERT>=2 - // But clear it in the high vigilance debug mode - memset( new_pool, reinterpret_cast<int>(poisoned_ptr), n ); -#endif /* TBB_USE_ASSERT>=2 */ - return new_pool; -} - -void generic_scheduler::grow_task_pool( size_t new_size ) { - assert_task_pool_valid(); - if ( new_size < 2 * task_pool_size ) - new_size = 2 * task_pool_size; - task** new_pool = allocate_task_pool( new_size ); // updates task_pool_size - task** old_pool = dummy_slot.task_pool; - acquire_task_pool(); // requires the old dummy_slot.task_pool value - my_arena_slot->tail -= my_arena_slot->head; - __TBB_ASSERT( my_arena_slot->tail <= task_pool_size, "new task pool is too short" ); - memcpy( new_pool, old_pool + my_arena_slot->head, my_arena_slot->tail * sizeof(task*) ); - my_arena_slot->head = 0; - dummy_slot.task_pool = new_pool; - release_task_pool(); // updates the task pool pointer in our arena slot - free_task_pool( old_pool ); - assert_task_pool_valid(); -} - -/** ATTENTION: - This method is mostly the same as generic_scheduler::lock_task_pool(), with - a little different logic of slot state checks (slot is either locked or points - to our task pool). - Thus if either of them is changed, consider changing the counterpart as well. **/ -inline void generic_scheduler::acquire_task_pool() const { - if ( !in_arena() ) - return; // we are not in arena - nothing to lock - atomic_backoff backoff; - bool sync_prepare_done = false; - for(;;) { -#if TBB_USE_ASSERT - __TBB_ASSERT( my_arena_slot == my_arena->slot + arena_index, "invalid arena slot index" ); - // Local copy of the arena slot task pool pointer is necessary for the next - // assertion to work correctly to exclude asynchronous state transition effect. - task** tp = my_arena_slot->task_pool; - __TBB_ASSERT( tp == LockedTaskPool || tp == dummy_slot.task_pool, "slot ownership corrupt?" 
); -#endif - if( my_arena_slot->task_pool != LockedTaskPool && - __TBB_CompareAndSwapW( &my_arena_slot->task_pool, (intptr_t)LockedTaskPool, - (intptr_t)dummy_slot.task_pool ) == (intptr_t)dummy_slot.task_pool ) - { - // We acquired our own slot - ITT_NOTIFY(sync_acquired, my_arena_slot); - break; - } - else if( !sync_prepare_done ) { - // Start waiting - ITT_NOTIFY(sync_prepare, my_arena_slot); - sync_prepare_done = true; - } - // Someone else acquired a lock, so pause and do exponential backoff. - backoff.pause(); - } - __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "not really acquired task pool" ); -} // generic_scheduler::acquire_task_pool - -inline void generic_scheduler::release_task_pool() const { - if ( !in_arena() ) - return; // we are not in arena - nothing to unlock - __TBB_ASSERT( my_arena_slot, "we are not in arena" ); - __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "arena slot is not locked" ); - ITT_NOTIFY(sync_releasing, my_arena_slot); - __TBB_store_with_release( my_arena_slot->task_pool, dummy_slot.task_pool ); -} - -/** ATTENTION: - This method is mostly the same as generic_scheduler::acquire_task_pool(), - with a little different logic of slot state checks (slot can be empty, locked - or point to any task pool other than ours, and asynchronous transitions between - all these states are possible). - Thus if any of them is changed, consider changing the counterpart as well **/ -inline task** generic_scheduler::lock_task_pool( arena_slot* victim_arena_slot ) const { - task** victim_task_pool; - atomic_backoff backoff; - bool sync_prepare_done = false; - for(;;) { - victim_task_pool = victim_arena_slot->task_pool; - // NOTE: Do not use comparison of head and tail indices to check for - // the presence of work in the victim's task pool, as they may give - // incorrect indication because of task pool relocations and resizes. - if ( victim_task_pool == EmptyTaskPool ) { - // The victim thread emptied its task pool - nothing to lock - if( sync_prepare_done ) - ITT_NOTIFY(sync_cancel, victim_arena_slot); - break; - } - if( victim_task_pool != LockedTaskPool && - __TBB_CompareAndSwapW( &victim_arena_slot->task_pool, - (intptr_t)LockedTaskPool, (intptr_t)victim_task_pool ) == (intptr_t)victim_task_pool ) - { - // We've locked victim's task pool - ITT_NOTIFY(sync_acquired, victim_arena_slot); - break; - } - else if( !sync_prepare_done ) { - // Start waiting - ITT_NOTIFY(sync_prepare, victim_arena_slot); - sync_prepare_done = true; - } - GATHER_STATISTIC( ++my_counters.thieves_conflicts ); - // Someone else acquired a lock, so pause and do exponential backoff. - backoff.pause(); - } - __TBB_ASSERT( victim_task_pool == EmptyTaskPool || - (victim_arena_slot->task_pool == LockedTaskPool && victim_task_pool != LockedTaskPool), - "not really locked victim's task pool?" 
); - return victim_task_pool; -} // generic_scheduler::lock_task_pool - -inline void generic_scheduler::unlock_task_pool( arena_slot* victim_arena_slot, - task** victim_task_pool ) const { - __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" ); - __TBB_ASSERT( victim_arena_slot->task_pool == LockedTaskPool, "victim arena slot is not locked" ); - ITT_NOTIFY(sync_releasing, victim_arena_slot); - __TBB_store_with_release( victim_arena_slot->task_pool, victim_task_pool ); -} - - -inline task* generic_scheduler::prepare_for_spawning( task* t ) { - __TBB_ASSERT( t->state()==task::allocated, "attempt to spawn task that is not in 'allocated' state" ); - t->prefix().owner = this; - t->prefix().state = task::ready; -#if TBB_USE_ASSERT - if( task* parent = t->parent() ) { - internal::reference_count ref_count = parent->prefix().ref_count; - __TBB_ASSERT( ref_count>=0, "attempt to spawn task whose parent has a ref_count<0" ); - __TBB_ASSERT( ref_count!=0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" ); - parent->prefix().extra_state |= es_ref_count_active; - } -#endif /* TBB_USE_ASSERT */ - affinity_id dst_thread = t->prefix().affinity; - __TBB_ASSERT( dst_thread == 0 || is_version_3_task(*t), "backwards compatibility to TBB 2.0 tasks is broken" ); - if( dst_thread != 0 && dst_thread != my_affinity_id ) { - task_proxy& proxy = (task_proxy&)allocate_task( sizeof(task_proxy), - __TBB_CONTEXT_ARG(NULL, NULL) ); - // Mark as a proxy - proxy.prefix().extra_state = es_task_proxy; - proxy.outbox = &my_arena->mailbox(dst_thread); - proxy.task_and_tag = intptr_t(t)|3; - ITT_NOTIFY( sync_releasing, proxy.outbox ); - // Mail the proxy - after this point t may be destroyed by another thread at any moment. - proxy.outbox->push(proxy); - return &proxy; - } - return t; -} - -/** Conceptually, this method should be a member of class scheduler. - But doing so would force us to publish class scheduler in the headers. 
*/ -void generic_scheduler::local_spawn( task& first, task*& next ) { - __TBB_ASSERT( governor::is_set(this), NULL ); - assert_task_pool_valid(); - if ( &first.prefix().next == &next ) { - // Single task is being spawned - if ( my_arena_slot->tail == task_pool_size ) { - // If the free space at the beginning of the task pool is too short - // we are likely facing a pathological single-producer-multiple-consumers - // scenario, and thus it's better to expand the task pool - if ( my_arena_slot->head > min_task_pool_size/4 ) { - // Move the busy part of the deque to the beginning of the allocated space - acquire_task_pool(); - my_arena_slot->tail -= my_arena_slot->head; - memmove( dummy_slot.task_pool, dummy_slot.task_pool + my_arena_slot->head, my_arena_slot->tail * sizeof(task*) ); - my_arena_slot->head = 0; - release_task_pool(); - } - else { - grow_task_pool( task_pool_size + 1 ); - } - } - dummy_slot.task_pool[my_arena_slot->tail] = prepare_for_spawning( &first ); - ITT_NOTIFY(sync_releasing, my_arena_slot); - // The following store with release is required on ia64 only - size_t new_tail = my_arena_slot->tail + 1; - __TBB_store_with_release( my_arena_slot->tail, new_tail ); - __TBB_ASSERT ( my_arena_slot->tail <= task_pool_size, "task deque end was overwritten" ); - } - else { - // Task list is being spawned - const size_t initial_capacity = 64; - task *arr[initial_capacity]; - fast_reverse_vector<task*> tasks(arr, initial_capacity); - task *t_next = NULL; - for( task* t = &first; ; t = t_next ) { - // After prepare_for_spawning returns t may already have been destroyed. - // So milk it while it is alive. - bool end = &t->prefix().next == &next; - t_next = t->prefix().next; - tasks.push_back( prepare_for_spawning(t) ); - if( end ) - break; - } - size_t num_tasks = tasks.size(); - __TBB_ASSERT ( arena_index != null_arena_index, "invalid arena slot index" ); - if ( my_arena_slot->tail + num_tasks > task_pool_size ) { - // 1 compensates for head possibly temporarily incremented by a thief - size_t new_size = my_arena_slot->tail - my_arena_slot->head + num_tasks + 1; - if ( new_size <= task_pool_size ) { - // Move the busy part of the deque to the beginning of the allocated space - acquire_task_pool(); - my_arena_slot->tail -= my_arena_slot->head; - memmove( dummy_slot.task_pool, dummy_slot.task_pool + my_arena_slot->head, my_arena_slot->tail * sizeof(task*) ); - my_arena_slot->head = 0; - release_task_pool(); - } - else { - grow_task_pool( new_size ); - } - } -#if DO_ITT_NOTIFY - else { - // The preceding if-branch issues the same ittnotify inside release_task_pool() or grow_task_pool() methods - ITT_NOTIFY(sync_releasing, my_arena_slot); - } -#endif /* DO_ITT_NOTIFY */ - tasks.copy_memory( dummy_slot.task_pool + my_arena_slot->tail ); - // The following store with release is required on ia64 only - size_t new_tail = my_arena_slot->tail + num_tasks; - __TBB_store_with_release( my_arena_slot->tail, new_tail ); - __TBB_ASSERT ( my_arena_slot->tail <= task_pool_size, "task deque end was overwritten" ); - } -#if __TBB_ARENA_PER_MASTER - if ( !in_arena() ) - enter_arena(); - my_arena->advertise_new_work</*Spawned=*/true>(); -#else /* !__TBB_ARENA_PER_MASTER */ - if ( !in_arena() ) { - if ( is_worker() ) - enter_arena(); - else - try_enter_arena(); - } - my_arena->mark_pool_full(); -#endif /* !__TBB_ARENA_PER_MASTER */ - assert_task_pool_valid(); -} - -void generic_scheduler::local_spawn_root_and_wait( task& first, task*& next ) { - __TBB_ASSERT( governor::is_set(this), NULL ); - __TBB_ASSERT( 
&first, NULL ); - auto_empty_task dummy( __TBB_CONTEXT_ARG(this, first.prefix().context) ); - internal::reference_count n = 0; - for( task* t=&first; ; t=t->prefix().next ) { - ++n; - __TBB_ASSERT( !t->prefix().parent, "not a root task, or already running" ); - t->prefix().parent = &dummy; - if( &t->prefix().next==&next ) break; -#if __TBB_TASK_GROUP_CONTEXT - __TBB_ASSERT( t->prefix().context == t->prefix().next->prefix().context, - "all the root tasks in list must share the same context"); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - } - dummy.prefix().ref_count = n+1; - if( n>1 ) - local_spawn( *first.prefix().next, next ); - local_wait_for_all( dummy, &first ); -} - -inline task* generic_scheduler::get_mailbox_task() { - __TBB_ASSERT( my_affinity_id>0, "not in arena" ); - task* result = NULL; - while( task_proxy* t = inbox.pop() ) { - intptr_t tat = __TBB_load_with_acquire(t->task_and_tag); - __TBB_ASSERT( tat==task_proxy::mailbox_bit || (tat==(tat|3)&&tat!=3), NULL ); - if( tat!=task_proxy::mailbox_bit && __TBB_CompareAndSwapW( &t->task_and_tag, task_proxy::pool_bit, tat )==tat ) { - // Successfully grabbed the task, and left pool seeker with job of freeing the proxy. - ITT_NOTIFY( sync_acquired, inbox.outbox() ); - result = (task*)(tat & ~3); - result->prefix().owner = this; - break; - } - free_task_proxy( *t ); - } - return result; -} - -inline task* generic_scheduler::strip_proxy( task_proxy* tp ) { - __TBB_ASSERT( tp->prefix().extra_state==es_task_proxy, NULL ); - intptr_t tat = __TBB_load_with_acquire(tp->task_and_tag); - if( (tat&3)==3 ) { - // proxy is shared by a pool and a mailbox. - // Attempt to transition it to "empty proxy in mailbox" state. - if( __TBB_CompareAndSwapW( &tp->task_and_tag, task_proxy::mailbox_bit, tat )==tat ) { - // Successfully grabbed the task, and left the mailbox with the job of freeing the proxy. - return (task*)(tat&~3); - } - __TBB_ASSERT( tp->task_and_tag==task_proxy::pool_bit, NULL ); - } else { - // We have exclusive access to the proxy - __TBB_ASSERT( (tat&3)==task_proxy::pool_bit, "task did not come from pool?" 
); - __TBB_ASSERT ( !(tat&~3), "Empty proxy in the pool contains non-zero task pointer" ); - } -#if TBB_USE_ASSERT - tp->prefix().state = task::allocated; -#endif - free_task_proxy( *tp ); - // Another thread grabbed the underlying task via their mailbox - return NULL; -} - -#if __TBB_ARENA_PER_MASTER -void generic_scheduler::local_enqueue( task& t ) { - __TBB_ASSERT( governor::is_set(this), NULL ); - __TBB_ASSERT( t.state()==task::allocated, "attempt to enqueue task that is not in 'allocated' state" ); - t.prefix().owner = this; - t.prefix().state = task::ready; - -#if TBB_USE_ASSERT - if( task* parent = t.parent() ) { - internal::reference_count ref_count = parent->prefix().ref_count; - __TBB_ASSERT( ref_count>=0, "attempt to enqueue task whose parent has a ref_count<0" ); - __TBB_ASSERT( ref_count!=0, "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" ); - parent->prefix().extra_state |= es_ref_count_active; - } - __TBB_ASSERT(t.prefix().affinity==affinity_id(0), "affinity is ignored for enqueued tasks"); -#endif /* TBB_USE_ASSERT */ - - __TBB_ASSERT( my_arena, "thread is not in any arena" ); - ITT_NOTIFY(sync_releasing, &my_arena->my_task_stream); - my_arena->my_task_stream.push( &t, my_arena_slot->hint_for_push ); - my_arena->advertise_new_work< /*Spawned=*/ false >(); - assert_task_pool_valid(); -} - -inline task* generic_scheduler::dequeue_task() { - task* result = NULL; - my_arena->my_task_stream.pop(result, my_arena_slot->hint_for_pop); - if (result) ITT_NOTIFY(sync_acquired, &my_arena->my_task_stream); - return result; -} -#endif /* __TBB_ARENA_PER_MASTER */ - -inline task* generic_scheduler::get_task() { - task* result = NULL; -retry: - --my_arena_slot->tail; - __TBB_full_memory_fence(); - if ( (intptr_t)my_arena_slot->head > (intptr_t)my_arena_slot->tail ) { - acquire_task_pool(); - if ( (intptr_t)my_arena_slot->head <= (intptr_t)my_arena_slot->tail ) { - // The thief backed off - grab the task - result = dummy_slot.task_pool[my_arena_slot->tail]; - __TBB_ASSERT( !is_poisoned(result), NULL ); - poison_pointer( dummy_slot.task_pool[my_arena_slot->tail] ); - } - else { - __TBB_ASSERT ( my_arena_slot->head == my_arena_slot->tail + 1, "victim/thief arbitration algorithm failure" ); - } - if ( (intptr_t)my_arena_slot->head < (intptr_t)my_arena_slot->tail ) { - release_task_pool(); - } - else { - // In any case the deque is empty now, so compact it - my_arena_slot->head = my_arena_slot->tail = 0; - if ( in_arena() ) - leave_arena(); - } - } - else { - result = dummy_slot.task_pool[my_arena_slot->tail]; - __TBB_ASSERT( !is_poisoned(result), NULL ); - poison_pointer( dummy_slot.task_pool[my_arena_slot->tail] ); - } - if( result && is_proxy(*result) ) { - result = strip_proxy((task_proxy*)result); - if( !result ) { - goto retry; - } - GATHER_STATISTIC( ++my_counters.proxies_executed ); - // Following assertion should be true because TBB 2.0 tasks never specify affinity, and hence are not proxied. - __TBB_ASSERT( is_version_3_task(*result), "backwards compatibility with TBB 2.0 broken" ); - // Task affinity has changed. 
- innermost_running_task = result; - result->note_affinity(my_affinity_id); - } - return result; -} // generic_scheduler::get_task - -task* generic_scheduler::steal_task( arena_slot& victim_slot ) { - task** victim_pool = lock_task_pool( &victim_slot ); - if ( !victim_pool ) - return NULL; - const size_t none = ~size_t(0); - size_t first_skipped_proxy = none; - task* result = NULL; -retry: - ++victim_slot.head; - __TBB_full_memory_fence(); - if ( (intptr_t)victim_slot.head > (intptr_t)victim_slot.tail ) { - --victim_slot.head; - } - else { - result = victim_pool[victim_slot.head - 1]; - __TBB_ASSERT( !is_poisoned(result), NULL ); - if( is_proxy(*result) ) { - task_proxy& tp = *static_cast<task_proxy*>(result); - // If task will likely be grabbed by whom it was mailed to, skip it. - if( (tp.task_and_tag & 3) == 3 && tp.outbox->recipient_is_idle() ) { - GATHER_STATISTIC( ++my_counters.proxies_bypassed ); - if ( first_skipped_proxy == none ) - first_skipped_proxy = victim_slot.head - 1; - result = NULL; - goto retry; - } - } - poison_pointer(victim_pool[victim_slot.head - 1]); - } - if ( first_skipped_proxy != none ) { - if ( result ) { - victim_pool[victim_slot.head - 1] = victim_pool[first_skipped_proxy]; - poison_pointer( victim_pool[first_skipped_proxy] ); - __TBB_store_with_release( victim_slot.head, first_skipped_proxy + 1 ); - } - else - __TBB_store_with_release( victim_slot.head, first_skipped_proxy ); - } - unlock_task_pool( &victim_slot, victim_pool ); - return result; -} - -inline void generic_scheduler::do_enter_arena() { - my_arena_slot = &my_arena->slot[arena_index]; - __TBB_ASSERT ( my_arena_slot->head == my_arena_slot->tail, "task deque of a free slot must be empty" ); - __TBB_ASSERT ( dummy_slot.head < dummy_slot.tail, "entering arena without tasks to share" ); - my_arena_slot->head = dummy_slot.head; - my_arena_slot->tail = dummy_slot.tail; - // Release signal on behalf of previously spawned tasks (when this thread was not in arena yet) - ITT_NOTIFY(sync_releasing, my_arena_slot); - __TBB_store_with_release( my_arena_slot->task_pool, dummy_slot.task_pool ); - // We'll leave arena only when it's empty, so clean up local instances of indices. - dummy_slot.head = dummy_slot.tail = 0; -} - -void generic_scheduler::enter_arena() { - __TBB_ASSERT ( my_arena, "no arena: initialization not completed?" ); -#if __TBB_ARENA_PER_MASTER - __TBB_ASSERT ( !in_arena(), "thread is already in arena?" ); - __TBB_ASSERT ( arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" ); -#else /* !__TBB_ARENA_PER_MASTER */ - __TBB_ASSERT ( is_worker(), "only workers should use enter_arena()" ); - __TBB_ASSERT ( !in_arena(), "worker already in arena?" ); - __TBB_ASSERT ( arena_index < my_arena->prefix().number_of_workers, "invalid worker arena slot index" ); -#endif /* !__TBB_ARENA_PER_MASTER */ - __TBB_ASSERT ( my_arena->slot[arena_index].task_pool == EmptyTaskPool, "someone else grabbed my arena slot?" ); - do_enter_arena(); -} - -#if !__TBB_ARENA_PER_MASTER -void generic_scheduler::try_enter_arena() { - __TBB_ASSERT ( !is_worker(), "only masters should use try_enter_arena()" ); - __TBB_ASSERT ( my_arena, "no arena: initialization not completed?" ); - __TBB_ASSERT ( !in_arena(), "master already in arena?" 
); - __TBB_ASSERT ( arena_index >= my_arena->prefix().number_of_workers && - arena_index < my_arena->prefix().number_of_slots, "invalid arena slot hint value" ); - - size_t h = arena_index; - // We do not lock task pool upon successful entering arena - if( my_arena->slot[h].task_pool != EmptyTaskPool || - __TBB_CompareAndSwapW( &my_arena->slot[h].task_pool, (intptr_t)LockedTaskPool, - (intptr_t)EmptyTaskPool ) != (intptr_t)EmptyTaskPool ) - { - // Hinted arena slot is already busy, try some of the others at random - unsigned first = my_arena->prefix().number_of_workers, - last = my_arena->prefix().number_of_slots; - unsigned n = last - first - 1; - /// \todo Is this limit reasonable? - size_t max_attempts = last - first; - for (;;) { - size_t k = first + random.get() % n; - if( k >= h ) - ++k; // Adjusts random distribution to exclude previously tried slot - h = k; - if( my_arena->slot[h].task_pool == EmptyTaskPool && - __TBB_CompareAndSwapW( &my_arena->slot[h].task_pool, (intptr_t)LockedTaskPool, - (intptr_t)EmptyTaskPool ) == (intptr_t)EmptyTaskPool ) - { - break; - } - if ( --max_attempts == 0 ) { - // After so many attempts we are still unable to find a vacant arena slot. - // Cease the vain effort and work outside of arena for a while. - return; - } - } - } - // Successfully claimed a slot in the arena. - ITT_NOTIFY(sync_acquired, &my_arena->slot[h]); - __TBB_ASSERT ( my_arena->slot[h].task_pool == LockedTaskPool, "arena slot is not actually acquired" ); - arena_index = h; - do_enter_arena(); - attach_mailbox( affinity_id(h+1) ); -} -#endif /* !__TBB_ARENA_PER_MASTER */ - -void generic_scheduler::leave_arena() { - __TBB_ASSERT( in_arena(), "Not in arena" ); - // Do not reset arena_index. It will be used to (attempt to) re-acquire the slot next time - __TBB_ASSERT( &my_arena->slot[arena_index] == my_arena_slot, "arena slot and slot index mismatch" ); - __TBB_ASSERT ( my_arena_slot->task_pool == LockedTaskPool, "Task pool must be locked when leaving arena" ); - __TBB_ASSERT ( my_arena_slot->head == my_arena_slot->tail, "Cannot leave arena when the task pool is not empty" ); -#if !__TBB_ARENA_PER_MASTER - if ( !is_worker() ) { - my_affinity_id = 0; - inbox.detach(); - } -#endif /* !__TBB_ARENA_PER_MASTER */ - ITT_NOTIFY(sync_releasing, &my_arena->slot[arena_index]); - __TBB_store_with_release( my_arena_slot->task_pool, EmptyTaskPool ); - my_arena_slot = &dummy_slot; -} - -#if __TBB_ARENA_PER_MASTER -generic_scheduler* generic_scheduler::create_worker( market& m, size_t index ) { - generic_scheduler* s = allocate_scheduler( NULL, index ); -#if __TBB_TASK_GROUP_CONTEXT - s->dummy_task->prefix().context = &dummy_context; - // Sync up the local cancellation state with the global one. No need for fence here. - s->local_cancel_count = global_cancel_count; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - s->my_market = &m; - s->init_stack_info(); - return s; -} - -#else /* !__TBB_ARENA_PER_MASTER */ - -generic_scheduler* generic_scheduler::create_worker( arena& a, size_t index ) { - generic_scheduler* s = allocate_scheduler( &a, index ); - - // Put myself into the arena -#if __TBB_TASK_GROUP_CONTEXT - s->dummy_task->prefix().context = &dummy_context; - // Sync up the local cancellation state with the global one. No need for fence here. 
- s->local_cancel_count = global_cancel_count; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - s->attach_mailbox( index+1 ); - s->init_stack_info(); - - __TBB_store_with_release( a.prefix().worker_list[index].scheduler, s ); - return s; -} -#endif /* !__TBB_ARENA_PER_MASTER */ - -generic_scheduler* generic_scheduler::create_master( arena& a ) { - generic_scheduler* s = allocate_scheduler( &a, -#if __TBB_ARENA_PER_MASTER - 0 // Master thread always occupies the first slot -#else /* !__TBB_ARENA_PER_MASTER */ - null_arena_index // Master thread will have to search for a vacant slot -#endif /* !__TBB_ARENA_PER_MASTER */ - ); - task& t = *s->dummy_task; - s->innermost_running_task = &t; - t.prefix().ref_count = 1; - governor::sign_on(s); - __TBB_ASSERT( &task::self()==&t, "governor::sign_on failed?" ); -#if __TBB_ARENA_PER_MASTER -#if __TBB_TASK_GROUP_CONTEXT - // Context to be used by root tasks by default (if the user has not specified one). - // Allocation is done by NFS allocator because we cannot reuse memory allocated - // for task objects since the free list is empty at the moment. - t.prefix().context = a.my_master_default_ctx = - new ( NFS_Allocate(sizeof(task_group_context), 1, NULL) ) task_group_context(task_group_context::isolated); -#endif - s->my_market = a.my_market; - __TBB_ASSERT( s->arena_index == 0, "Master thread must occupy the first slot in its arena" ); - s->attach_mailbox(1); - a.slot[0].my_scheduler = s; -#if _WIN32|_WIN64 - __TBB_ASSERT( s->my_market, NULL ); - s->my_market->register_master( s->master_exec_resource ); -#endif /* _WIN32|_WIN64 */ -#else /* !__TBB_ARENA_PER_MASTER */ -#if _WIN32|_WIN64 - s->register_master(); -#endif -#if __TBB_TASK_GROUP_CONTEXT - // Context to be used by root tasks by default (if the user has not specified one). - // Allocation is done by NFS allocator because we cannot reuse memory allocated - // for task objects since the free list is empty at the moment. - t.prefix().context = new ( NFS_Allocate(sizeof(task_group_context), 1, NULL) ) task_group_context(task_group_context::isolated); - scheduler_list_node_t &node = s->my_node; - { - mutex::scoped_lock lock(the_scheduler_list_mutex); - node.my_next = the_scheduler_list_head.my_next; - node.my_prev = &the_scheduler_list_head; - the_scheduler_list_head.my_next->my_prev = &node; - the_scheduler_list_head.my_next = &node; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - unsigned last = a.prefix().number_of_slots, - cur_limit = a.prefix().limit; - // This slot index assignment is just a hint to ... - if ( cur_limit < last ) { - // ... to prevent competition between the first few masters. - s->arena_index = cur_limit++; - // In the absence of exception handling this code is a subject to data - // race in case of multiple masters concurrently entering empty arena. - // But it does not affect correctness, and can only result in a few - // masters competing for the same arena slot during the first acquisition. - // The cost of competition is low in comparison to that of oversubscription. - a.prefix().limit = cur_limit; - } - else { - // ... to minimize the probability of competition between multiple masters. - unsigned first = a.prefix().number_of_workers; - s->arena_index = first + s->random.get() % (last - first); - } -#if __TBB_TASK_GROUP_CONTEXT - } -#endif -#endif /* !__TBB_ARENA_PER_MASTER */ - s->init_stack_info(); -#if __TBB_TASK_GROUP_CONTEXT - // Sync up the local cancellation state with the global one. No need for fence here. 
- s->local_cancel_count = global_cancel_count; -#endif -#if __TBB_SCHEDULER_OBSERVER - // Process any existing observers. - s->notify_entry_observers(); -#endif /* __TBB_SCHEDULER_OBSERVER */ - return s; -} - -void generic_scheduler::cleanup_worker( void* arg, bool is_worker ) { - generic_scheduler& s = *(generic_scheduler*)arg; - __TBB_ASSERT( s.dummy_slot.task_pool, "cleaning up worker with missing task pool" ); -// APM TODO: Decide how observers should react to each entry/leave to/from arena -#if __TBB_SCHEDULER_OBSERVER - s.notify_exit_observers( is_worker ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - // When comparing "head" and "tail" indices ">=" is used because this worker's - // task pool may still be published in the arena, and thieves can optimistically - // bump "head" (and then roll back). - __TBB_ASSERT( s.my_arena_slot->task_pool == EmptyTaskPool || s.my_arena_slot->head >= s.my_arena_slot->tail, - "worker has unfinished work at run down" ); - s.free_scheduler(); -} - -void generic_scheduler::cleanup_master() { - generic_scheduler& s = *this; // for similarity with cleanup_worker - __TBB_ASSERT( s.dummy_slot.task_pool, "cleaning up master with missing task pool" ); -#if __TBB_SCHEDULER_OBSERVER - s.notify_exit_observers(/*is_worker=*/false); -#endif /* __TBB_SCHEDULER_OBSERVER */ - if ( !local_task_pool_empty() ) { - __TBB_ASSERT ( governor::is_set(this), "TLS slot is cleared before the task pool cleanup" ); - s.local_wait_for_all( *s.dummy_task, NULL ); - __TBB_ASSERT ( governor::is_set(this), "Other thread reused our TLS key during the task pool cleanup" ); - } -#if __TBB_ARENA_PER_MASTER -#if _WIN32|_WIN64 - __TBB_ASSERT( s.my_market, NULL ); - s.my_market->unregister_master( s.master_exec_resource ); -#endif /* _WIN32|_WIN64 */ - arena* a = s.my_arena; -#if __TBB_STATISTICS - *a->slot[0].my_counters += s.my_counters; -#endif /* __TBB_STATISTICS */ -#else /* !__TBB_ARENA_PER_MASTER */ -#if _WIN32|_WIN64 - s.unregister_master(); -#endif /* _WIN32|_WIN64 */ -#endif /* __TBB_ARENA_PER_MASTER */ - s.free_scheduler(); -#if __TBB_ARENA_PER_MASTER - a->slot[0].my_scheduler = NULL; - // Do not close arena if some fire-and-forget tasks remain; workers should care of it. - if( a->my_task_stream.empty() && a->pool_state.fetch_and_store(arena::SNAPSHOT_EMPTY)!=arena::SNAPSHOT_EMPTY ) - a->my_market->adjust_demand( *a, -(int)a->my_max_num_workers ); -#if __TBB_STATISTICS_EARLY_DUMP - GATHER_STATISTIC( a->dump_arena_statistics() ); -#endif - if ( --a->my_num_threads_active==0 && a->pool_state==arena::SNAPSHOT_EMPTY ) - a->close_arena(); -#else /* !__TBB_ARENA_PER_MASTER */ - governor::finish_with_arena(); -#endif /* !__TBB_ARENA_PER_MASTER */ -} - -#if __TBB_SCHEDULER_OBSERVER - void generic_scheduler::notify_entry_observers() { - local_last_observer_proxy = observer_proxy::process_list(local_last_observer_proxy,is_worker(),/*is_entry=*/true); - } - - void generic_scheduler::notify_exit_observers( bool is_worker ) { - observer_proxy::process_list(local_last_observer_proxy,is_worker,/*is_entry=*/false); - } -#endif /* __TBB_SCHEDULER_OBSERVER */ - -} // namespace internal -} // namespace tbb - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler.h b/deal.II/bundled/tbb30_104oss/src/tbb/scheduler.h deleted file mode 100644 index 7adbb3c905..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler.h +++ /dev/null @@ -1,556 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_scheduler_H -#define _TBB_scheduler_H - -#include "scheduler_common.h" -#include "arena.h" -#include "mailbox.h" -#include "tbb_misc.h" // for FastRandom - -#if __TBB_TASK_GROUP_CONTEXT -#include "tbb/spin_mutex.h" -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#if __TBB_SURVIVE_THREAD_SWITCH -#include "cilk-tbb-interop.h" -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -namespace tbb { -namespace internal { - -template<typename SchedulerTraits> class custom_scheduler; - -//------------------------------------------------------------------------ -// generic_scheduler -//------------------------------------------------------------------------ - -#if __TBB_TASK_GROUP_CONTEXT -struct scheduler_list_node_t { - scheduler_list_node_t *my_prev, - *my_next; -}; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#define EmptyTaskPool ((task**)0) -#define LockedTaskPool ((task**)~(intptr_t)0) - -class governor; - -#if __TBB_SCHEDULER_OBSERVER -class task_scheduler_observer_v3; -class observer_proxy; -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#if __TBB_ARENA_PER_MASTER -class market; -#endif - -//! Cilk-style task scheduler. -/** None of the fields here are every read or written by threads other than - the thread that creates the instance. - - Class generic_scheduler is an abstract base class that contains most of the scheduler, - except for tweaks specific to processors and tools (e.g. VTune). - The derived template class custom_scheduler<SchedulerTraits> fills in the tweaks. */ -class generic_scheduler: public scheduler, public ::rml::job { - friend class tbb::task; -#if __TBB_ARENA_PER_MASTER - friend class market; -#else - friend class UnpaddedArenaPrefix; -#endif /* !__TBB_ARENA_PER_MASTER */ - friend class arena; - friend class allocate_root_proxy; - friend class governor; -#if __TBB_TASK_GROUP_CONTEXT - friend class allocate_root_with_context_proxy; - friend class tbb::task_group_context; -#endif /* __TBB_TASK_GROUP_CONTEXT */ -#if __TBB_SCHEDULER_OBSERVER - friend class task_scheduler_observer_v3; -#endif /* __TBB_SCHEDULER_OBSERVER */ - friend class scheduler; - template<typename SchedulerTraits> friend class custom_scheduler; - - //! If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd. 
- static const size_t quick_task_size = 256-task_prefix_reservation_size; - - static bool is_version_3_task( task& t ) { - return (t.prefix().extra_state & 0x0F)>=0x1; - } - - //! Position in the call stack specifying its maximal filling when stealing is still allowed - uintptr_t my_stealing_threshold; -#if __TBB_ipf - //! Position in the RSE backup area specifying its maximal filling when stealing is still allowed - uintptr_t my_rsb_stealing_threshold; -#endif - - static const size_t null_arena_index = ~size_t(0); - - //! Index of the arena slot the scheduler occupies now, or occupied last time. - size_t arena_index; - - //! Capacity of ready tasks deque (number of elements - pointers to task). - size_t task_pool_size; - - //! Pointer to the slot in the arena we own at the moment. - /** When out of arena it points to this scheduler's dummy_slot. **/ - mutable arena_slot* my_arena_slot; - - bool in_arena () const { return my_arena_slot != &dummy_slot; } - - bool local_task_pool_empty () { - return my_arena_slot->task_pool == EmptyTaskPool || my_arena_slot->head >= my_arena_slot->tail; - } - -#if __TBB_ARENA_PER_MASTER - //! The market I am in - market* my_market; - - //! The arena that I own (if master) or am servicing at the moment (if worker) - arena* my_arena; -#else /* !__TBB_ARENA_PER_MASTER */ - //! The arena that I own (if master) or am servicing at the moment (if worker) - arena* const my_arena; -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! Random number generator used for picking a random victim from which to steal. - FastRandom random; - - //! Free list of small tasks that can be reused. - task* free_list; - - //! Innermost task whose task::execute() is running. - task* innermost_running_task; - - //! Fake root task created by slave threads. - /** The task is used as the "parent" argument to method wait_for_all. */ - task* dummy_task; - - //! Reference count for scheduler - /** Number of task_scheduler_init objects that point to this scheduler */ - long ref_count; - - mail_inbox inbox; - - void attach_mailbox( affinity_id id ) { - __TBB_ASSERT(id>0,NULL); - inbox.attach( my_arena->mailbox(id) ); - my_affinity_id = id; - } - - //! The mailbox id assigned to this scheduler. - /** The id is assigned upon first entry into the arena. - TODO: how are id's being garbage collected? - TODO: master thread may enter arena and leave and then reenter. - We want to give it the same affinity_id upon reentry, if practical. - */ - affinity_id my_affinity_id; - - /* A couple of bools can be located here because space is otherwise just padding after my_affinity_id. */ - - //! True if this is assigned to thread local storage by registering with governor. - bool is_registered; - - //! True if *this was created by automatic TBB initialization - bool is_auto_initialized; - -#if __TBB_SCHEDULER_OBSERVER - //! Last observer_proxy processed by this scheduler - observer_proxy* local_last_observer_proxy; - - //! Notify any entry observers that have been created since the last call by this thread. - void notify_entry_observers(); - - //! Notify all exit observers that this thread is no longer participating in task scheduling. - void notify_exit_observers( bool is_worker ); -#endif /* __TBB_SCHEDULER_OBSERVER */ - -#if __TBB_COUNT_TASK_NODES - //! Net number of big task objects that have been allocated but not yet freed. - intptr_t task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - - //! Sets up the data necessary for the stealing limiting heuristics - void init_stack_info (); - - //! 
Returns true if stealing is allowed - bool can_steal () { - int anchor; -#if __TBB_ipf - return my_stealing_threshold < (uintptr_t)&anchor && (uintptr_t)__TBB_get_bsp() < my_rsb_stealing_threshold; -#else - return my_stealing_threshold < (uintptr_t)&anchor; -#endif - } - - //! Actions common to enter_arena and try_enter_arena - void do_enter_arena(); - - //! Used by workers to enter the arena - /** Does not lock the task pool in case if arena slot has been successfully grabbed. **/ - void enter_arena(); - -#if !__TBB_ARENA_PER_MASTER - //! Used by masters to try to enter the arena - /** Does not lock the task pool in case if arena slot has been successfully grabbed. **/ - void try_enter_arena(); -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! Leave the arena - void leave_arena(); - - //! Locks victim's task pool, and returns pointer to it. The pointer can be NULL. - task** lock_task_pool( arena_slot* victim_arena_slot ) const; - - //! Unlocks victim's task pool - void unlock_task_pool( arena_slot* victim_arena_slot, task** victim_task_pool ) const; - - - //! Locks the local task pool - void acquire_task_pool() const; - - //! Unlocks the local task pool - void release_task_pool() const; - - //! Checks if t is affinitized to another thread, and if so, bundles it as proxy. - /** Returns either t or proxy containing t. **/ - task* prepare_for_spawning( task* t ); - - //! Get a task from the local pool. - /** Called only by the pool owner. - Returns the pointer to the task or NULL if the pool is empty. - In the latter case compacts the pool. **/ - task* get_task(); - - //! Attempt to get a task from the mailbox. - /** Called only by the thread that owns *this. - Gets a task only if there is one not yet executed by another thread. - If successful, unlinks the task and returns a pointer to it. - Otherwise returns NULL. */ - task* get_mailbox_task(); - - //! True if t is a task_proxy - static bool is_proxy( const task& t ) { - return t.prefix().extra_state==es_task_proxy; - } - - //! Extracts task pointer from task_proxy, and frees the proxy. - /** Return NULL if underlying task was claimed by mailbox. */ - task* strip_proxy( task_proxy* result ); - -#if __TBB_ARENA_PER_MASTER - //! Get a task from the starvation-resistant task stream of the current arena. - /** Returns the pointer to the task, or NULL if the attempt was unsuccessful. - The latter case does not mean that the stream is drained, however. **/ - task* dequeue_task(); - -#endif /* __TBB_ARENA_PER_MASTER */ - //! Steal task from another scheduler's ready pool. - task* steal_task( arena_slot& victim_arena_slot ); - - /** Initial size of the task deque sufficient to serve without reallocation - 4 nested parallel_for calls with iteration space of 65535 grains each. **/ - static const size_t min_task_pool_size = 64; - - //! Allocate task pool containing at least n elements. - task** allocate_task_pool( size_t n ); - - //! Deallocate task pool that was allocated by means of allocate_task_pool. - static void free_task_pool( task** pool ) { - __TBB_ASSERT( pool, "attempt to free NULL TaskPool" ); - NFS_Free( pool ); - } - - //! Grow ready task deque to at least n elements. - void grow_task_pool( size_t n ); - - //! Initialize a scheduler for a master thread. - static generic_scheduler* create_master( arena& a ); - - //! Perform necessary cleanup when a master thread stops using TBB. - void cleanup_master(); - - //! Initialize a scheduler for a worker thread. 
-#if __TBB_ARENA_PER_MASTER - static generic_scheduler* create_worker( market& m, size_t index ); -#else /* !__TBB_ARENA_PER_MASTER */ - static generic_scheduler* create_worker( arena& a, size_t index ); -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! Perform necessary cleanup when a worker thread finishes. - static void cleanup_worker( void* arg, bool is_worker ); - -protected: - generic_scheduler( arena*, size_t index ); - -#if TBB_USE_ASSERT > 1 - //! Check that internal data structures are in consistent state. - /** Raises __TBB_ASSERT failure if inconsistency is found. */ - void assert_task_pool_valid() const; -#else - void assert_task_pool_valid() const {} -#endif /* TBB_USE_ASSERT <= 1 */ - -public: - /*override*/ - void spawn( task& first, task*& next ); - - /*override*/ - void spawn_root_and_wait( task& first, task*& next ); - -#if __TBB_ARENA_PER_MASTER - /*override*/ - void enqueue( task& task_, void* reserved ); - - void local_enqueue( task& task_ ); -#endif /* __TBB_ARENA_PER_MASTER */ - - void local_spawn( task& first, task*& next ); - void local_spawn_root_and_wait( task& first, task*& next ); - virtual void local_wait_for_all( task& parent, task* child ) = 0; - - //! Destroy and deallocate this scheduler object - void free_scheduler(); - - //! Allocate task object, either from the heap or a free list. - /** Returns uninitialized task object with initialized prefix. */ - task& allocate_task( size_t number_of_bytes, - __TBB_CONTEXT_ARG(task* parent, task_group_context* context) ); - - //! Put task on free list. - /** Does not call destructor. */ - template<free_task_hint h> - void free_task( task& t ); - - void free_task_proxy( task_proxy& tp ) { -#if TBB_USE_ASSERT - poison_pointer( tp.outbox ); - poison_pointer( tp.next_in_mailbox ); - tp.task_and_tag = 0xDEADBEEF; -#endif /* TBB_USE_ASSERT */ - free_task<small_task>(tp); - } - - //! Return task object to the memory allocator. - void deallocate_task( task& t ) { -#if TBB_USE_ASSERT - task_prefix& p = t.prefix(); - p.state = 0xFF; - p.extra_state = 0xFF; - poison_pointer(p.next); -#endif /* TBB_USE_ASSERT */ - NFS_Free((char*)&t-task_prefix_reservation_size); -#if __TBB_COUNT_TASK_NODES - --task_node_count; -#endif /* __TBB_COUNT_TASK_NODES */ - } - - //! True if running on a worker thread, false otherwise. - bool is_worker() { -#if __TBB_ARENA_PER_MASTER - return arena_index != 0; -#else /* !__TBB_ARENA_PER_MASTER */ - return arena_index < my_arena->prefix().number_of_workers; -#endif /* !__TBB_ARENA_PER_MASTER */ - } - -#if __TBB_ARENA_PER_MASTER - //! Returns number of worker threads in the arena this thread belongs to. - unsigned number_of_workers_in_my_arena() { - return my_arena->my_max_num_workers; - } -#endif /* __TBB_ARENA_PER_MASTER */ - -#if __TBB_COUNT_TASK_NODES - intptr_t get_task_node_count( bool count_arena_workers = false ) { - return task_node_count + (count_arena_workers? my_arena->workers_task_node_count(): 0); - } -#endif /* __TBB_COUNT_TASK_NODES */ - - //! Special value used to mark return_list as not taking any more entries. - static task* plugged_return_list() {return (task*)(intptr_t)(-1);} - - //! Number of small tasks that have been allocated by this scheduler. - intptr_t small_task_count; - - //! List of small tasks that have been returned to this scheduler by other schedulers. - task* return_list; - - //! Try getting a task from the mailbox or stealing from another scheduler. - /** Redirects to a customization. */ - virtual task* receive_or_steal_task( reference_count&, bool ) = 0; - - //! 
Free a small task t that that was allocated by a different scheduler - void free_nonlocal_small_task( task& t ); - -#if __TBB_TASK_GROUP_CONTEXT - //! Padding isolating thread local members from members that can be written to by other threads. - char _padding1[NFS_MaxLineSize - sizeof(context_list_node_t)]; - - //! Head of the thread specific list of task group contexts. - context_list_node_t context_list_head; - - //! Mutex protecting access to the list of task group contexts. - spin_mutex context_list_mutex; - -#if !__TBB_ARENA_PER_MASTER - //! Used to form the list of master thread schedulers. - scheduler_list_node_t my_node; -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! Thread local cancellation epoch. - /** When local epoch equals the global one, the cancellation state known - to this thread is synchronized with the global cancellation state. **/ - uintptr_t local_cancel_count; - - //! Flag indicating that a context is being destructed by its owner thread - /** Together with nonlocal_ctx_list_update constitue a synchronization protocol - that keeps hot path of context destruction (by the owner thread) mostly - lock-free. **/ - uintptr_t local_ctx_list_update; - - //! Detaches abandoned contexts - /** These contexts must be destroyed by other threads. **/ - void cleanup_local_context_list (); - -#if !__TBB_ARENA_PER_MASTER - //! Propagates cancellation request to all descendants of the context. - void propagate_cancellation ( task_group_context& ctx ); -#endif /* !__TBB_ARENA_PER_MASTER */ - - //! Propagates cancellation request to contexts registered by this scheduler. - void propagate_cancellation (); -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#if _WIN32||_WIN64 -private: - //! Handle returned by RML when registering a master with RML - ::rml::server::execution_resource_t master_exec_resource; - -#if !__TBB_ARENA_PER_MASTER - //! register master with the resource manager - void register_master() { - __TBB_ASSERT( my_arena->prefix().server, "RML server not defined?" ); - // the server may ignore registration and set master_exec_resource to NULL. - my_arena->prefix().server->register_master( master_exec_resource ); - } - - //! unregister master with the resource manager - void unregister_master() const { - my_arena->prefix().server->unregister_master( master_exec_resource ); - } -#endif /* !__TBB_ARENA_PER_MASTER && ( _WIN32||_WIN64 ) */ -#endif /* _WIN32||_WIN64 */ - - //! Dummy slot used when scheduler is not in arena - /** The data structure is heavily padded, therefore it should be placed after - other data fields used by the owner thread only to allow compiler using - instructions with short offsets when accessing the majority of data members. **/ - arena_slot dummy_slot; - -#if __TBB_TASK_GROUP_CONTEXT - //! Flag indicating that a context is being destructed by non-owner thread. - /** See also local_ctx_list_update. **/ - uintptr_t nonlocal_ctx_list_update; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -#if __TBB_SURVIVE_THREAD_SWITCH - __cilk_tbb_unwatch_thunk my_cilk_unwatch_thunk; -#if TBB_USE_ASSERT - //! State values used to check interface contract with Cilk runtime. - /** Names of cs_running...cs_freed derived from state machine diagram in cilk-tbb-interop.h */ - enum cilk_state_t { - cs_none=0xF000, // Start at nonzero value so that we can detect use of zeroed memory. - cs_running, - cs_limbo, - cs_freed - }; - cilk_state_t my_cilk_state; -#endif /* TBB_USE_ASSERT */ -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -#if __TBB_STATISTICS - //! 
Set of counters to track internal statistics on per thread basis - /** Placed at the end of the class definition to minimize the disturbance of - the core logic memory operations. **/ - mutable statistics_counters my_counters; -#endif /* __TBB_STATISTICS */ - -}; // class generic_scheduler - - -template<free_task_hint h> -void generic_scheduler::free_task( task& t ) { - GATHER_STATISTIC(--my_counters.active_tasks); - task_prefix& p = t.prefix(); - // Verify that optimization hints are correct. - __TBB_ASSERT( h!=small_local_task || p.origin==this, NULL ); - __TBB_ASSERT( !(h&small_task) || p.origin, NULL ); -#if TBB_USE_ASSERT - p.depth = 0xDEADBEEF; - p.ref_count = 0xDEADBEEF; - poison_pointer(p.owner); -#endif /* TBB_USE_ASSERT */ - __TBB_ASSERT( 1L<<t.state() & (1L<<task::executing|1L<<task::allocated), NULL ); - p.state = task::freed; - if( h==small_local_task || p.origin==this ) { - GATHER_STATISTIC(++my_counters.free_list_length); - p.next = free_list; - free_list = &t; - } else if( !(h&local_task) && p.origin ) { - free_nonlocal_small_task(t); - } else { - GATHER_STATISTIC(--my_counters.big_tasks); - deallocate_task(t); - } -} - -} // namespace internal -} // namespace tbb - -#include "governor.h" - -inline void tbb::internal::generic_scheduler::spawn( task& first, task*& next ) { - governor::local_scheduler()->local_spawn( first, next ); -} - -inline void tbb::internal::generic_scheduler::spawn_root_and_wait( task& first, task*& next ) { - governor::local_scheduler()->local_spawn_root_and_wait( first, next ); -} - -#if __TBB_ARENA_PER_MASTER -inline void tbb::internal::generic_scheduler::enqueue( task& task_, void* /*reserved*/ ) { - governor::local_scheduler()->local_enqueue( task_ ); -} - -#endif /* __TBB_ARENA_PER_MASTER */ -#endif /* _TBB_scheduler_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler_common.h b/deal.II/bundled/tbb30_104oss/src/tbb/scheduler_common.h deleted file mode 100644 index 37ae26ec5a..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler_common.h +++ /dev/null @@ -1,192 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _TBB_scheduler_common_H -#define _TBB_scheduler_common_H - -#include "tbb/tbb_stddef.h" - -#include <string.h> // for memset, memcpy, memmove - -#include "tbb_statistics.h" - -/* Temporarily change "private" to "public" while including "tbb/task.h". - This hack allows us to avoid publishing internal types and methods - in the public header files just for sake of friend declarations. */ -#define private public -#include "tbb/task.h" -#include "tbb/tbb_exception.h" -#undef private - -// This macro is an attempt to get rid of ugly ifdefs in the shared parts of the code. -// It drops the second argument depending on whether the controlling macro is defined. -// The first argument is just a convenience allowing to keep comma before the macro usage. -#if __TBB_TASK_GROUP_CONTEXT - #define __TBB_CONTEXT_ARG(arg1, context) arg1, context -#else /* !__TBB_TASK_GROUP_CONTEXT */ - #define __TBB_CONTEXT_ARG(arg1, context) arg1 -#endif /* !__TBB_TASK_GROUP_CONTEXT */ - -#if DO_TBB_TRACE -#include <cstdio> -#define TBB_TRACE(x) ((void)std::printf x) -#else -#define TBB_TRACE(x) ((void)(0)) -#endif /* DO_TBB_TRACE */ - -#if _MSC_VER && !defined(__INTEL_COMPILER) - // Workaround for overzealous compiler warnings - // These particular warnings are so ubiquitous that no attempt is made to narrow - // the scope of the warnings. - #pragma warning (disable: 4100 4127 4312 4244 4267 4706) -#endif - -namespace tbb { -namespace internal { - -/** Defined in scheduler.cpp **/ -extern uintptr_t global_cancel_count; - -//! Alignment for a task object -const size_t task_alignment = 16; - -//! Number of bytes reserved for a task prefix -/** If not exactly sizeof(task_prefix), the extra bytes *precede* the task_prefix. */ -const size_t task_prefix_reservation_size = ((sizeof(internal::task_prefix)-1)/task_alignment+1)*task_alignment; - -//! Definitions for bits in task_prefix::extra_state -enum task_extra_state { - //! Tag for v1 tasks (i.e. tasks in TBB 1.0 and 2.0) - es_version_1_task = 0, - //! Tag for v3 tasks (i.e. tasks in TBB 2.1-2.2) - es_version_3_task = 1, - //! Tag for v3 task_proxy. - es_task_proxy = 0x20, - //! Set if ref_count might be changed by another thread. Used for debugging. - es_ref_count_active = 0x40, - //! Set if the task has been stolen - es_task_is_stolen = 0x80 -}; - -//! Optimization hint to free_task that enables it omit unnecessary tests and code. -enum free_task_hint { - //! No hint - no_hint=0, - //! Task is known to have been allocated by this scheduler - local_task=1, - //! Task is known to be a small task. - /** Task should be returned to the free list of *some* scheduler, possibly not this scheduler. */ - small_task=2, - //! Bitwise-OR of local_task and small_task. - /** Task should be returned to free list of this scheduler. */ - small_local_task=3 -}; - -//------------------------------------------------------------------------ -// Debugging support -//------------------------------------------------------------------------ - -#if TBB_USE_ASSERT - -static const uintptr_t venom = -#if __TBB_WORDSIZE == 8 - 0xDDEEAADDDEADBEEF; -#else - 0xDEADBEEF; -#endif - - -/** In contrast to poison_pointer() and assert_task_valid() poison_value() is a macro - because the variable used as its argument may be undefined in release builds. **/ -#define poison_value(g) (g = venom) - -/** Expected to be used in assertions only, thus no empty form is defined. **/ -inline bool is_alive( uintptr_t v ) { return v != venom; } - -/** Logically, this method should be a member of class task. 
- But we do not want to publish it, so it is here instead. */ -inline void assert_task_valid( const task& task ) { - __TBB_ASSERT( &task!=NULL, NULL ); - __TBB_ASSERT( !is_poisoned(&task), NULL ); - __TBB_ASSERT( (uintptr_t)&task % task_alignment == 0, "misaligned task" ); - __TBB_ASSERT( (unsigned)task.state()<=(unsigned)task::recycle, "corrupt task (invalid state)" ); -} - -#else /* !TBB_USE_ASSERT */ - -#define poison_value(g) ((void)0) - -inline void assert_task_valid( const task& ) {} - -#endif /* !TBB_USE_ASSERT */ - -//------------------------------------------------------------------------ -// Helpers -//------------------------------------------------------------------------ - -inline bool ConcurrentWaitsEnabled ( task& t ) { - return (t.prefix().context->my_version_and_traits & task_group_context::concurrent_wait) != 0; -} - -inline bool CancellationInfoPresent ( task& t ) { - return t.prefix().context->my_cancellation_requested != 0; -} - -#if __TBB_TASK_GROUP_CONTEXT -#if TBB_USE_CAPTURED_EXCEPTION - inline tbb_exception* TbbCurrentException( task_group_context*, tbb_exception* src) { return src->move(); } - inline tbb_exception* TbbCurrentException( task_group_context*, captured_exception* src) { return src; } -#else - // Using macro instead of an inline function here allows to avoid evaluation of the - // TbbCapturedException expression when exact propagation is enabled for the context. - #define TbbCurrentException(context, TbbCapturedException) \ - context->my_version_and_traits & task_group_context::exact_exception \ - ? tbb_exception_ptr::allocate() \ - : tbb_exception_ptr::allocate( *(TbbCapturedException) ); -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - -#define TbbRegisterCurrentException(context, TbbCapturedException) \ - if ( context->cancel_group_execution() ) { \ - /* We are the first to signal cancellation, so store the exception that caused it. */ \ - context->my_exception = TbbCurrentException( context, TbbCapturedException ); \ - } - -#define TbbCatchAll(context) \ - catch ( tbb_exception& exc ) { \ - TbbRegisterCurrentException( context, &exc ); \ - } catch ( std::exception& exc ) { \ - TbbRegisterCurrentException( context, captured_exception::allocate(typeid(exc).name(), exc.what()) ); \ - } catch ( ... ) { \ - TbbRegisterCurrentException( context, captured_exception::allocate("...", "Unidentified exception") );\ - } -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_scheduler_common_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler_utility.h b/deal.II/bundled/tbb30_104oss/src/tbb/scheduler_utility.h deleted file mode 100644 index a85aac1b79..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/scheduler_utility.h +++ /dev/null @@ -1,141 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_scheduler_utility_H -#define _TBB_scheduler_utility_H - -#include "scheduler.h" - -namespace tbb { -namespace internal { - -//------------------------------------------------------------------------ -// auto_empty_task -//------------------------------------------------------------------------ - -//! Smart holder for the empty task class with automatic destruction -class auto_empty_task { - task* my_task; - generic_scheduler* my_scheduler; -public: - auto_empty_task ( __TBB_CONTEXT_ARG(generic_scheduler *s, task_group_context* context) ) - : my_task( new(&s->allocate_task(sizeof(empty_task), __TBB_CONTEXT_ARG(NULL, context))) empty_task ) - , my_scheduler(s) - {} - // empty_task has trivial destructor, so there's no need to call it. - ~auto_empty_task () { my_scheduler->free_task<small_local_task>(*my_task); } - - operator task& () { return *my_task; } - task* operator & () { return my_task; } - task_prefix& prefix () { return my_task->prefix(); } -}; // class auto_empty_task - -//------------------------------------------------------------------------ -// fast_reverse_vector -//------------------------------------------------------------------------ - -//! Vector that grows without reallocations, and stores items in the reverse order. -/** Requires to initialize its first segment with a preallocated memory chunk - (usually it is static array or an array allocated on the stack). - The second template parameter specifies maximal number of segments. Each next - segment is twice as large as the previous one. **/ -template<typename T, size_t max_segments = 16> -class fast_reverse_vector -{ -public: - fast_reverse_vector ( T* initial_segment, size_t segment_size ) - : m_cur_segment(initial_segment) - , m_cur_segment_size(segment_size) - , m_pos(segment_size) - , m_num_segments(0) - , m_size(0) - { - __TBB_ASSERT ( initial_segment && segment_size, "Nonempty initial segment must be supplied"); - } - - ~fast_reverse_vector () - { - for ( size_t i = 1; i < m_num_segments; ++i ) - NFS_Free( m_segments[i] ); - } - - size_t size () const { return m_size + m_cur_segment_size - m_pos; } - - void push_back ( const T& val ) - { - if ( !m_pos ) { - m_segments[m_num_segments++] = m_cur_segment; - __TBB_ASSERT ( m_num_segments < max_segments, "Maximal capacity exceeded" ); - m_size += m_cur_segment_size; - m_cur_segment_size *= 2; - m_pos = m_cur_segment_size; - m_cur_segment = (T*)NFS_Allocate( m_cur_segment_size * sizeof(T), 1, NULL ); - } - m_cur_segment[--m_pos] = val; - } - - //! Copies the contents of the vector into the dst array. - /** Can only be used when T is a POD type, as copying does not invoke copy constructors. 
**/ - void copy_memory ( T* dst ) const - { - size_t size = m_cur_segment_size - m_pos; - memcpy( dst, m_cur_segment + m_pos, size * sizeof(T) ); - dst += size; - size = m_cur_segment_size / 2; - for ( long i = (long)m_num_segments - 1; i >= 0; --i ) { - memcpy( dst, m_segments[i], size * sizeof(T) ); - dst += size; - size /= 2; - } - } - -protected: - //! The current (not completely filled) segment - T *m_cur_segment; - - //! Capacity of m_cur_segment - size_t m_cur_segment_size; - - //! Insertion position in m_cur_segment - size_t m_pos; - - //! Array of filled segments (has fixed size specified by the second template parameter) - T *m_segments[max_segments]; - - //! Number of filled segments (the size of m_segments) - size_t m_num_segments; - - //! Number of items in the segments in m_segments - size_t m_size; - -}; // class fast_reverse_vector - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_scheduler_utility_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/semaphore.h b/deal.II/bundled/tbb30_104oss/src/tbb/semaphore.h deleted file mode 100644 index 6436745a85..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/semaphore.h +++ /dev/null @@ -1,132 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbb_semaphore_H -#define __TBB_tbb_semaphore_H - -#include "tbb/tbb_stddef.h" - -#if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" - -#elif __APPLE__ -#include <mach/semaphore.h> -#include <mach/task.h> -#include <mach/mach_init.h> -#include <mach/error.h> - -#else -#include <semaphore.h> -#ifdef TBB_USE_DEBUG -#include <errno.h> -#endif -#endif /*_WIN32||_WIN64*/ - -namespace tbb { -namespace internal { - - -#if _WIN32||_WIN64 -typedef LONG sem_count_t; -//! Edsger Dijkstra's counting semaphore -class semaphore : no_copy { - static const int max_semaphore_cnt = MAXLONG; -public: - //! ctor - semaphore(size_t start_cnt_ = 0) {init_semaphore(start_cnt_);} - //! dtor - ~semaphore() {CloseHandle( sem );} - //! wait/acquire - void P() {WaitForSingleObject( sem, INFINITE );} - //! 
post/release - void V() {ReleaseSemaphore( sem, 1, NULL );} -private: - HANDLE sem; - void init_semaphore(size_t start_cnt_) {sem = CreateSemaphore( NULL, LONG(start_cnt_), max_semaphore_cnt, NULL );} -}; -#elif __APPLE__ -//! Edsger Dijkstra's counting semaphore -class semaphore : no_copy { -public: - //! ctor - semaphore(int start_cnt_ = 0) : sem(start_cnt_) { init_semaphore(start_cnt_); } - //! dtor - ~semaphore() { - kern_return_t ret = semaphore_destroy( mach_task_self(), sem ); - __TBB_ASSERT_EX( ret==err_none, NULL ); - } - //! wait/acquire - void P() { - int ret; - do { - ret = semaphore_wait( sem ); - } while( ret==KERN_ABORTED ); - __TBB_ASSERT( ret==KERN_SUCCESS, "semaphore_wait() failed" ); - } - //! post/release - void V() { semaphore_signal( sem ); } -private: - semaphore_t sem; - void init_semaphore(int start_cnt_) { - kern_return_t ret = semaphore_create( mach_task_self(), &sem, SYNC_POLICY_FIFO, start_cnt_ ); - __TBB_ASSERT_EX( ret==err_none, "failed to create a semaphore" ); - } -}; -#else /* Linux/Unix */ -typedef uint32_t sem_count_t; -//! Edsger Dijkstra's counting semaphore -class semaphore : no_copy { -public: - //! ctor - semaphore(int start_cnt_ = 0 ) { init_semaphore( start_cnt_ ); } - - //! dtor - ~semaphore() { - int ret = sem_destroy( &sem ); - __TBB_ASSERT_EX( !ret, NULL ); - } - //! wait/acquire - void P() { - while( sem_wait( &sem )!=0 ) - __TBB_ASSERT( errno==EINTR, NULL ); - } - //! post/release - void V() { sem_post( &sem ); } -private: - sem_t sem; - void init_semaphore(int start_cnt_) { - int ret = sem_init( &sem, /*shared among threads*/ 0, start_cnt_ ); - __TBB_ASSERT_EX( !ret, NULL ); - } -}; -#endif /* _WIN32||_WIN64 */ - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_tbb_semaphore_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/spin_mutex.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/spin_mutex.cpp deleted file mode 100644 index d5b11af185..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/spin_mutex.cpp +++ /dev/null @@ -1,68 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "tbb/tbb_machine.h" -#include "tbb/spin_mutex.h" -#include "itt_notify.h" -#include "tbb_misc.h" - -namespace tbb { - -void spin_mutex::scoped_lock::internal_acquire( spin_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" ); - ITT_NOTIFY(sync_prepare, &m); - my_unlock_value = __TBB_LockByte(m.flag); - my_mutex = &m; - ITT_NOTIFY(sync_acquired, &m); -} - -void spin_mutex::scoped_lock::internal_release() { - __TBB_ASSERT( my_mutex, "release on spin_mutex::scoped_lock that is not holding a lock" ); - __TBB_ASSERT( !(my_unlock_value&1), "corrupted scoped_lock?" ); - - ITT_NOTIFY(sync_releasing, my_mutex); - __TBB_store_with_release(my_mutex->flag, static_cast<unsigned char>(my_unlock_value)); - my_mutex = NULL; -} - -bool spin_mutex::scoped_lock::internal_try_acquire( spin_mutex& m ) { - __TBB_ASSERT( !my_mutex, "already holding a lock on a spin_mutex" ); - bool result = bool( __TBB_TryLockByte(m.flag) ); - if( result ) { - my_unlock_value = 0; - my_mutex = &m; - ITT_NOTIFY(sync_acquired, &m); - } - return result; -} - -void spin_mutex::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::spin_mutex"), _T("")); -} - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/spin_rw_mutex.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/spin_rw_mutex.cpp deleted file mode 100644 index f4f09da528..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/spin_rw_mutex.cpp +++ /dev/null @@ -1,174 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb/spin_rw_mutex.h" -#include "tbb/tbb_machine.h" -#include "itt_notify.h" - -#if defined(_MSC_VER) && defined(_Wp64) - // Workaround for overzealous compiler warnings in /Wp64 mode - #pragma warning (disable: 4244) -#endif - -namespace tbb { - -template<typename T> // a template can work with private spin_rw_mutex::state_t -static inline T CAS(volatile T &addr, T newv, T oldv) { - // ICC (9.1 and 10.1 tried) unable to do implicit conversion - // from "volatile T*" to "volatile void*", so explicit cast added. - return T(__TBB_CompareAndSwapW((volatile void *)&addr, (intptr_t)newv, (intptr_t)oldv)); -} - -//! Acquire write lock on the given mutex. 
-bool spin_rw_mutex_v3::internal_acquire_writer() -{ - ITT_NOTIFY(sync_prepare, this); - internal::atomic_backoff backoff; - for(;;) { - state_t s = const_cast<volatile state_t&>(state); // ensure reloading - if( !(s & BUSY) ) { // no readers, no writers - if( CAS(state, WRITER, s)==s ) - break; // successfully stored writer flag - backoff.reset(); // we could be very close to complete op. - } else if( !(s & WRITER_PENDING) ) { // no pending writers - __TBB_AtomicOR(&state, WRITER_PENDING); - } - backoff.pause(); - } - ITT_NOTIFY(sync_acquired, this); - return false; -} - -//! Release writer lock on the given mutex -void spin_rw_mutex_v3::internal_release_writer() -{ - ITT_NOTIFY(sync_releasing, this); - __TBB_AtomicAND( &state, READERS ); -} - -//! Acquire read lock on given mutex. -void spin_rw_mutex_v3::internal_acquire_reader() -{ - ITT_NOTIFY(sync_prepare, this); - internal::atomic_backoff backoff; - for(;;) { - state_t s = const_cast<volatile state_t&>(state); // ensure reloading - if( !(s & (WRITER|WRITER_PENDING)) ) { // no writer or write requests - state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER ); - if( !( t&WRITER )) - break; // successfully stored increased number of readers - // writer got there first, undo the increment - __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER ); - } - backoff.pause(); - } - - ITT_NOTIFY(sync_acquired, this); - __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" ); -} - -//! Upgrade reader to become a writer. -/** Returns true if the upgrade happened without re-acquiring the lock and false if opposite */ -bool spin_rw_mutex_v3::internal_upgrade() -{ - state_t s = state; - __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " ); - // check and set writer-pending flag - // required conditions: either no pending writers, or we are the only reader - // (with multiple readers and pending writer, another upgrade could have been requested) - while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) { - state_t old_s = s; - if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) { - internal::atomic_backoff backoff; - ITT_NOTIFY(sync_prepare, this); - // the state should be 0...0111, i.e. 1 reader and waiting writer; - // both new readers and writers are blocked - while( (state & READERS) != ONE_READER ) // more than 1 reader - backoff.pause(); - __TBB_ASSERT((state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER),"invalid state when upgrading to writer"); - - __TBB_FetchAndAddW( &state, - (intptr_t)(ONE_READER+WRITER_PENDING)); - ITT_NOTIFY(sync_acquired, this); - return true; // successfully upgraded - } - } - // slow reacquire - internal_release_reader(); - return internal_acquire_writer(); // always returns false -} - -//! Downgrade writer to a reader -void spin_rw_mutex_v3::internal_downgrade() { - ITT_NOTIFY(sync_releasing, this); - __TBB_FetchAndAddW( &state, (intptr_t)(ONE_READER-WRITER)); - __TBB_ASSERT( state & READERS, "invalid state after downgrade: no readers" ); -} - -//! Release read lock on the given mutex -void spin_rw_mutex_v3::internal_release_reader() -{ - __TBB_ASSERT( state & READERS, "invalid state of a read lock: no readers" ); - ITT_NOTIFY(sync_releasing, this); // release reader - __TBB_FetchAndAddWrelease( &state,-(intptr_t)ONE_READER); -} - -//! 
Try to acquire write lock on the given mutex -bool spin_rw_mutex_v3::internal_try_acquire_writer() -{ - // for a writer: only possible to acquire if no active readers or writers - state_t s = state; - if( !(s & BUSY) ) // no readers, no writers; mask is 1..1101 - if( CAS(state, WRITER, s)==s ) { - ITT_NOTIFY(sync_acquired, this); - return true; // successfully stored writer flag - } - return false; -} - -//! Try to acquire read lock on the given mutex -bool spin_rw_mutex_v3::internal_try_acquire_reader() -{ - // for a reader: acquire if no active or waiting writers - state_t s = state; - if( !(s & (WRITER|WRITER_PENDING)) ) { // no writers - state_t t = (state_t)__TBB_FetchAndAddW( &state, (intptr_t) ONE_READER ); - if( !( t&WRITER )) { // got the lock - ITT_NOTIFY(sync_acquired, this); - return true; // successfully stored increased number of readers - } - // writer got there first, undo the increment - __TBB_FetchAndAddW( &state, -(intptr_t)ONE_READER ); - } - return false; -} - - -void spin_rw_mutex_v3::internal_construct() { - ITT_SYNC_CREATE(this, _T("tbb::spin_rw_mutex"), _T("")); -} -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/task.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/task.cpp deleted file mode 100644 index 72bce80565..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/task.cpp +++ /dev/null @@ -1,278 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include <new> - -// Do not include task.h directly. Use scheduler_common.h instead -#include "scheduler_common.h" -#include "governor.h" -#include "scheduler.h" -#include "itt_notify.h" - -#include "tbb/cache_aligned_allocator.h" -#include "tbb/partitioner.h" - -namespace tbb { - -using namespace std; - -namespace internal { - -//------------------------------------------------------------------------ -// Methods of allocate_root_proxy -//------------------------------------------------------------------------ -task& allocate_root_proxy::allocate( size_t size ) { - internal::generic_scheduler* v = governor::local_scheduler(); - __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" 
); -#if __TBB_TASK_GROUP_CONTEXT - task_prefix& p = v->innermost_running_task->prefix(); - - ITT_STACK_CREATE(p.context->itt_caller); -#endif - // New root task becomes part of the currently running task's cancellation context - return v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, p.context) ); -} - -void allocate_root_proxy::free( task& task ) { - internal::generic_scheduler* v = governor::local_scheduler(); - __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" ); -#if __TBB_TASK_GROUP_CONTEXT - // No need to do anything here as long as there is no context -> task connection -#endif /* __TBB_TASK_GROUP_CONTEXT */ - v->free_task<local_task>( task ); -} - -#if __TBB_TASK_GROUP_CONTEXT -//------------------------------------------------------------------------ -// Methods of allocate_root_with_context_proxy -//------------------------------------------------------------------------ -task& allocate_root_with_context_proxy::allocate( size_t size ) const { - internal::generic_scheduler* v = governor::local_scheduler(); - __TBB_ASSERT( v, "thread did not activate a task_scheduler_init object?" ); - task_prefix& p = v->innermost_running_task->prefix(); - task& t = v->allocate_task( size, __TBB_CONTEXT_ARG(NULL, &my_context) ); - // Supported usage model prohibits concurrent initial binding. Thus we do not - // need interlocked operations or fences to manipulate with my_context.my_kind - if ( my_context.my_kind == task_group_context::binding_required ) { - __TBB_ASSERT ( my_context.my_owner, "Context without owner" ); - __TBB_ASSERT ( !my_context.my_parent, "Parent context set before initial binding" ); - // If we are in the outermost task dispatch loop of a master thread, then - // there is nothing to bind this context to, and we skip the binding part. - if ( v->innermost_running_task != v->dummy_task ) { - // Though the following assignment makes my_context accessible for - // cancelation propagation, we cannot rely on the cancellation being - // propagated into it without taking a global lock. Instead we always - // check the state of my_context's ancestors, and use cancelation - // epoch counters to minimize the depth of inspection. - my_context.my_parent = p.context; - uintptr_t local_count_snapshot = v->local_cancel_count; - // Prevent load of global_cancel_count from being hoisted above store - // to my_context.my_parent and load of local_cancel_count. - __TBB_full_memory_fence(); - // The full fence guarantees that if no cancelation propagation was - // detected by the following condition, either my_context's parent - // has correct cancelation state or my_context will receive cancelation - // signal if new cancelation starts after - if ( local_count_snapshot != global_cancel_count ) { - // Another thread is propagating cancellation right now. Make sure - // that my_context's parent gets the cancellation request (if one - // of its ancestors is canceled) before we read it later on. 
- p.context->propagate_cancellation_from_ancestors(); - } - if ( p.context->my_cancellation_requested ) { - // Propagate cancellation state from the parent context - my_context.my_cancellation_requested = 1; - } - } - my_context.my_kind = task_group_context::binding_completed; - } - // else the context either has already been associated with its parent or is isolated - ITT_STACK_CREATE(my_context.itt_caller); - return t; -} - -void allocate_root_with_context_proxy::free( task& task ) const { - internal::generic_scheduler* v = governor::local_scheduler(); - __TBB_ASSERT( v, "thread does not have initialized task_scheduler_init object?" ); - // No need to do anything here as long as unbinding is performed by context destructor only. - v->free_task<local_task>( task ); -} -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -//------------------------------------------------------------------------ -// Methods of allocate_continuation_proxy -//------------------------------------------------------------------------ -task& allocate_continuation_proxy::allocate( size_t size ) const { - task& t = *((task*)this); - assert_task_valid(t); - generic_scheduler* s = governor::local_scheduler(); - task* parent = t.parent(); - t.prefix().parent = NULL; - return s->allocate_task( size, __TBB_CONTEXT_ARG(parent, t.prefix().context) ); -} - -void allocate_continuation_proxy::free( task& mytask ) const { - // Restore the parent as it was before the corresponding allocate was called. - ((task*)this)->prefix().parent = mytask.parent(); - governor::local_scheduler()->free_task<local_task>(mytask); -} - -//------------------------------------------------------------------------ -// Methods of allocate_child_proxy -//------------------------------------------------------------------------ -task& allocate_child_proxy::allocate( size_t size ) const { - task& t = *((task*)this); - assert_task_valid(t); - generic_scheduler* s = governor::local_scheduler(); - return s->allocate_task( size, __TBB_CONTEXT_ARG(&t, t.prefix().context) ); -} - -void allocate_child_proxy::free( task& mytask ) const { - governor::local_scheduler()->free_task<local_task>(mytask); -} - -//------------------------------------------------------------------------ -// Methods of allocate_additional_child_of_proxy -//------------------------------------------------------------------------ -task& allocate_additional_child_of_proxy::allocate( size_t size ) const { - parent.increment_ref_count(); - generic_scheduler* s = governor::local_scheduler(); - return s->allocate_task( size, __TBB_CONTEXT_ARG(&parent, parent.prefix().context) ); -} - -void allocate_additional_child_of_proxy::free( task& task ) const { - // Undo the increment. We do not check the result of the fetch-and-decrement. - // We could consider be spawning the task if the fetch-and-decrement returns 1. - // But we do not know that was the programmer's intention. - // Furthermore, if it was the programmer's intention, the program has a fundamental - // race condition (that we warn about in Reference manual), because the - // reference count might have become zero before the corresponding call to - // allocate_additional_child_of_proxy::allocate. 
- parent.internal_decrement_ref_count(); - governor::local_scheduler()->free_task<local_task>(task); -} - -//------------------------------------------------------------------------ -// Support for auto_partitioner -//------------------------------------------------------------------------ -size_t get_initial_auto_partitioner_divisor() { - const size_t X_FACTOR = 4; - return X_FACTOR * (governor::max_number_of_workers()+1); -} - -//------------------------------------------------------------------------ -// Methods of affinity_partitioner_base_v3 -//------------------------------------------------------------------------ -void affinity_partitioner_base_v3::resize( unsigned factor ) { - // Check factor to avoid asking for number of workers while there might be no arena. - size_t new_size = factor ? factor*(governor::max_number_of_workers()+1) : 0; - if( new_size!=my_size ) { - if( my_array ) { - NFS_Free( my_array ); - // Following two assignments must be done here for sake of exception safety. - my_array = NULL; - my_size = 0; - } - if( new_size ) { - my_array = static_cast<affinity_id*>(NFS_Allocate(new_size,sizeof(affinity_id), NULL )); - memset( my_array, 0, sizeof(affinity_id)*new_size ); - my_size = new_size; - } - } -} - -} // namespace internal - -using namespace tbb::internal; - -//------------------------------------------------------------------------ -// task -//------------------------------------------------------------------------ - -void task::internal_set_ref_count( int count ) { - __TBB_ASSERT( count>=0, "count must not be negative" ); - __TBB_ASSERT( !(prefix().extra_state & es_ref_count_active), "ref_count race detected" ); - ITT_NOTIFY(sync_releasing, &prefix().ref_count); - prefix().ref_count = count; -} - -internal::reference_count task::internal_decrement_ref_count() { - ITT_NOTIFY( sync_releasing, &prefix().ref_count ); - internal::reference_count k = __TBB_FetchAndDecrementWrelease( &prefix().ref_count ); - __TBB_ASSERT( k>=1, "task's reference count underflowed" ); - if( k==1 ) - ITT_NOTIFY( sync_acquired, &prefix().ref_count ); - return k-1; -} - -task& task::self() { - generic_scheduler *v = governor::local_scheduler(); - v->assert_task_pool_valid(); - __TBB_ASSERT( v->innermost_running_task, NULL ); - return *v->innermost_running_task; -} - -bool task::is_owned_by_current_thread() const { - return true; -} - -void interface5::internal::task_base::destroy( task& victim ) { - // 1 may be a guard reference for wait_for_all, which was not reset because - // of concurrent_wait mode or because prepared root task was not actually used - // for spawning tasks (as in structured_task_group). - __TBB_ASSERT( (intptr_t)victim.prefix().ref_count <= 1, "Task being destroyed must not have children" ); - __TBB_ASSERT( victim.state()==task::allocated, "illegal state for victim task" ); - task* parent = victim.parent(); - victim.~task(); - if( parent ) { - __TBB_ASSERT( parent->state()==task::allocated, "attempt to destroy child of running or corrupted parent?" ); - parent->internal_decrement_ref_count(); - // Despite last reference to *parent removed, it should not be destroyed (documented behavior). 
- } - governor::local_scheduler()->free_task<no_hint>( victim ); -} - -void task::spawn_and_wait_for_all( task_list& list ) { - generic_scheduler* s = governor::local_scheduler(); - task* t = list.first; - if( t ) { - if( &t->prefix().next!=list.next_ptr ) - s->local_spawn( *t->prefix().next, *list.next_ptr ); - list.clear(); - } - s->local_wait_for_all( *this, t ); -} - -/** Defined out of line so that compiler does not replicate task's vtable. - It's pointless to define it inline anyway, because all call sites to it are virtual calls - that the compiler is unlikely to optimize. */ -void task::note_affinity( affinity_id ) { -} - -} // namespace tbb - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/task_group_context.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/task_group_context.cpp deleted file mode 100644 index 595dfb2d7c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/task_group_context.cpp +++ /dev/null @@ -1,279 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "scheduler.h" - -#include "tbb/task.h" -#include "tbb/tbb_exception.h" -#include "tbb/cache_aligned_allocator.h" -#include "itt_notify.h" - -namespace tbb { - -#if __TBB_TASK_GROUP_CONTEXT - -using namespace internal; - -//------------------------------------------------------------------------ -// captured_exception -//------------------------------------------------------------------------ - -inline char* duplicate_string ( const char* src ) { - char* dst = NULL; - if ( src ) { - size_t len = strlen(src) + 1; - dst = (char*)allocate_via_handler_v3(len); - strncpy (dst, src, len); - } - return dst; -} - -void captured_exception::set ( const char* name, const char* info ) throw() { - my_exception_name = duplicate_string( name ); - my_exception_info = duplicate_string( info ); -} - -void captured_exception::clear () throw() { - deallocate_via_handler_v3 (const_cast<char*>(my_exception_name)); - deallocate_via_handler_v3 (const_cast<char*>(my_exception_info)); -} - -captured_exception* captured_exception::move () throw() { - captured_exception *e = (captured_exception*)allocate_via_handler_v3(sizeof(captured_exception)); - if ( e ) { - ::new (e) captured_exception(); - e->my_exception_name = my_exception_name; - e->my_exception_info = my_exception_info; - e->my_dynamic = true; - my_exception_name = my_exception_info = NULL; - } - return e; -} - -void captured_exception::destroy () throw() { - __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" ); - if ( my_dynamic ) { - this->captured_exception::~captured_exception(); - deallocate_via_handler_v3 (this); - } -} - -captured_exception* captured_exception::allocate ( const char* name, const char* info ) { - captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) ); - if ( e ) { - ::new (e) captured_exception(name, info); - e->my_dynamic = true; - } - return e; -} - -const char* captured_exception::name() const throw() { - return my_exception_name; -} - -const char* captured_exception::what() const throw() { - return my_exception_info; -} - - -//------------------------------------------------------------------------ -// tbb_exception_ptr -//------------------------------------------------------------------------ - -#if !TBB_USE_CAPTURED_EXCEPTION - -namespace internal { - -template<typename T> -tbb_exception_ptr* AllocateExceptionContainer( const T& src ) { - tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) ); - if ( eptr ) - new (eptr) tbb_exception_ptr(src); - return eptr; -} - -tbb_exception_ptr* tbb_exception_ptr::allocate () { - return AllocateExceptionContainer( std::current_exception() ); -} - -tbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) { - return AllocateExceptionContainer( std::current_exception() ); -} - -tbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) { - tbb_exception_ptr *res = AllocateExceptionContainer( src ); - src.destroy(); - return res; -} - -void tbb_exception_ptr::destroy () throw() { - this->tbb_exception_ptr::~tbb_exception_ptr(); - deallocate_via_handler_v3 (this); -} - -} // namespace internal -#endif /* !TBB_USE_CAPTURED_EXCEPTION */ - - -//------------------------------------------------------------------------ -// task_group_context -//------------------------------------------------------------------------ - -task_group_context::~task_group_context () { - if ( my_kind != isolated ) { - generic_scheduler *s = 
(generic_scheduler*)my_owner; - if ( governor::is_set(s) ) { - // Local update of the context list - uintptr_t local_count_snapshot = s->local_cancel_count; - s->local_ctx_list_update = 1; - __TBB_full_memory_fence(); - if ( s->nonlocal_ctx_list_update ) { - spin_mutex::scoped_lock lock(s->context_list_mutex); - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - s->local_ctx_list_update = 0; - } - else { - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - __TBB_store_with_release( s->local_ctx_list_update, 0 ); - if ( local_count_snapshot != global_cancel_count ) { - // Another thread was propagating cancellation request when we removed - // ourselves from the list. We must ensure that it is not accessing us - // when this destructor finishes. We'll be able to acquire the lock - // below only after the other thread finishes with us. - spin_mutex::scoped_lock lock(s->context_list_mutex); - } - } - } - else { - // Nonlocal update of the context list - if ( __TBB_FetchAndStoreW(&my_kind, dying) == detached ) { - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - } - else { - __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, 1); - spin_wait_until_eq( s->local_ctx_list_update, 0u ); - s->context_list_mutex.lock(); - my_node.my_prev->my_next = my_node.my_next; - my_node.my_next->my_prev = my_node.my_prev; - s->context_list_mutex.unlock(); - __TBB_FetchAndAddW(&s->nonlocal_ctx_list_update, -1); - } - } - } -#if TBB_USE_DEBUG - my_version_and_traits = 0xDeadBeef; -#endif /* TBB_USE_DEBUG */ - if ( my_exception ) - my_exception->destroy(); - if (itt_caller != ITT_CALLER_NULL) ITT_STACK(caller_destroy, itt_caller); -} - -void task_group_context::init () { - __TBB_ASSERT ( sizeof(uintptr_t) < 32, "Layout of my_version_and_traits must be reconsidered on this platform" ); - __TBB_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" ); - __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" ); - __TBB_ASSERT ( my_kind == isolated || my_kind == bound, "Context can be created only as isolated or bound" ); - my_parent = NULL; - my_cancellation_requested = 0; - my_exception = NULL; - itt_caller = ITT_CALLER_NULL; - if ( my_kind == bound ) { - generic_scheduler *s = governor::local_scheduler(); - my_owner = s; - __TBB_ASSERT ( my_owner, "Thread has not activated a task_scheduler_init object?" ); - // Backward links are used by this thread only, thus no fences are necessary - my_node.my_prev = &s->context_list_head; - s->context_list_head.my_next->my_prev = &my_node; - my_node.my_next = s->context_list_head.my_next; - // Thread local list of contexts allows concurrent traversal by another - // thread while propagating cancellation request. Release fence ensures - // visibility of my_node's members in the traversing thread. 
- __TBB_store_with_release(s->context_list_head.my_next, &my_node); - } -} - -bool task_group_context::cancel_group_execution () { - __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state"); - if ( my_cancellation_requested || __TBB_CompareAndSwapW(&my_cancellation_requested, 1, 0) ) { - // This task group has already been canceled - return false; - } -#if __TBB_ARENA_PER_MASTER - governor::local_scheduler()->my_arena->propagate_cancellation( *this ); -#else /* !__TBB_ARENA_PER_MASTER */ - governor::local_scheduler()->propagate_cancellation( *this ); -#endif /* !__TBB_ARENA_PER_MASTER */ - return true; -} - -bool task_group_context::is_group_execution_cancelled () const { - return my_cancellation_requested != 0; -} - -// IMPORTANT: It is assumed that this method is not used concurrently! -void task_group_context::reset () { - //! \todo Add assertion that this context does not have children - // No fences are necessary since this context can be accessed from another thread - // only after stealing happened (which means necessary fences were used). - if ( my_exception ) { - my_exception->destroy(); - my_exception = NULL; - } - my_cancellation_requested = 0; -} - -void task_group_context::propagate_cancellation_from_ancestors () { - task_group_context *ancestor = my_parent; - while ( ancestor && !ancestor->my_cancellation_requested ) - ancestor = ancestor->my_parent; - if ( ancestor ) { - // One of my ancestor groups was canceled. Cancel all its descendants in my heritage line. - task_group_context *ctx = this; - do { - ctx->my_cancellation_requested = 1; - ctx = ctx->my_parent; - } while ( ctx != ancestor ); - } -} - -void task_group_context::register_pending_exception () { - if ( my_cancellation_requested ) - return; -#if TBB_USE_EXCEPTIONS - try { - throw; - } TbbCatchAll( this ); -#endif /* TBB_USE_EXCEPTIONS */ -} - -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/task_stream.h b/deal.II/bundled/tbb30_104oss/src/tbb/task_stream.h deleted file mode 100644 index 8ba48659c6..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/task_stream.h +++ /dev/null @@ -1,170 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_task_stream_H -#define _TBB_task_stream_H - -#include "tbb/tbb_stddef.h" - -#if __TBB_ARENA_PER_MASTER - -#include <deque> -#include <climits> -#include "tbb/atomic.h" // for __TBB_Atomic* -#include "tbb/spin_mutex.h" -#include "tbb/tbb_allocator.h" -#include "scheduler_common.h" -#include "tbb_misc.h" // for FastRandom - -namespace tbb { -namespace internal { - -//! Essentially, this is just a pair of a queue and a mutex to protect the queue. -/** The reason std::pair is not used is that the code would look less clean - if field names were replaced with 'first' and 'second'. **/ -template< typename T, typename mutex_t > -struct queue_and_mutex { - typedef std::deque< T, tbb_allocator<T> > queue_base_t; - - queue_base_t my_queue; - mutex_t my_mutex; - - queue_and_mutex () : my_queue(), my_mutex() {} - ~queue_and_mutex () {} -}; - -const uintptr_t one = 1; - -inline void set_one_bit( uintptr_t& dest, int pos ) { - __TBB_ASSERT( pos>=0, NULL ); - __TBB_ASSERT( pos<32, NULL ); - __TBB_AtomicOR( &dest, one<<pos ); -} - -inline void clear_one_bit( uintptr_t& dest, int pos ) { - __TBB_ASSERT( pos>=0, NULL ); - __TBB_ASSERT( pos<32, NULL ); - __TBB_AtomicAND( &dest, ~(one<<pos) ); -} - -inline bool is_bit_set( uintptr_t val, int pos ) { - __TBB_ASSERT( pos>=0, NULL ); - __TBB_ASSERT( pos<32, NULL ); - return (val & (one<<pos)) != 0; -} - -//! The container for "fairness-oriented" aka "enqueued" tasks. -class task_stream { - typedef queue_and_mutex <task*, spin_mutex> lane_t; - unsigned N; - uintptr_t population; - FastRandom random; - padded<lane_t>* lanes; - -public: - task_stream() : N(), population(), random(unsigned(&N-(unsigned*)NULL)), lanes() - { - __TBB_ASSERT( sizeof(population) * CHAR_BIT >= 32, NULL ); - } - - void initialize( unsigned n_lanes ) { - N = n_lanes>=32 ? 32 : n_lanes>2 ? 1<<(__TBB_Log2(n_lanes-1)+1) : 2; - __TBB_ASSERT( N==32 || N>=n_lanes && ((N-1)&N)==0, "number of lanes miscalculated"); - lanes = new padded<lane_t>[N]; - __TBB_ASSERT( !population, NULL ); - } - - ~task_stream() { if (lanes) delete[] lanes; } - - //! Push a task into a lane. - void push( task* source, unsigned& last_random ) { - // Lane selection is random. Each thread should keep a separate seed value. - unsigned idx; - for( ; ; ) { - idx = random.get(last_random) & (N-1); - spin_mutex::scoped_lock lock; - if( lock.try_acquire(lanes[idx].my_mutex) ) { - lanes[idx].my_queue.push_back(source); - set_one_bit( population, idx ); - break; - } - } - } - //! Try finding and popping a task. - /** Does not change destination if unsuccessful. */ - void pop( task*& dest, unsigned& last_used_lane ) { - if( !population ) return; // keeps the hot path shorter - // Lane selection is round-robin. Each thread should keep its last used lane. - unsigned idx = (last_used_lane+1)&(N-1); - for( ; population; idx=(idx+1)&(N-1) ) { - if( is_bit_set( population, idx ) ) { - lane_t& lane = lanes[idx]; - spin_mutex::scoped_lock lock; - if( lock.try_acquire(lane.my_mutex) && !lane.my_queue.empty() ) { - dest = lane.my_queue.front(); - lane.my_queue.pop_front(); - if( lane.my_queue.empty() ) - clear_one_bit( population, idx ); - break; - } - } - } - last_used_lane = idx; - } - - //! Checks existence of a task. - bool empty() { - return !population; - } - //! Destroys all remaining tasks in every lane. Returns the number of destroyed tasks. 
- /** Tasks are not executed, because it would potentially create more tasks at a late stage. - The scheduler is really expected to execute all tasks before task_stream destruction. */ - intptr_t drain() { - intptr_t result = 0; - for(unsigned i=0; i<N; ++i) { - lane_t& lane = lanes[i]; - spin_mutex::scoped_lock lock(lane.my_mutex); - for(lane_t::queue_base_t::iterator it=lane.my_queue.begin(); - it!=lane.my_queue.end(); ++it, ++result) - { - task* t = *it; - tbb::task::destroy(*t); - } - lane.my_queue.clear(); - clear_one_bit( population, i ); - } - return result; - } -}; // task_stream - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_ARENA_PER_MASTER */ - -#endif /* _TBB_task_stream_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_assert_impl.h b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_assert_impl.h deleted file mode 100644 index 52cd780874..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_assert_impl.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// IMPORTANT: To use assertion handling in TBB, exactly one of the TBB source files -// should #include tbb_assert_impl.h thus instantiating assertion handling routines. -// The intent of putting it to a separate file is to allow some tests to use it -// as well in order to avoid dependency on the library. - -// include headers for required function declarations -#include <cstdlib> -#include <stdio.h> -#include <string.h> -#include <stdarg.h> -#if _MSC_VER -#include <crtdbg.h> -#define __TBB_USE_DBGBREAK_DLG TBB_USE_DEBUG -#endif - -#if _MSC_VER >= 1400 -#define __TBB_EXPORTED_FUNC __cdecl -#else -#define __TBB_EXPORTED_FUNC -#endif - -using namespace std; - -namespace tbb { - //! 
Type for an assertion handler - typedef void(*assertion_handler_type)( const char* filename, int line, const char* expression, const char * comment ); - - static assertion_handler_type assertion_handler; - - assertion_handler_type __TBB_EXPORTED_FUNC set_assertion_handler( assertion_handler_type new_handler ) { - assertion_handler_type old_handler = assertion_handler; - assertion_handler = new_handler; - return old_handler; - } - - void __TBB_EXPORTED_FUNC assertion_failure( const char* filename, int line, const char* expression, const char* comment ) { - if( assertion_handler_type a = assertion_handler ) { - (*a)(filename,line,expression,comment); - } else { - static bool already_failed; - if( !already_failed ) { - already_failed = true; - fprintf( stderr, "Assertion %s failed on line %d of file %s\n", - expression, line, filename ); - if( comment ) - fprintf( stderr, "Detailed description: %s\n", comment ); -#if __TBB_USE_DBGBREAK_DLG - if(1 == _CrtDbgReport(_CRT_ASSERT, filename, line, "tbb_debug.dll", "%s\r\n%s", expression, comment?comment:"")) - _CrtDbgBreak(); -#else - fflush(stderr); - abort(); -#endif - } - } - } - -#if defined(_MSC_VER)&&_MSC_VER<1400 -# define vsnprintf _vsnprintf -#endif - - namespace internal { - //! Report a runtime warning. - void __TBB_EXPORTED_FUNC runtime_warning( const char* format, ... ) - { - char str[1024]; memset(str, 0, 1024); - va_list args; va_start(args, format); - vsnprintf( str, 1024-1, format, args); - fprintf( stderr, "TBB Warning: %s\n", str); - } - } // namespace internal - -} /* namespace tbb */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.cpp deleted file mode 100644 index 5bd3e45471..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.cpp +++ /dev/null @@ -1,253 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb_main.h" -#include "governor.h" -#include "tbb_misc.h" -#include "itt_notify.h" - -namespace tbb { -namespace internal { - -//------------------------------------------------------------------------ -// Begin shared data layout. -// The following global data items are mostly read-only after initialization. 
-//------------------------------------------------------------------------ - -//! Padding in order to prevent false sharing. -static const char _pad[NFS_MaxLineSize - sizeof(int)] = {}; - -//------------------------------------------------------------------------ -// governor data -basic_tls<generic_scheduler*> governor::theTLS; -#if !__TBB_ARENA_PER_MASTER -arena* governor::theArena; -mutex governor::theArenaMutex; -unsigned governor::NumWorkers; -#endif /* !__TBB_ARENA_PER_MASTER */ -unsigned governor::DefaultNumberOfThreads; -rml::tbb_factory governor::theRMLServerFactory; -bool governor::UsePrivateRML; - -#if __TBB_ARENA_PER_MASTER -//------------------------------------------------------------------------ -// market data -market* market::theMarket; -market::global_market_mutex_type market::theMarketMutex; -#endif /* __TBB_ARENA_PER_MASTER */ - -//------------------------------------------------------------------------ -// One time initialization data - -//! Counter of references to global shared resources such as TLS. -atomic<int> __TBB_InitOnce::count; - -__TBB_InitOnce::mutex_type __TBB_InitOnce::InitializationLock; - -//! Flag that is set to true after one-time initializations are done. -bool __TBB_InitOnce::InitializationDone; - -#if DO_ITT_NOTIFY - static bool ITT_Present; - static bool ITT_InitializationDone; -#endif - -#if !(_WIN32||_WIN64) || __TBB_TASK_CPP_DIRECTLY_INCLUDED - static __TBB_InitOnce __TBB_InitOnceHiddenInstance; -#endif - -//------------------------------------------------------------------------ -// generic_scheduler data - -//! Pointer to the scheduler factory function -generic_scheduler* (*AllocateSchedulerPtr)( arena*, size_t index ); - -//! Table of primes used by fast random-number generator (FastRandom). -/** Also serves to keep anything else from being placed in the same - cache line as the global data items preceding it. 
*/ -static const unsigned Primes[] = { - 0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, - 0xba5703f5, 0xb495a877, 0xe1626741, 0x79695e6b, - 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231, - 0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, - 0xbe4d6fe9, 0x5f15e201, 0x99afc3fd, 0xf3f16801, - 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3, - 0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, - 0x085a3d61, 0x46eb5ea7, 0x3d9910ed, 0x2e687b5b, - 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9, - 0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, - 0x54581edb, 0xf2480f45, 0x0bb9288f, 0xef1affc7, - 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7, - 0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, - 0xfc411073, 0xc3749363, 0xb892d829, 0x3549366b, - 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3, - 0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f -}; - -//------------------------------------------------------------------------ -// End of shared data layout -//------------------------------------------------------------------------ - -//------------------------------------------------------------------------ -// Shared data accessors -//------------------------------------------------------------------------ - -unsigned GetPrime ( unsigned seed ) { - return Primes[seed%(sizeof(Primes)/sizeof(Primes[0]))]; -} - -//------------------------------------------------------------------------ -// __TBB_InitOnce -//------------------------------------------------------------------------ - -void __TBB_InitOnce::add_ref() { - if( ++count==1 ) - governor::acquire_resources(); -} - -void __TBB_InitOnce::remove_ref() { - int k = --count; - __TBB_ASSERT(k>=0,"removed __TBB_InitOnce ref that was not added?"); - if( k==0 ) - governor::release_resources(); -} - -//------------------------------------------------------------------------ -// One-time Initializations -//------------------------------------------------------------------------ - -//! Defined in cache_aligned_allocator.cpp -void initialize_cache_aligned_allocator(); - -#if __TBB_SURVIVE_THREAD_SWITCH -//! Defined in governor.cpp -void initialize_survive_thread_switch(); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - -//! Defined in scheduler.cpp -void Scheduler_OneTimeInitialization ( bool itt_present ); - -#if DO_ITT_NOTIFY - -/** Thread-unsafe lazy one-time initialization of tools interop. - Used by both dummy handlers and general TBB one-time initialization routine. **/ -void ITT_DoUnsafeOneTimeInitialization () { - if ( !ITT_InitializationDone ) { - ITT_Present = (__TBB_load_ittnotify()!=0); - ITT_InitializationDone = true; -#if __TBB_ARENA_PER_MASTER - ITT_SYNC_CREATE(&market::theMarketMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization); -#else /* !__TBB_ARENA_PER_MASTER */ - ITT_SYNC_CREATE(&governor::theArenaMutex, SyncType_GlobalLock, SyncObj_SchedulerInitialization); -#endif /* !__TBB_ARENA_PER_MASTER */ - } -} - -/** Thread-safe lazy one-time initialization of tools interop. - Used by dummy handlers only. **/ -extern "C" -void ITT_DoOneTimeInitialization() { - __TBB_InitOnce::lock(); - ITT_DoUnsafeOneTimeInitialization(); - __TBB_InitOnce::unlock(); -} -#endif /* DO_ITT_NOTIFY */ - -//! Performs thread-safe lazy one-time general TBB initialization. -void DoOneTimeInitializations() { - __TBB_InitOnce::lock(); - // No fence required for load of InitializationDone, because we are inside a critical section. 
- if( !__TBB_InitOnce::InitializationDone ) { - __TBB_InitOnce::add_ref(); - if( GetBoolEnvironmentVariable("TBB_VERSION") ) - PrintVersion(); - bool have_itt = false; -#if DO_ITT_NOTIFY - ITT_DoUnsafeOneTimeInitialization(); - have_itt = ITT_Present; -#endif /* DO_ITT_NOTIFY */ - initialize_cache_aligned_allocator(); -#if __TBB_SURVIVE_THREAD_SWITCH - initialize_survive_thread_switch(); -#endif /* __TBB_SURVIVE_THREAD_SWITCH */ - governor::print_version_info(); - PrintExtraVersionInfo( "SCHEDULER", have_itt ? "default" : "Intel" ); - Scheduler_OneTimeInitialization( have_itt ); - __TBB_InitOnce::InitializationDone = true; - } - __TBB_InitOnce::unlock(); -} - -#if (_WIN32||_WIN64) && !__TBB_TASK_CPP_DIRECTLY_INCLUDED -//! Windows "DllMain" that handles startup and shutdown of dynamic library. -extern "C" bool WINAPI DllMain( HANDLE /*hinstDLL*/, DWORD reason, LPVOID /*lpvReserved*/ ) { - switch( reason ) { - case DLL_PROCESS_ATTACH: - __TBB_InitOnce::add_ref(); - break; - case DLL_PROCESS_DETACH: - __TBB_InitOnce::remove_ref(); - // It is assumed that InitializationDone is not set after DLL_PROCESS_DETACH, - // and thus no race on InitializationDone is possible. - if( __TBB_InitOnce::initialization_done() ) { - // Remove reference that we added in DoOneTimeInitializations. - __TBB_InitOnce::remove_ref(); - } - break; - case DLL_THREAD_DETACH: - governor::terminate_auto_initialized_scheduler(); - break; - } - return true; -} -#endif /* (_WIN32||_WIN64) && !__TBB_TASK_CPP_DIRECTLY_INCLUDED */ - -void itt_store_pointer_with_release_v3( void* dst, void* src ) { - ITT_NOTIFY(sync_releasing, dst); - __TBB_store_with_release(*static_cast<void**>(dst),src); -} - -void* itt_load_pointer_with_acquire_v3( const void* src ) { - void* result = __TBB_load_with_acquire(*static_cast<void*const*>(src)); - ITT_NOTIFY(sync_acquired, const_cast<void*>(src)); - return result; -} - -void* itt_load_pointer_v3( const void* src ) { - void* result = *static_cast<void*const*>(src); - return result; -} - -void itt_set_sync_name_v3( void* obj, const tchar* name) { - ITT_SYNC_RENAME(obj, name); - (void)obj, (void)name; // Prevents compiler warning when ITT support is switched off -} - - -} // namespace internal -} // namespace tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.h b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.h deleted file mode 100644 index b207b340b5..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_main.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_tbb_main_H -#define _TBB_tbb_main_H - -#include "tbb/atomic.h" - -namespace tbb { - -namespace internal { - -void DoOneTimeInitializations (); - -//------------------------------------------------------------------------ -// __TBB_InitOnce -//------------------------------------------------------------------------ - -//! Class that supports TBB initialization. -/** It handles acquisition and release of global resources (e.g. TLS) during startup and shutdown, - as well as synchronization for DoOneTimeInitializations. */ -class __TBB_InitOnce { - friend void DoOneTimeInitializations(); - friend void ITT_DoUnsafeOneTimeInitialization (); - - static atomic<int> count; - - //! Platform specific code to acquire resources. - static void acquire_resources(); - - //! Platform specific code to release resources. - static void release_resources(); - - //! Specifies if the one-time initializations has been done. - static bool InitializationDone; - - // Scenarios are possible when tools interop has to be initialized before the - // TBB itself. This imposes a requirement that the global initialization lock - // has to support valid static initialization, and does not issue any tool - // notifications in any build mode. - typedef unsigned char mutex_type; - - // Global initialization lock - static mutex_type InitializationLock; - -public: - static void lock() { __TBB_LockByte( InitializationLock ); } - - static void unlock() { __TBB_store_with_release( InitializationLock, 0 ); } - - static bool initialization_done() { return __TBB_load_with_acquire(InitializationDone); } - - //! Add initial reference to resources. - /** We assume that dynamic loading of the library prevents any other threads - from entering the library until this constructor has finished running. **/ - __TBB_InitOnce() { add_ref(); } - - //! Remove the initial reference to resources. - /** This is not necessarily the last reference if other threads are still running. **/ - ~__TBB_InitOnce() { - remove_ref(); - // We assume that InitializationDone is not set after file-scope destructors - // start running, and thus no race on InitializationDone is possible. - if( initialization_done() ) { - // Remove an extra reference that was added in DoOneTimeInitializations. - remove_ref(); - } - } - //! Add reference to resources. If first reference added, acquire the resources. - static void add_ref(); - - //! Remove reference to resources. If last reference removed, release the resources. - static void remove_ref(); -}; // class __TBB_InitOnce - - -} // namespace internal - -} // namespace tbb - -#endif /* _TBB_tbb_main_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.cpp deleted file mode 100644 index 9efb6a62e3..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.cpp +++ /dev/null @@ -1,230 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// Source file for miscellaneous entities that are infrequently referenced by -// an executing program. - -#include "tbb/tbb_stddef.h" -#include "tbb_assert_impl.h" // Out-of-line TBB assertion handling routines are instantiated here. -#include "tbb/tbb_exception.h" -#include "tbb/tbb_machine.h" -#include "tbb_misc.h" -#include <cstdio> -#include <cstdlib> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers - #pragma warning (push) - #pragma warning (disable: 4530) -#endif - -#include <cstring> - -#if !TBB_USE_EXCEPTIONS && _MSC_VER - #pragma warning (pop) -#endif - -using namespace std; - -namespace tbb { - -const char* bad_last_alloc::what() const throw() { return "bad allocation in previous or concurrent attempt"; } -const char* improper_lock::what() const throw() { return "attempted recursive lock on critical section or non-recursive mutex"; } -const char* invalid_multiple_scheduling::what() const throw() { return "The same task_handle object cannot be executed more than once"; } -const char* missing_wait::what() const throw() { return "wait() was not called on the structured_task_group"; } - -namespace internal { - -#if TBB_USE_EXCEPTIONS - #define DO_THROW(exc, init_args) throw exc init_args; -#else /* !TBB_USE_EXCEPTIONS */ - #define PRINT_ERROR_AND_ABORT(exc_name, msg) \ - fprintf (stderr, "Exception %s with message %s would've been thrown, " \ - "if exception handling were not disabled. Aborting.\n", exc_name, msg); \ - fflush(stderr); \ - abort(); - #define DO_THROW(exc, init_args) PRINT_ERROR_AND_ABORT(#exc, #init_args) -#endif /* !TBB_USE_EXCEPTIONS */ - - -/* The "what" should be fairly short, not more than about 128 characters. - Because we control all the call sites to handle_perror, it is pointless - to bullet-proof it for very long strings. - - Design note: ADR put this routine off to the side in tbb_misc.cpp instead of - Task.cpp because the throw generates a pathetic lot of code, and ADR wanted - this large chunk of code to be placed on a cold page. 
*/ -void handle_perror( int error_code, const char* what ) { - char buf[256]; - __TBB_ASSERT( strlen(what) < sizeof(buf) - 64, "Error description is too long" ); - sprintf(buf,"%s: ",what); - char* end = strchr(buf,0); - size_t n = buf+sizeof(buf)-end; - strncpy( end, strerror( error_code ), n ); - // Ensure that buffer ends in terminator. - buf[sizeof(buf)-1] = 0; -#if TBB_USE_EXCEPTIONS - throw runtime_error(buf); -#else - PRINT_ERROR_AND_ABORT( "runtime_error", buf); -#endif /* !TBB_USE_EXCEPTIONS */ -} - -#if _WIN32||_WIN64 -void handle_win_error( int error_code ) { - char buf[512]; - FormatMessageA( FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, error_code, 0, buf, sizeof(buf), NULL ); -#if TBB_USE_EXCEPTIONS - throw runtime_error(buf); -#else - PRINT_ERROR_AND_ABORT( "runtime_error", buf); -#endif /* !TBB_USE_EXCEPTIONS */ -} -#endif // _WIN32||_WIN64 - -void throw_bad_last_alloc_exception_v4() { - throw_exception_v4(eid_bad_last_alloc); -} - -void throw_exception_v4 ( exception_id eid ) { - __TBB_ASSERT ( eid > 0 && eid < eid_max, "Unknown exception ID" ); - switch ( eid ) { - case eid_bad_alloc: DO_THROW( bad_alloc, () ); - case eid_bad_last_alloc: DO_THROW( bad_last_alloc, () ); - case eid_nonpositive_step: DO_THROW( invalid_argument, ("Step must be positive") ); - case eid_out_of_range: DO_THROW( out_of_range, ("Index out of requested size range") ); - case eid_segment_range_error: DO_THROW( range_error, ("Index out of allocated segment slots") ); - case eid_index_range_error: DO_THROW( range_error, ("Index is not allocated") ); - case eid_missing_wait: DO_THROW( missing_wait, () ); - case eid_invalid_multiple_scheduling: DO_THROW( invalid_multiple_scheduling, () ); - case eid_improper_lock: DO_THROW( improper_lock, () ); - case eid_possible_deadlock: DO_THROW( runtime_error, ("Resource deadlock would occur") ); - case eid_operation_not_permitted: DO_THROW( runtime_error, ("Operation not permitted") ); - case eid_condvar_wait_failed: DO_THROW( runtime_error, ("Wait on condition variable failed") ); - case eid_invalid_load_factor: DO_THROW( out_of_range, ("Invalid hash load factor") ); - case eid_reserved: DO_THROW( out_of_range, ("[backward compatibility] Invalid number of buckets") ); - case eid_invalid_swap: DO_THROW( invalid_argument, ("swap() is invalid on non-equal allocators") ); - case eid_reservation_length_error: DO_THROW( length_error, ("reservation size exceeds permitted max size") ); - case eid_invalid_key: DO_THROW( out_of_range, ("invalid key") ); - default: break; - } -#if !TBB_USE_EXCEPTIONS && __APPLE__ - out_of_range e1(""); - length_error e2(""); - range_error e3(""); - invalid_argument e4(""); -#endif /* !TBB_USE_EXCEPTIONS && __APPLE__ */ -} - -#if _XBOX -bool GetBoolEnvironmentVariable( const char * name ) { return false;} -#else -bool GetBoolEnvironmentVariable( const char * name ) { - if( const char* s = getenv(name) ) - return strcmp(s,"0") != 0; - return false; -} -#endif /* !_XBOX */ - -#include "tbb_version.h" - -/** The leading "\0" is here so that applying "strings" to the binary delivers a clean result. 
*/ -static const char VersionString[] = "\0" TBB_VERSION_STRINGS; - -static bool PrintVersionFlag = false; - -void PrintVersion() { - PrintVersionFlag = true; - fputs(VersionString+1,stderr); -} - -void PrintExtraVersionInfo( const char* category, const char* description ) { - if( PrintVersionFlag ) - fprintf(stderr, "%s: %s\t%s\n", "TBB", category, description ); -} - -void PrintRMLVersionInfo( void* arg, const char* server_info ) { - PrintExtraVersionInfo( server_info, (const char *)arg ); -} - -} // namespace internal - -extern "C" int TBB_runtime_interface_version() { - return TBB_INTERFACE_VERSION; -} - -} // namespace tbb - -#if !__TBB_RML_STATIC -#if __TBB_x86_32 - -#include "tbb/atomic.h" - -// in MSVC environment, int64_t defined in tbb::internal namespace only (see tbb_stddef.h) -#if _MSC_VER -using tbb::internal::int64_t; -#endif - -//! Warn about 8-byte store that crosses a cache line. -extern "C" void __TBB_machine_store8_slow_perf_warning( volatile void *ptr ) { - // Report run-time warning unless we have already recently reported warning for that address. - const unsigned n = 4; - static tbb::atomic<void*> cache[n]; - static tbb::atomic<unsigned> k; - for( unsigned i=0; i<n; ++i ) - if( ptr==cache[i] ) - goto done; - cache[(k++)%n] = const_cast<void*>(ptr); - tbb::internal::runtime_warning( "atomic store on misaligned 8-byte location %p is slow", ptr ); -done:; -} - -//! Handle 8-byte store that crosses a cache line. -extern "C" void __TBB_machine_store8_slow( volatile void *ptr, int64_t value ) { - for( tbb::internal::atomic_backoff b;; b.pause() ) { - int64_t tmp = *(int64_t*)ptr; - if( __TBB_CompareAndSwap8(ptr,value,tmp)==tmp ) - break; - } -} - -#endif /* __TBB_x86_32 */ -#endif /* !__TBB_RML_STATIC */ - -#if __TBB_ipf -extern "C" intptr_t __TBB_machine_lockbyte( volatile unsigned char& flag ) { - if ( !__TBB_TryLockByte(flag) ) { - tbb::internal::atomic_backoff b; - do { - b.pause(); - } while ( !__TBB_TryLockByte(flag) ); - } - return 0; -} -#endif diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.h b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.h deleted file mode 100644 index 21d046480d..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_misc.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_tbb_misc_H -#define _TBB_tbb_misc_H - -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_machine.h" - -#if _WIN32||_WIN64 -#include "tbb/machine/windows_api.h" -#elif __linux__ -#include <sys/sysinfo.h> -#define __TBB_DetectNumberOfWorkers() get_nprocs() -#elif defined(__sun) -#include <sys/sysinfo.h> -#include <unistd.h> -#elif defined(__NetBSD__) || defined(__FreeBSD__) || defined(_AIX) -#include <unistd.h> -#endif - -namespace tbb { -namespace internal { - -const size_t MByte = 1<<20; - -#if !defined(__TBB_WORDSIZE) - const size_t ThreadStackSize = 1*MByte; -#elif __TBB_WORDSIZE<=4 - const size_t ThreadStackSize = 2*MByte; -#else - const size_t ThreadStackSize = 4*MByte; -#endif - -#if defined(__TBB_DetectNumberOfWorkers) // covers Linux, Mac OS*, and other platforms - -static inline int DetectNumberOfWorkers() { - int n = __TBB_DetectNumberOfWorkers(); - return n>0? n: 1; // Fail safety strap -} - -#else /* !__TBB_DetectNumberOfWorkers */ - -#if _WIN32||_WIN64 - -static inline int DetectNumberOfWorkers() { - SYSTEM_INFO si; - GetSystemInfo(&si); - return static_cast<int>(si.dwNumberOfProcessors); -} - -#elif defined(_SC_NPROCESSORS_ONLN) - -static inline int DetectNumberOfWorkers() { - int number_of_workers = sysconf(_SC_NPROCESSORS_ONLN); - return number_of_workers>0? number_of_workers: 1; -} - -#else -#error DetectNumberOfWorkers: Method to detect the number of available CPUs is unknown -#endif /* os kind */ - -#endif /* !__TBB_DetectNumberOfWorkers */ - -//! Throws std::runtime_error with what() returning error_code description prefixed with aux_info -void handle_win_error( int error_code ); - -//! True if environment variable with given name is set and not 0; otherwise false. -bool GetBoolEnvironmentVariable( const char * name ); - -//! Print TBB version information on stderr -void PrintVersion(); - -//! Print extra TBB version information on stderr -void PrintExtraVersionInfo( const char* category, const char* description ); - -//! A callback routine to print RML version information on stderr -void PrintRMLVersionInfo( void* arg, const char* server_info ); - -// For TBB compilation only; not to be used in public headers -#if defined(min) || defined(max) -#undef min -#undef max -#endif - -//! Utility template function returning lesser of the two values. -/** Provided here to avoid including not strict safe <algorithm>.\n - In case operands cause signed/unsigned or size mismatch warnings it is caller's - responsibility to do the appropriate cast before calling the function. **/ -template<typename T1, typename T2> -T1 min ( const T1& val1, const T2& val2 ) { - return val1 < val2 ? val1 : val2; -} - -//! Utility template function returning greater of the two values. -/** Provided here to avoid including not strict safe <algorithm>.\n - In case operands cause signed/unsigned or size mismatch warnings it is caller's - responsibility to do the appropriate cast before calling the function. **/ -template<typename T1, typename T2> -T1 max ( const T1& val1, const T2& val2 ) { - return val1 < val2 ? val2 : val1; -} - -//------------------------------------------------------------------------ -// FastRandom -//------------------------------------------------------------------------ - -/** Defined in tbb_main.cpp **/ -unsigned GetPrime ( unsigned seed ); - -//! A fast random number generator. -/** Uses linear congruential method. 
*/ -class FastRandom { - unsigned x, a; -public: - //! Get a random number. - unsigned short get() { - return get(x); - } - //! Get a random number for the given seed; update the seed for next use. - unsigned short get( unsigned& seed ) { - unsigned short r = (unsigned short)(seed>>16); - seed = seed*a+1; - return r; - } - //! Construct a random number generator. - FastRandom( unsigned seed ) { - x = seed; - a = GetPrime( seed ); - } -}; - -} // namespace internal -} // namespace tbb - -#endif /* _TBB_tbb_misc_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_resource.rc b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_resource.rc deleted file mode 100644 index b20cb53745..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_resource.rc +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. -// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. - -// Microsoft Visual C++ generated resource script. -// -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NO_MFC 1 -#define _APS_NEXT_RESOURCE_VALUE 102 -#define _APS_NEXT_COMMAND_VALUE 40001 -#define _APS_NEXT_CONTROL_VALUE 1001 -#define _APS_NEXT_SYMED_VALUE 101 -#endif -#endif - -#define APSTUDIO_READONLY_SYMBOLS -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 2 resource. 
-// -#include <winresrc.h> -#define ENDL "\r\n" -#include "tbb_version.h" - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS - -///////////////////////////////////////////////////////////////////////////// -// Neutral resources - -//#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU) -#ifdef _WIN32 -LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL -#pragma code_page(1252) -#endif //_WIN32 - -///////////////////////////////////////////////////////////////////////////// -// manifest integration -#ifdef TBB_MANIFEST -#include "winuser.h" -2 RT_MANIFEST tbbmanifest.exe.manifest -#endif - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION TBB_VERNUMBERS - PRODUCTVERSION TBB_VERNUMBERS - FILEFLAGSMASK 0x17L -#ifdef _DEBUG - FILEFLAGS 0x1L -#else - FILEFLAGS 0x0L -#endif - FILEOS 0x40004L - FILETYPE 0x2L - FILESUBTYPE 0x0L -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "000004b0" - BEGIN - VALUE "CompanyName", "Intel Corporation\0" - VALUE "FileDescription", "Threading Building Blocks library\0" - VALUE "FileVersion", TBB_VERSION "\0" -//what is it? VALUE "InternalName", "tbb\0" - VALUE "LegalCopyright", "Copyright 2005-2010 Intel Corporation. All Rights Reserved.\0" - VALUE "LegalTrademarks", "\0" -#ifndef TBB_USE_DEBUG - VALUE "OriginalFilename", "tbb.dll\0" -#else - VALUE "OriginalFilename", "tbb_debug.dll\0" -#endif - VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0" - VALUE "ProductVersion", TBB_VERSION "\0" - VALUE "Comments", TBB_VERSION_STRINGS "\0" - VALUE "PrivateBuild", "\0" - VALUE "SpecialBuild", "\0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0, 1200 - END -END - -//#endif // Neutral resources -///////////////////////////////////////////////////////////////////////////// - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource. -// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.cpp deleted file mode 100644 index b8946646cd..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.cpp +++ /dev/null @@ -1,174 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbb_statistics.h" - -#if __TBB_STATISTICS - -#include <climits> -#include <cstdarg> -#if __TBB_STATISTICS_STDOUT -#include <cstdio> -#endif - -#include "tbb/spin_mutex.h" - -namespace tbb { -namespace internal { - -//! Human readable titles of statistics groups defined by statistics_groups enum. -/** The order of this vector elements must correspond to the statistics_counters - structure layout. **/ -const char* StatGroupTitles[] = { - "task objects", "tasks executed", "stealing attempts", "task proxies", "arena", "market" -}; - -//! Human readable titles of statistics elements defined by statistics_counters struct. -/** The order of this vector elements must correspond to the statistics_counters - structure layout (with NULLs interspersed to separate groups). **/ -const char* StatFieldTitles[] = { - "active", "freed", "big", NULL, - "total", "w/o spawn", NULL, - "succeeded", "failed", "conflicts", NULL, - "mailed", "revoked", "stolen", "bypassed", "ignored", NULL, - "switches", "roundtrips", NULL, - "roundtrips", NULL, -}; - -//! Class for logging statistics -/** There should be only one instance of this class. - Results are written to a file "statistics.txt" in tab-separated format. */ -class statistics_logger { -public: - statistics_logger () { - __TBB_ASSERT( sg_end - 1 == 1 << (sizeof(StatGroupTitles)/sizeof(*StatGroupTitles) - 1), NULL ); - - my_file = fopen("statistics.txt","w"); - if( !my_file ) - perror("fopen(\"statistics.txt\"\")"); - // Initialize groups dump layout info - group_start_field[0] = 0; - for ( size_t i = 0, j = 0; i < NumGroups; ++i, ++j ) { - __TBB_ASSERT( StatFieldTitles[j], "Empty group occurred" ); - while ( StatFieldTitles[j] ) - ++j; - group_start_field[i + 1] = j - i; // -i accounts for preceding NULL separators - } - __TBB_ASSERT( group_start_field[NumGroups] == statistics_counters::size(), - "Wrong number of elements in StatFieldTitles" ); - dump( "%-*s", IDColumnWidth, ""); - process_groups( &statistics_logger::print_group_title ); - dump( "%-*s", IDColumnWidth, "ID"); - process_groups( &statistics_logger::print_field_titles ); - } - - ~statistics_logger () { fclose(my_file); } - - void record( const statistics_counters& c, size_t id ) { - spin_mutex::scoped_lock lock(my_mutex); - counters_to_dump = &c; - const char* idString = NULL; - switch ( id ) { - case 0: - idString = "M"; break; - case workers_counters_total: - idString = "Wtot"; break; - case arena_counters_total: - idString = "Tot"; break; - default: - dump( "W%-*u", IDColumnWidth - 1, id); - } - if ( idString ) - dump( "%-*s", IDColumnWidth, idString); - process_groups( &statistics_logger::print_field_values ); - } -private: - static const size_t IDColumnWidth = 5; - static const size_t StatisticsColumnWidth = 10; - static const size_t NumGroups = sizeof(StatGroupTitles)/sizeof(char*); - - //! File into which statistics are written. - FILE* my_file; - //! Mutex that serializes accesses to my_file - spin_mutex my_mutex; - //! Indices of the each group's first field in statistics_counters struct. 
- /** An extra element is used to track the total number of statistics fields. **/ - size_t group_start_field[NumGroups + 1]; - //! Currently processed set of counters. - const statistics_counters* counters_to_dump; - - void dump ( char const* fmt, ... ) { - va_list args; - va_start( args, fmt ); - if ( my_file ) - vfprintf( my_file, fmt, args ); - va_start( args, fmt ); -#if __TBB_STATISTICS_STDOUT - vprintf( fmt, args ); -#endif - } - - void process_groups ( void (statistics_logger::*per_group_action)(size_t group_idx) ) { - for ( size_t i = 0, group_flag = 1; i < NumGroups; ++i, group_flag <<= 1 ) { - __TBB_ASSERT( group_flag < sg_end, "StatGroupTitles contents is incompatible with statistics_groups definition" ); - if ( __TBB_ActiveStatisticsGroups & group_flag ) - (this->*per_group_action)( i ); - } - dump( "\n" ); - } - - void print_group_title ( size_t group_idx ) { - dump( "%-*s", (group_start_field[group_idx + 1] - group_start_field[group_idx]) * (StatisticsColumnWidth + 1), - StatGroupTitles[group_idx] ); - } - - void print_field_titles ( size_t group_idx ) { - // +group_idx accounts for preceding NULL separators - size_t i = group_start_field[group_idx] + group_idx; - while ( StatFieldTitles[i] ) - dump( "%-*s ", StatisticsColumnWidth, StatFieldTitles[i++] ); - } - - void print_field_values ( size_t group_idx ) { - size_t begin = group_start_field[group_idx], - end = group_start_field[group_idx + 1]; - for ( size_t i = begin; i < end; ++i ) - dump( "%-*ld ", StatisticsColumnWidth, counters_to_dump->field(i) ); - } -}; // class statistics_logger - -static statistics_logger the_statistics; - -void dump_statistics ( const statistics_counters& c, size_t id ) { - the_statistics.record(c, id); -} - -} // namespace internal -} // namespace tbb - -#endif /* __TBB_STATISTICS */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.h b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.h deleted file mode 100644 index 7e59e02792..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_statistics.h +++ /dev/null @@ -1,204 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _TBB_tbb_statistics_H -#define _TBB_tbb_statistics_H - -/** - This file defines parameters of the internal statistics collected by the TBB - library (currently by the task scheduler only). - - In __TBB_ARENA_PER_MASTER implementation statistics is accumulated in each - thread separately and is dumped when the scheduler instance in the given - thread is destroyed. For apps with multiple master threads or with the same - master repeatedly initializing and then deinitializing task scheduler this - results in TBB workers statistics getting unseparably mixed. - - Therefore in new __TBB_ARENA_PER_MASTER mode statistics is instead accumulated - in arena slots, and should be dumped when arena gets destroyed. This separates - statistics collected for each scheduler activity region in each master thread. - - With the current RML implementation (TBB 2.2, 3.0) to avoid complete loss of - statistics data during app shutdown (because of lazy workers deinitialization - logic) set __TBB_STATISTICS_EARLY_DUMP macro to write the statistics at the - moment a master thread deinitializes its scheduler. This may happen a little - earlier than the moment of arena destruction resulting in the following undesired - (though usually tolerable) effects: - - a few events related to unsuccessful stealing or thread pool activity may be lost, - - statistics may be substantially incomplete in case of FIFO tasks used in - the FAF mode. - - Macro __TBB_STATISTICS_STDOUT and global variable __TBB_ActiveStatisticsGroups - defined below can be used to configure the statistics output. - - To add new counter: - 1) Insert it into the appropriate group range in statistics_counters; - 2) Insert the corresponding field title into StatFieldTitles (preserving - relative order of the fields). - - To add new counters group: - 1) Insert new group bit flag into statistics_groups; - 2) Insert the new group title into StatGroupTitles (preserving - relative order of the groups). - 3) Add counter belonging to the new group as described above -**/ - -#include "tbb/tbb_stddef.h" - -#ifndef __TBB_STATISTICS -#define __TBB_STATISTICS 0 -#endif /* __TBB_STATISTICS */ - -#if __TBB_STATISTICS - -#include <string.h> // for memset - -//! Dump counters into stdout as well. -/** By default statistics counters are written to the file "statistics.txt" only. **/ -#define __TBB_STATISTICS_STDOUT 1 - -//! Dump statistics for an arena when its master completes -/** By default (when this macro is not set) the statistics is sent to output when - arena object is destroyed. But with the current lazy workers termination - logic default behavior may result in loosing all statistics output. **/ -#define __TBB_STATISTICS_EARLY_DUMP 1 - -#define GATHER_STATISTIC(x) (x) - -namespace tbb { -namespace internal { - -//! Groups of statistics counters. -/** The order of enumerators must be the same as the order of the corresponding - field groups in the statistics_counters structure. **/ -enum statistics_groups { - sg_task_allocation = 0x01, - sg_task_execution = 0x02, - sg_stealing = 0x04, - sg_affinity = 0x08, - sg_arena = 0x10, - sg_market = 0x20, - // List end marker. Insert new groups only before it. - sg_end -}; - -//! Groups of counters to output -const uintptr_t __TBB_ActiveStatisticsGroups = sg_task_execution | sg_stealing | sg_affinity | sg_arena | sg_market; - -//! A set of various statistics counters that are updated by the library on per thread basis. -/** All the fields must be of the same type (statistics_counters::counter_type). 
- This is necessary to allow reinterpreting this structure as an array. **/ -struct statistics_counters { - typedef long counter_type; - - // Group: sg_task_allocation - // Counters in this group can have negative values as the tasks migrate across - // threads while the associated counters are updated in the current thread only - // to avoid data races - - //! Number of tasks allocated and not yet destroyed - counter_type active_tasks; - //! Number of task corpses stored for future reuse - counter_type free_list_length; - //! Number of big tasks allocated during the run - /** To find total number of tasks malloc'd, compute (big_tasks+small_task_count) */ - counter_type big_tasks; - - // Group: sg_task_execution - - //! Number of tasks executed - counter_type tasks_executed; - //! Number of elided spawns - counter_type spawns_bypassed; - - // Group: sg_stealing - - //! Number of tasks successfully stolen - counter_type steals_committed; - //! Number of failed stealing attempts - counter_type steals_failed; - //! Number of failed stealing attempts - counter_type thieves_conflicts; - //! Number of tasks received from mailbox - - // Group: sg_affinity - - counter_type mails_received; - //! Number of affinitized tasks executed by the owner - /** Goes as "revoked" in statistics printout. **/ - counter_type proxies_executed; - //! Number of affinitized tasks intercepted by thieves - counter_type proxies_stolen; - //! Number of proxy bypasses by thieves during stealing - counter_type proxies_bypassed; - //! Number of affinitized tasks executed by the owner via scheduler bypass mechanism - counter_type affinity_ignored; - - // Group: sg_arena - - //! Number of times the state of arena switched between "full" and "empty" - counter_type gate_switches; - //! Number of times workers left an arena and returned into the market - counter_type arena_roundtrips; - //! Number of times workers left the market and returned into RML - counter_type market_roundtrips; - - // Constructor and helpers - - statistics_counters() { reset(); } - - void reset () { memset( this, 0, sizeof(statistics_counters) ); } - - counter_type& field ( size_t index ) { return reinterpret_cast<counter_type*>(this)[index]; } - - const counter_type& field ( size_t index ) const { return reinterpret_cast<const counter_type*>(this)[index]; } - - static size_t size () { return sizeof(statistics_counters) / sizeof(counter_type); } - - const statistics_counters& operator += ( const statistics_counters& rhs ) { - for ( size_t i = 0; i < size(); ++i ) - field(i) += rhs.field(i); - return *this; - } -}; // statistics_counters - -static const size_t workers_counters_total = (size_t)-1; -static const size_t arena_counters_total = (size_t)-2; - -void dump_statistics ( const statistics_counters& c, size_t id ); - -} // namespace internal -} // namespace tbb - -#else /* !__TBB_STATISTICS */ - -#define GATHER_STATISTIC(x) ((void)0) - -#endif /* !__TBB_STATISTICS */ - -#endif /* _TBB_tbb_statistics_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_thread.cpp b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_thread.cpp deleted file mode 100644 index 40cb56f4c6..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_thread.cpp +++ /dev/null @@ -1,172 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#if _WIN32||_WIN64 -#include <process.h> /* Need _beginthreadex from there */ -#endif -#include "tbb_misc.h" // handle_win_error, ThreadStackSize -#include "tbb/tbb_stddef.h" -#include "tbb/tbb_thread.h" -#include "tbb/tbb_allocator.h" -#include "tbb/task_scheduler_init.h" /* Need task_scheduler_init::default_num_threads() */ - -namespace tbb { -namespace internal { - -//! Allocate a closure -void* allocate_closure_v3( size_t size ) -{ - return allocate_via_handler_v3( size ); -} - -//! Free a closure allocated by allocate_closure_v3 -void free_closure_v3( void *ptr ) -{ - deallocate_via_handler_v3( ptr ); -} - -void tbb_thread_v3::join() -{ - __TBB_ASSERT( joinable(), "thread should be joinable when join called" ); -#if _WIN32||_WIN64 - DWORD status = WaitForSingleObject( my_handle, INFINITE ); - if ( status == WAIT_FAILED ) - handle_win_error( GetLastError() ); - BOOL close_stat = CloseHandle( my_handle ); - if ( close_stat == 0 ) - handle_win_error( GetLastError() ); - my_thread_id = 0; -#else - int status = pthread_join( my_handle, NULL ); - if( status ) - handle_perror( status, "pthread_join" ); -#endif // _WIN32||_WIN64 - my_handle = 0; -} - -void tbb_thread_v3::detach() { - __TBB_ASSERT( joinable(), "only joinable thread can be detached" ); -#if _WIN32||_WIN64 - BOOL status = CloseHandle( my_handle ); - if ( status == 0 ) - handle_win_error( GetLastError() ); - my_thread_id = 0; -#else - int status = pthread_detach( my_handle ); - if( status ) - handle_perror( status, "pthread_detach" ); -#endif // _WIN32||_WIN64 - my_handle = 0; -} - -void tbb_thread_v3::internal_start( __TBB_NATIVE_THREAD_ROUTINE_PTR(start_routine), - void* closure ) { -#if _WIN32||_WIN64 - unsigned thread_id; - // The return type of _beginthreadex is "uintptr_t" on new MS compilers, - // and 'unsigned long' on old MS compilers. uintptr_t works for both. 
- uintptr_t status = _beginthreadex( NULL, ThreadStackSize, start_routine, - closure, 0, &thread_id ); - if( status==0 ) - handle_perror(errno,"__beginthreadex"); - else { - my_handle = (HANDLE)status; - my_thread_id = thread_id; - } -#else - pthread_t thread_handle; - int status; - pthread_attr_t stack_size; - status = pthread_attr_init( &stack_size ); - if( status ) - handle_perror( status, "pthread_attr_init" ); - status = pthread_attr_setstacksize( &stack_size, ThreadStackSize ); - if( status ) - handle_perror( status, "pthread_attr_setstacksize" ); - - status = pthread_create( &thread_handle, &stack_size, start_routine, closure ); - if( status ) - handle_perror( status, "pthread_create" ); - - my_handle = thread_handle; -#endif // _WIN32||_WIN64 -} - -unsigned tbb_thread_v3::hardware_concurrency() { - return task_scheduler_init::default_num_threads(); -} - -tbb_thread_v3::id thread_get_id_v3() { -#if _WIN32||_WIN64 - return tbb_thread_v3::id( GetCurrentThreadId() ); -#else - return tbb_thread_v3::id( pthread_self() ); -#endif // _WIN32||_WIN64 -} - -void move_v3( tbb_thread_v3& t1, tbb_thread_v3& t2 ) -{ - if (t1.joinable()) - t1.detach(); - t1.my_handle = t2.my_handle; - t2.my_handle = 0; -#if _WIN32||_WIN64 - t1.my_thread_id = t2.my_thread_id; - t2.my_thread_id = 0; -#endif // _WIN32||_WIN64 -} - -void thread_yield_v3() -{ - __TBB_Yield(); -} - -void thread_sleep_v3(const tick_count::interval_t &i) -{ -#if _WIN32||_WIN64 - tick_count t0 = tick_count::now(); - tick_count t1 = t0; - for(;;) { - double remainder = (i-(t1-t0)).seconds()*1e3; // milliseconds remaining to sleep - if( remainder<=0 ) break; - DWORD t = remainder>=INFINITE ? INFINITE-1 : DWORD(remainder); - Sleep( t ); - t1 = tick_count::now(); - } -#else - struct timespec req; - double sec = i.seconds(); - - req.tv_sec = static_cast<long>(sec); - req.tv_nsec = static_cast<long>( (sec - req.tv_sec)*1e9 ); - nanosleep(&req, NULL); -#endif // _WIN32||_WIN64 -} - -} // internal -} // tbb diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_version.h b/deal.II/bundled/tbb30_104oss/src/tbb/tbb_version.h deleted file mode 100644 index efb9b75c79..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tbb_version.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// Please define version number in the file: -#include "tbb/tbb_stddef.h" - -// And don't touch anything below -#ifndef ENDL -#define ENDL "\n" -#endif -#include "version_string.tmp" - -#ifndef __TBB_VERSION_STRINGS -#pragma message("Warning: version_string.tmp isn't generated properly by version_info.sh script!") -// here is an example of macros value: -#define __TBB_VERSION_STRINGS \ -"TBB: BUILD_HOST\tUnknown\n" \ -"TBB: BUILD_ARCH\tUnknown\n" \ -"TBB: BUILD_OS\t\tUnknown\n" \ -"TBB: BUILD_CL\t\tUnknown\n" \ -"TBB: BUILD_COMPILER\tUnknown\n" \ -"TBB: BUILD_COMMAND\tUnknown\n" -#endif -#ifndef __TBB_DATETIME -#ifdef RC_INVOKED -#define __TBB_DATETIME "Unknown" -#else -#define __TBB_DATETIME __DATE__ __TIME__ -#endif -#endif - -#define __TBB_VERSION_NUMBER "TBB: VERSION\t\t" __TBB_STRING(TBB_VERSION_MAJOR.TBB_VERSION_MINOR) ENDL -#define __TBB_INTERFACE_VERSION_NUMBER "TBB: INTERFACE VERSION\t" __TBB_STRING(TBB_INTERFACE_VERSION) ENDL -#define __TBB_VERSION_DATETIME "TBB: BUILD_DATE\t\t" __TBB_DATETIME ENDL -#ifndef TBB_USE_DEBUG - #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\tundefined" ENDL -#elif TBB_USE_DEBUG==0 - #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\t0" ENDL -#elif TBB_USE_DEBUG==1 - #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\t1" ENDL -#elif TBB_USE_DEBUG==2 - #define __TBB_VERSION_USE_DEBUG "TBB: TBB_USE_DEBUG\t2" ENDL -#else - #error Unexpected value for TBB_USE_DEBUG -#endif -#ifndef TBB_USE_ASSERT - #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\tundefined" ENDL -#elif TBB_USE_ASSERT==0 - #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\t0" ENDL -#elif TBB_USE_ASSERT==1 - #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\t1" ENDL -#elif TBB_USE_ASSERT==2 - #define __TBB_VERSION_USE_ASSERT "TBB: TBB_USE_ASSERT\t2" ENDL -#else - #error Unexpected value for TBB_USE_ASSERT -#endif -#ifndef DO_ITT_NOTIFY - #define __TBB_VERSION_DO_NOTIFY "TBB: DO_ITT_NOTIFY\tundefined" ENDL -#elif DO_ITT_NOTIFY==1 - #define __TBB_VERSION_DO_NOTIFY "TBB: DO_ITT_NOTIFY\t1" ENDL -#elif DO_ITT_NOTIFY==0 - #define __TBB_VERSION_DO_NOTIFY -#else - #error Unexpected value for DO_ITT_NOTIFY -#endif - -#define TBB_VERSION_STRINGS __TBB_VERSION_NUMBER __TBB_INTERFACE_VERSION_NUMBER __TBB_VERSION_DATETIME __TBB_VERSION_STRINGS __TBB_VERSION_USE_DEBUG __TBB_VERSION_USE_ASSERT __TBB_VERSION_DO_NOTIFY - -// numbers -#ifndef __TBB_VERSION_YMD -#define __TBB_VERSION_YMD 0, 0 -#endif - -#define TBB_VERNUMBERS TBB_VERSION_MAJOR, TBB_VERSION_MINOR, __TBB_VERSION_YMD - -#define TBB_VERSION __TBB_STRING(TBB_VERNUMBERS) diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tls.h b/deal.II/bundled/tbb30_104oss/src/tbb/tls.h deleted file mode 100644 index 4baa84d4ac..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tls.h +++ /dev/null @@ -1,119 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_tls_H -#define _TBB_tls_H - -#if USE_PTHREAD -#include <pthread.h> -#else /* assume USE_WINTHREAD */ -#include "tbb/machine/windows_api.h" -#endif - -namespace tbb { - -namespace internal { - -typedef void (*tls_dtor_t)(void*); - -//! Basic cross-platform wrapper class for TLS operations. -template <typename T> -class basic_tls { -#if USE_PTHREAD - typedef pthread_key_t tls_key_t; -public: - int create( tls_dtor_t dtor = NULL ) { - return pthread_key_create(&my_key, dtor); - } - int destroy() { return pthread_key_delete(my_key); } - void set( T value ) { pthread_setspecific(my_key, (void*)value); } - T get() { return (T)pthread_getspecific(my_key); } -#else /* USE_WINTHREAD */ - typedef DWORD tls_key_t; -public: - int create() { - tls_key_t tmp = TlsAlloc(); - if( tmp==TLS_OUT_OF_INDEXES ) - return TLS_OUT_OF_INDEXES; - my_key = tmp; - return 0; - } - int destroy() { TlsFree(my_key); my_key=0; return 0; } - void set( T value ) { TlsSetValue(my_key, (LPVOID)value); } - T get() { return (T)TlsGetValue(my_key); } -#endif -private: - tls_key_t my_key; -}; - -//! More advanced TLS support template class. -/** It supports RAII and to some extent mimic __declspec(thread) variables. */ -template <typename T> -class tls : public basic_tls<T> { - typedef basic_tls<T> base; -public: - tls() { base::create(); } - ~tls() { base::destroy(); } - T operator=(T value) { base::set(value); return value; } - operator T() { return base::get(); } -}; - -template <typename T> -class tls<T*> : basic_tls<T*> { - typedef basic_tls<T*> base; - static void internal_dtor(void* ptr) { - if (ptr) delete (T*)ptr; - } - T* internal_get() { - T* result = base::get(); - if (!result) { - result = new T; - base::set(result); - } - return result; - } -public: - tls() { -#if USE_PTHREAD - base::create( internal_dtor ); -#else - base::create(); -#endif - } - ~tls() { base::destroy(); } - T* operator=(T* value) { base::set(value); return value; } - operator T*() { return internal_get(); } - T* operator->() { return internal_get(); } - T& operator*() { return *internal_get(); } -}; - -} // namespace internal - -} // namespace tbb - -#endif /* _TBB_tls_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/disable_warnings.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/disable_warnings.h deleted file mode 100644 index 28100e117a..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/disable_warnings.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "ittnotify_config.h" - -#if ITT_PLATFORM==ITT_PLATFORM_WIN - -#pragma warning (disable: 593) /* parameter "XXXX" was set but never used */ -#pragma warning (disable: 344) /* typedef name has already been declared (with same type) */ -#pragma warning (disable: 174) /* expression has no effect */ -#pragma warning (disable: 4127) /* conditional expression is constant */ -#pragma warning (disable: 4306) /* conversion from '?' to '?' of greater size */ - -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#if defined __INTEL_COMPILER - -#pragma warning (disable: 869) /* parameter "XXXXX" was never referenced */ -#pragma warning (disable: 1418) /* external function definition with no prior declaration */ -#pragma warning (disable: 1419) /* external declaration in primary source file */ - -#endif /* __INTEL_COMPILER */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/internal/ittnotify.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/internal/ittnotify.h deleted file mode 100644 index f037807687..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/internal/ittnotify.h +++ /dev/null @@ -1,661 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. 
Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _INTERNAL_ITTNOTIFY_H_ -#define _INTERNAL_ITTNOTIFY_H_ -/** - * @file - * @brief Internal User API functions and types - */ - -/** @cond exclude_from_documentation */ -#ifndef ITT_OS_WIN -# define ITT_OS_WIN 1 -#endif /* ITT_OS_WIN */ - -#ifndef ITT_OS_LINUX -# define ITT_OS_LINUX 2 -#endif /* ITT_OS_LINUX */ - -#ifndef ITT_OS_MAC -# define ITT_OS_MAC 3 -#endif /* ITT_OS_MAC */ - -#ifndef ITT_OS -# if defined WIN32 || defined _WIN32 -# define ITT_OS ITT_OS_WIN -# elif defined( __APPLE__ ) && defined( __MACH__ ) -# define ITT_OS ITT_OS_MAC -# else -# define ITT_OS ITT_OS_LINUX -# endif -#endif /* ITT_OS */ - -#ifndef ITT_PLATFORM_WIN -# define ITT_PLATFORM_WIN 1 -#endif /* ITT_PLATFORM_WIN */ - -#ifndef ITT_PLATFORM_POSIX -# define ITT_PLATFORM_POSIX 2 -#endif /* ITT_PLATFORM_POSIX */ - -#ifndef ITT_PLATFORM -# if ITT_OS==ITT_OS_WIN -# define ITT_PLATFORM ITT_PLATFORM_WIN -# else -# define ITT_PLATFORM ITT_PLATFORM_POSIX -# endif /* _WIN32 */ -#endif /* ITT_PLATFORM */ - -#include <stddef.h> -#include <stdarg.h> -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#include <tchar.h> -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#ifndef CDECL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define CDECL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ - -#ifndef STDCALL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define STDCALL __stdcall -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define STDCALL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* STDCALL */ - -#define ITTAPI CDECL -#define LIBITTAPI /* nothing */ - -#define ITT_JOIN_AUX(p,n) p##n -#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) - -#ifndef INTEL_ITTNOTIFY_PREFIX -# define INTEL_ITTNOTIFY_PREFIX __itt_ -#endif /* INTEL_ITTNOTIFY_PREFIX */ -#ifndef INTEL_ITTNOTIFY_POSTFIX -# define INTEL_ITTNOTIFY_POSTFIX _ptr_ -#endif /* INTEL_ITTNOTIFY_POSTFIX */ - -#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) -#define ITTNOTIFY_NAME(n) ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)) - -#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) - -#ifdef ITT_STUB -#undef ITT_STUB -#endif -#ifdef ITT_STUBV -#undef ITT_STUBV -#endif -#define ITT_STUBV(api,type,name,args,params) \ - typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ - extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); -#define ITT_STUB ITT_STUBV - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ -/** @endcond */ - -/** - * @defgroup internal Internal API - * @{ - * @} - */ - -/** - * @defgroup makrs Marks - * @ingroup internal - * Marks group - * @warning Internal API: - * - It is not shipped to outside of Intel - * - It is delivered to internal Intel teams using e-mail or SVN access only - * @{ - */ -/** @brief user mark type */ -typedef int __itt_mark_type; - -/** - * @brief Creates a user mark type with the specified name using char or Unicode string. 
- * @param[in] name - name of mark to create - * @return Returns a handle to the mark type - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -__itt_mark_type ITTAPI __itt_mark_createA(const char *name); -__itt_mark_type ITTAPI __itt_mark_createW(const wchar_t *name); -#ifdef UNICODE -# define __itt_mark_create __itt_mark_createW -# define __itt_mark_create_ptr __itt_mark_createW_ptr -#else /* UNICODE */ -# define __itt_mark_create __itt_mark_createA -# define __itt_mark_create_ptr __itt_mark_createA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -__itt_mark_type ITTAPI __itt_mark_create(const char *name); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char *name), (name)) -ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name), (name)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_mark_type, mark_create, (const char *name), (name)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_mark_createA ITTNOTIFY_DATA(mark_createA) -#define __itt_mark_createA_ptr ITTNOTIFY_NAME(mark_createA) -#define __itt_mark_createW ITTNOTIFY_DATA(mark_createW) -#define __itt_mark_createW_ptr ITTNOTIFY_NAME(mark_createW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark_create ITTNOTIFY_DATA(mark_create) -#define __itt_mark_create_ptr ITTNOTIFY_NAME(mark_create) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_mark_createA(name) (__itt_mark_type)0 -#define __itt_mark_createA_ptr 0 -#define __itt_mark_createW(name) (__itt_mark_type)0 -#define __itt_mark_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark_create(name) (__itt_mark_type)0 -#define __itt_mark_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_mark_createA_ptr 0 -#define __itt_mark_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Creates a "discrete" user mark type of the specified type and an optional parameter using char or Unicode string. - * - * - The mark of "discrete" type is placed to collection results in case of success. It appears in overtime view(s) as a special tick sign. - * - The call is "synchronous" - function returns after mark is actually added to results. - * - This function is useful, for example, to mark different phases of application - * (beginning of the next mark automatically meand end of current region). - * - Can be used together with "continuous" marks (see below) at the same collection session - * @param[in] mt - mark, created by __itt_mark_create(const char* name) function - * @param[in] parameter - string parameter of mark - * @return Returns zero value in case of success, non-zero value otherwise. 
- */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -int ITTAPI __itt_markA(__itt_mark_type mt, const char *parameter); -int ITTAPI __itt_markW(__itt_mark_type mt, const wchar_t *parameter); -#ifdef UNICODE -# define __itt_mark __itt_markW -# define __itt_mark_ptr __itt_markW_ptr -#else /* UNICODE */ -# define __itt_mark __itt_markA -# define __itt_mark_ptr __itt_markA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -int ITTAPI __itt_mark(__itt_mark_type mt, const char *parameter); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char *parameter), (mt, parameter)) -ITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter), (mt, parameter)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, int, mark, (__itt_mark_type mt, const char *parameter), (mt, parameter)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_markA ITTNOTIFY_DATA(markA) -#define __itt_markA_ptr ITTNOTIFY_NAME(markA) -#define __itt_markW ITTNOTIFY_DATA(markW) -#define __itt_markW_ptr ITTNOTIFY_NAME(markW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark ITTNOTIFY_DATA(mark) -#define __itt_mark_ptr ITTNOTIFY_NAME(mark) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_markA(mt, parameter) (int)0 -#define __itt_markA_ptr 0 -#define __itt_markW(mt, parameter) (int)0 -#define __itt_markW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark(mt, parameter) (int)0 -#define __itt_mark_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_markA_ptr 0 -#define __itt_markW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Use this if necessary to create a "discrete" user event type (mark) for process - * rather then for one thread - * @see int __itt_mark(__itt_mark_type mt, const char* parameter); - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -int ITTAPI __itt_mark_globalA(__itt_mark_type mt, const char *parameter); -int ITTAPI __itt_mark_globalW(__itt_mark_type mt, const wchar_t *parameter); -#ifdef UNICODE -# define __itt_mark_global __itt_mark_globalW -# define __itt_mark_global_ptr __itt_mark_globalW_ptr -#else /* UNICODE */ -# define __itt_mark_global __itt_mark_globalA -# define __itt_mark_global_ptr __itt_mark_globalA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -int ITTAPI __itt_mark_global(__itt_mark_type mt, const char *parameter); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char *parameter), (mt, parameter)) -ITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter), (mt, parameter)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, int, mark_global, (__itt_mark_type mt, const char *parameter), (mt, parameter)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_mark_globalA 
ITTNOTIFY_DATA(mark_globalA) -#define __itt_mark_globalA_ptr ITTNOTIFY_NAME(mark_globalA) -#define __itt_mark_globalW ITTNOTIFY_DATA(mark_globalW) -#define __itt_mark_globalW_ptr ITTNOTIFY_NAME(mark_globalW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark_global ITTNOTIFY_DATA(mark_global) -#define __itt_mark_global_ptr ITTNOTIFY_NAME(mark_global) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_mark_globalA(mt, parameter) (int)0 -#define __itt_mark_globalA_ptr 0 -#define __itt_mark_globalW(mt, parameter) (int)0 -#define __itt_mark_globalW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark_global(mt, parameter) (int)0 -#define __itt_mark_global_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_mark_globalA_ptr 0 -#define __itt_mark_globalW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_mark_global_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Creates an "end" point for "continuous" mark with specified name. - * - * - Returns zero value in case of success, non-zero value otherwise. - * Also returns non-zero value when preceding "begin" point for the - * mark with the same name failed to be created or not created. - * - The mark of "continuous" type is placed to collection results in - * case of success. It appears in overtime view(s) as a special tick - * sign (different from "discrete" mark) together with line from - * corresponding "begin" mark to "end" mark. - * @note Continuous marks can overlap and be nested inside each other. - * Discrete mark can be nested inside marked region - * @param[in] mt - mark, created by __itt_mark_create(const char* name) function - * @return Returns zero value in case of success, non-zero value otherwise. 
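
For orientation, a minimal sketch of how the mark API declared above might be used; the mark-type name and phase strings are hypothetical, and __itt_mark_off (the "continuous" end point) is declared just below:

    #include "ittnotify.h"   /* the (internal) ITT header removed in this patch;
                                the exact include path in the bundled tree may differ */

    /* "app.phase" and the phase strings are hypothetical illustrations. */
    void trace_phases(void)
    {
        __itt_mark_type phase = __itt_mark_create("app.phase");

        __itt_mark(phase, "assemble");        /* discrete, thread-local mark          */
        /* ... work ... */
        __itt_mark_global(phase, "solve");    /* discrete mark for the whole process  */
        /* ... work ... */
        __itt_mark_off(phase);                /* ends the matching "begin" point of a
                                                 continuous mark of this type, per the
                                                 __itt_mark_off description below      */
    }
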
- */ -int ITTAPI __itt_mark_off(__itt_mark_type mt); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt), (mt)) -#define __itt_mark_off ITTNOTIFY_DATA(mark_off) -#define __itt_mark_off_ptr ITTNOTIFY_NAME(mark_off) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_mark_off(mt) (int)0 -#define __itt_mark_off_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_mark_off_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Use this if necessary to create an "end" point for mark of process - * @see int __itt_mark_off(__itt_mark_type mt); - */ -int ITTAPI __itt_mark_global_off(__itt_mark_type mt); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt), (mt)) -#define __itt_mark_global_off ITTNOTIFY_DATA(mark_global_off) -#define __itt_mark_global_off_ptr ITTNOTIFY_NAME(mark_global_off) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_mark_global_off(mt) (int)0 -#define __itt_mark_global_off_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_mark_global_off_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} marks group */ - -/** - * @defgroup counters Counters - * @ingroup internal - * Counters group - * @{ - */ -/** - * @brief opaque structure for counter identification - */ -typedef struct ___itt_counter *__itt_counter; - -/** - * @brief Create a counter with given name/domain for the calling thread - * - * After __itt_counter_create() is called, __itt_counter_inc() / __itt_counter_inc_delta() can be used - * to increment the counter on any thread - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -__itt_counter ITTAPI __itt_counter_createA(const char *name, const char *domain); -__itt_counter ITTAPI __itt_counter_createW(const wchar_t *name, const wchar_t *domain); -#ifdef UNICODE -# define __itt_counter_create __itt_counter_createW -# define __itt_counter_create_ptr __itt_counter_createW_ptr -#else /* UNICODE */ -# define __itt_counter_create __itt_counter_createA -# define __itt_counter_create_ptr __itt_counter_createA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -__itt_counter ITTAPI __itt_counter_create(const char *name, const char *domain); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain), (name, domain)) -ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (name, domain)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain), (name, domain)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_counter_createA ITTNOTIFY_DATA(counter_createA) -#define __itt_counter_createA_ptr ITTNOTIFY_NAME(counter_createA) -#define __itt_counter_createW ITTNOTIFY_DATA(counter_createW) -#define __itt_counter_createW_ptr ITTNOTIFY_NAME(counter_createW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_counter_create ITTNOTIFY_DATA(counter_create) -#define __itt_counter_create_ptr ITTNOTIFY_NAME(counter_create) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* 
INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_counter_createA(name, domain) -#define __itt_counter_createA_ptr 0 -#define __itt_counter_createW(name, domain) -#define __itt_counter_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_counter_create(name, domain) -#define __itt_counter_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_counter_createA_ptr 0 -#define __itt_counter_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_counter_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Destroy the counter identified by the pointer previously returned by __itt_counter_create() - */ -void ITTAPI __itt_counter_destroy(__itt_counter id); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id), (id)) -#define __itt_counter_destroy ITTNOTIFY_VOID(counter_destroy) -#define __itt_counter_destroy_ptr ITTNOTIFY_NAME(counter_destroy) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_counter_destroy(id) -#define __itt_counter_destroy_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_counter_destroy_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Increment the counter value - */ -void ITTAPI __itt_counter_inc(__itt_counter id); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id), (id)) -#define __itt_counter_inc ITTNOTIFY_VOID(counter_inc) -#define __itt_counter_inc_ptr ITTNOTIFY_NAME(counter_inc) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_counter_inc(id) -#define __itt_counter_inc_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_counter_inc_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Increment the counter value with x - */ -void ITTAPI __itt_counter_inc_delta(__itt_counter id, unsigned long long value); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (id, value)) -#define __itt_counter_inc_delta ITTNOTIFY_VOID(counter_inc_delta) -#define __itt_counter_inc_delta_ptr ITTNOTIFY_NAME(counter_inc_delta) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_counter_inc_delta(id, value) -#define __itt_counter_inc_delta_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_counter_inc_delta_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} counters group */ - -/** - * @defgroup stitch Stack Stitching - * @ingroup internal - * Stack Stitching group - * @{ - */ -/** - * @brief opaque structure for counter identification - */ -typedef struct ___itt_caller *__itt_caller; - -/** - * @brief Create the stitch point e.g. a point in call stack where other stacks should be stitched to. - * The function returns a unique identifier which is used to match the cut points with corresponding stitch points. 
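
A minimal sketch of the counter API defined above, assuming hypothetical counter names ("send_ops", "send_bytes") and a hypothetical "network" domain:

    #include "ittnotify.h"   /* header removed in this patch; include path may differ */

    static __itt_counter g_send_ops;
    static __itt_counter g_send_bytes;

    void counters_init(void)
    {
        g_send_ops   = __itt_counter_create("send_ops",   "network");
        g_send_bytes = __itt_counter_create("send_bytes", "network");
    }

    void on_send(unsigned long long nbytes)
    {
        __itt_counter_inc(g_send_ops);                  /* one more send call     */
        __itt_counter_inc_delta(g_send_bytes, nbytes);  /* total payload in bytes */
    }

    void counters_fini(void)
    {
        __itt_counter_destroy(g_send_ops);
        __itt_counter_destroy(g_send_bytes);
    }
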
- */ -__itt_caller ITTAPI __itt_stack_caller_create(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void), ()) -#define __itt_stack_caller_create ITTNOTIFY_DATA(stack_caller_create) -#define __itt_stack_caller_create_ptr ITTNOTIFY_NAME(stack_caller_create) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_stack_caller_create() (__itt_caller)0 -#define __itt_stack_caller_create_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_stack_caller_create_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Destroy the inforamtion about stitch point identified by the pointer previously returned by __itt_stack_caller_create() - */ -void ITTAPI __itt_stack_caller_destroy(__itt_caller id); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id), (id)) -#define __itt_stack_caller_destroy ITTNOTIFY_VOID(stack_caller_destroy) -#define __itt_stack_caller_destroy_ptr ITTNOTIFY_NAME(stack_caller_destroy) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_stack_caller_destroy(id) -#define __itt_stack_caller_destroy_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_stack_caller_destroy_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Sets the cut point. Stack from each event which occurs after this call will be cut - * at the same stack level the function was called and stitched to the corresponding stitch point. - */ -void ITTAPI __itt_stack_callee_enter(__itt_caller id); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id), (id)) -#define __itt_stack_callee_enter ITTNOTIFY_VOID(stack_callee_enter) -#define __itt_stack_callee_enter_ptr ITTNOTIFY_NAME(stack_callee_enter) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_stack_callee_enter(id) -#define __itt_stack_callee_enter_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_stack_callee_enter_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief This function eliminates the cut point which was set by latest __itt_stack_callee_enter(). - */ -void ITTAPI __itt_stack_callee_leave(__itt_caller id); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id), (id)) -#define __itt_stack_callee_leave ITTNOTIFY_VOID(stack_callee_leave) -#define __itt_stack_callee_leave_ptr ITTNOTIFY_NAME(stack_callee_leave) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_stack_callee_leave(id) -#define __itt_stack_callee_leave_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_stack_callee_leave_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** @} stitch group */ - -/* ***************************************************************************************************************************** */ - -/** @cond exclude_from_documentation */ -typedef enum __itt_error_code { - __itt_error_success = 0, /*!< no error */ - __itt_error_no_module = 1, /*!< module can't be loaded */ - /* %1$s -- library name; win: %2$d -- system error code; unx: %2$s -- system error message. 
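
A sketch of how the stack-stitching API above is typically used by a task scheduler; the struct stitched_task record and the spawn/execute hooks are hypothetical, only the __itt_stack_* calls come from the header:

    #include "ittnotify.h"   /* header removed in this patch; include path may differ */

    /* Hypothetical task record: the stitch point is captured where the task is
       spawned and consumed where it later runs on a worker thread. */
    struct stitched_task {
        __itt_caller stitch;
        void (*body)(void *);
        void *arg;
    };

    void on_spawn(struct stitched_task *t)
    {
        t->stitch = __itt_stack_caller_create();   /* remember the spawn-site stack */
        /* ... enqueue t for a worker thread ... */
    }

    void on_execute(struct stitched_task *t)
    {
        __itt_stack_callee_enter(t->stitch);   /* cut here, stitch to the spawn site */
        t->body(t->arg);
        __itt_stack_callee_leave(t->stitch);
        __itt_stack_caller_destroy(t->stitch); /* stitch point no longer needed */
    }
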
*/ - __itt_error_no_symbol = 2, /*!< symbol not found */ - /* %1$s -- library name, %2$s -- symbol name. */ - __itt_error_unknown_group = 3, /*!< unknown group specified */ - /* %1$s -- env var name, %2$s -- group name. */ - __itt_error_cant_read_env = 4, /*!< GetEnvironmentVariable() failed */ - /* %1$s -- env var name, %2$d -- system error. */ - __itt_error_env_too_long = 5, /*!< variable value too long */ - /* %1$s -- env var name, %2$d -- actual length of the var, %3$d -- max allowed length. */ - __itt_error_system = 6 /*!< pthread_mutexattr_init or pthread_mutex_init failed */ - /* %1$s -- function name, %2$d -- errno. */ -} __itt_error_code; - -typedef void (__itt_error_notification_t)(__itt_error_code code, va_list); -__itt_error_notification_t* __itt_set_error_handler(__itt_error_notification_t*); - -const char* ITTAPI __itt_api_version(void); -/** @endcond */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#define __itt_error_handler ITT_JOIN(INTEL_ITTNOTIFY_PREFIX, error_handler) -void __itt_error_handler(__itt_error_code code, va_list args); -extern const int ITTNOTIFY_NAME(err); -#define __itt_err ITTNOTIFY_NAME(err) -ITT_STUB(ITTAPI, const char*, api_version, (void), ()) -#define __itt_api_version ITTNOTIFY_DATA(api_version) -#define __itt_api_version_ptr ITTNOTIFY_NAME(api_version) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_api_version() (const char*)0 -#define __itt_api_version_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_api_version_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** @cond exclude_from_documentation */ -#ifdef __cplusplus -} -#endif /* __cplusplus */ -/** @endcond */ - -#endif /* _INTERNAL_ITTNOTIFY_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify.h deleted file mode 100644 index 254f7d39a4..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify.h +++ /dev/null @@ -1,1409 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _ITTNOTIFY_H_ -#define _ITTNOTIFY_H_ -/** - * @file - * @brief Public User API functions and types - * @mainpage - * Ability to control the collection during runtime. User API can be inserted into the user application. - * Commands include: - * - Collection control - * - Marking - * - Thread manipulation - * - User-defined synchronization primitives - * - * The User API provides ability to control the collection, set marks at the execution of specific user code and - * specify custom synchronization primitives implemented without standard system APIs. - * - * Use case: User inserts API calls to the desired places in her code. The code is then compiled and - * linked with static part of User API library. User can recompile the code with specific macro defined - * to enable API calls. If this macro is not defined there is no run-time overhead and no need to link - * with static part of User API library. During runtime the static library loads and initializes the dynamic part. - * In case of instrumentation-based collection, only a stub library is loaded; otherwise a proxy library is loaded, - * which calls the collector. - * - * User API set is native (C/C++) only (no MRTE support). As a mitigation can use JNI or C/C++ function - * call from managed code where needed. If the collector causes significant overhead or data storage, then - * pausing analysis should reduce the overhead to minimal levels. - */ - -/** @cond exclude_from_documentation */ -#ifndef ITT_OS_WIN -# define ITT_OS_WIN 1 -#endif /* ITT_OS_WIN */ - -#ifndef ITT_OS_LINUX -# define ITT_OS_LINUX 2 -#endif /* ITT_OS_LINUX */ - -#ifndef ITT_OS_MAC -# define ITT_OS_MAC 3 -#endif /* ITT_OS_MAC */ - -#ifndef ITT_OS -# if defined WIN32 || defined _WIN32 -# define ITT_OS ITT_OS_WIN -# elif defined( __APPLE__ ) && defined( __MACH__ ) -# define ITT_OS ITT_OS_MAC -# else -# define ITT_OS ITT_OS_LINUX -# endif -#endif /* ITT_OS */ - -#ifndef ITT_PLATFORM_WIN -# define ITT_PLATFORM_WIN 1 -#endif /* ITT_PLATFORM_WIN */ - -#ifndef ITT_PLATFORM_POSIX -# define ITT_PLATFORM_POSIX 2 -#endif /* ITT_PLATFORM_POSIX */ - -#ifndef ITT_PLATFORM -# if ITT_OS==ITT_OS_WIN -# define ITT_PLATFORM ITT_PLATFORM_WIN -# else -# define ITT_PLATFORM ITT_PLATFORM_POSIX -# endif /* _WIN32 */ -#endif /* ITT_PLATFORM */ - -#include <stddef.h> -#include <stdarg.h> -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#include <tchar.h> -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#ifndef CDECL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define CDECL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ - -#ifndef STDCALL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define STDCALL __stdcall -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define STDCALL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* STDCALL */ - -#define ITTAPI CDECL -#define LIBITTAPI /* nothing */ - -#ifdef INTEL_ITTNOTIFY_ENABLE_LEGACY -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# pragma message("WARNING!!! Deprecated API is used. Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro") -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -// #warning usage leads to ICC's compilation error -// # warning "Deprecated API is used. 
Please undefine INTEL_ITTNOTIFY_ENABLE_LEGACY macro" -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# include "legacy/ittnotify.h" -#endif /* INTEL_ITTNOTIFY_ENABLE_LEGACY */ - -#define ITT_JOIN_AUX(p,n) p##n -#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) - -#ifndef INTEL_ITTNOTIFY_PREFIX -# define INTEL_ITTNOTIFY_PREFIX __itt_ -#endif /* INTEL_ITTNOTIFY_PREFIX */ -#ifndef INTEL_ITTNOTIFY_POSTFIX -# define INTEL_ITTNOTIFY_POSTFIX _ptr_ -#endif /* INTEL_ITTNOTIFY_POSTFIX */ - -#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) -#define ITTNOTIFY_NAME(n) ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)) - -#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) - -#ifdef ITT_STUB -#undef ITT_STUB -#endif -#ifdef ITT_STUBV -#undef ITT_STUBV -#endif -#define ITT_STUBV(api,type,name,args,params) \ - typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ - extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); -#define ITT_STUB ITT_STUBV - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ -/** @endcond */ - -/** - * @defgroup public Public API - * @{ - * @} - */ - -/** - * @defgroup control Collection Control - * @ingroup public - * General behavior: application continues to run, but no profiling information is being collected - * - * Pausing occurs not only for the current thread but for all process as well as spawned processes - * - Intel(R) Parallel Inspector: - * - Does not analyze or report errors that involve memory access. - * - Other errors are reported as usual. Pausing data collection in - * Intel(R) Parallel Inspector only pauses tracing and analyzing - * memory access. It does not pause tracing or analyzing threading APIs. - * . - * - Intel(R) Parallel Amplifier: - * - Does continue to record when new threads are started. - * . - * - Other effects: - * - Possible reduction of runtime overhead. - * . 
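
A minimal sketch of collection control with __itt_pause()/__itt_resume(), which are declared immediately below; the setup/solve/teardown functions are hypothetical placeholders:

    #include "ittnotify.h"   /* public header removed in this patch */

    static void do_setup(void)    { /* ... */ }
    static void do_solve(void)    { /* ... */ }
    static void do_teardown(void) { /* ... */ }

    int main(void)
    {
        __itt_pause();      /* skip collecting during setup            */
        do_setup();

        __itt_resume();     /* collect only for the region of interest */
        do_solve();
        __itt_pause();

        do_teardown();      /* teardown stays outside the collection   */
        return 0;
    }
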
- * @{ - */ -/** @brief Pause collection */ -void ITTAPI __itt_pause(void); -/** @brief Resume collection */ -void ITTAPI __itt_resume(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, pause, (void), ()) -ITT_STUBV(ITTAPI, void, resume, (void), ()) -#define __itt_pause ITTNOTIFY_VOID(pause) -#define __itt_pause_ptr ITTNOTIFY_NAME(pause) -#define __itt_resume ITTNOTIFY_VOID(resume) -#define __itt_resume_ptr ITTNOTIFY_NAME(resume) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_pause() -#define __itt_pause_ptr 0 -#define __itt_resume() -#define __itt_resume_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_pause_ptr 0 -#define __itt_resume_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} control group */ - -/** - * @defgroup threads Threads - * @ingroup public - * Threads name group - * @{ - */ -/** - * @brief Sets thread name using char or Unicode string - * @param[in] name - name of thread - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -void ITTAPI __itt_thread_set_nameA(const char *name); -void ITTAPI __itt_thread_set_nameW(const wchar_t *name); -#ifdef UNICODE -# define __itt_thread_set_name __itt_thread_set_nameW -# define __itt_thread_set_name_ptr __itt_thread_set_nameW_ptr -#else /* UNICODE */ -# define __itt_thread_set_name __itt_thread_set_nameA -# define __itt_thread_set_name_ptr __itt_thread_set_nameA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -void ITTAPI __itt_thread_set_name(const char *name); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char *name), (name)) -ITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name), (name)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, thread_set_name, (const char *name), (name)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_thread_set_nameA ITTNOTIFY_VOID(thread_set_nameA) -#define __itt_thread_set_nameA_ptr ITTNOTIFY_NAME(thread_set_nameA) -#define __itt_thread_set_nameW ITTNOTIFY_VOID(thread_set_nameW) -#define __itt_thread_set_nameW_ptr ITTNOTIFY_NAME(thread_set_nameW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_thread_set_name ITTNOTIFY_VOID(thread_set_name) -#define __itt_thread_set_name_ptr ITTNOTIFY_NAME(thread_set_name) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_thread_set_nameA(name) -#define __itt_thread_set_nameA_ptr 0 -#define __itt_thread_set_nameW(name) -#define __itt_thread_set_nameW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_thread_set_name(name) -#define __itt_thread_set_name_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_thread_set_nameA_ptr 0 -#define __itt_thread_set_nameW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_thread_set_name_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Mark current thread as ignored from this point on, for the duration of its existence. 
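
A short sketch combining __itt_thread_set_name() (declared above) with __itt_thread_ignore() (declared just below); the worker/logger thread entry points are hypothetical pthread-style functions:

    #include "ittnotify.h"   /* public header removed in this patch */

    void *worker_main(void *arg)
    {
        __itt_thread_set_name("solver worker");  /* label this thread in the tool's GUI */
        /* ... do the actual work ... */
        return arg;
    }

    void *logger_main(void *arg)
    {
        __itt_thread_ignore();   /* declared just below: exclude this helper thread */
        /* ... flush logs in the background ... */
        return arg;
    }
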
- */ -void ITTAPI __itt_thread_ignore(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, thread_ignore, (void), ()) -#define __itt_thread_ignore ITTNOTIFY_VOID(thread_ignore) -#define __itt_thread_ignore_ptr ITTNOTIFY_NAME(thread_ignore) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_thread_ignore() -#define __itt_thread_ignore_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_thread_ignore_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} threads group */ - -/** - * @defgroup sync Synchronization - * @ingroup public - * Synchronization group - * @{ - */ -/** - * @hideinitializer - * @brief possible value of attribute argument for sync object type - */ -#define __itt_attr_barrier 1 - -/** - * @hideinitializer - * @brief possible value of attribute argument for sync object type - */ -#define __itt_attr_mutex 2 - -/** - * @brief Register the creation of a sync object using char or Unicode string - * @param[in] addr - pointer to the sync object. You should use a real pointer to your object - * to make sure that the values don't clash with other object addresses - * @param[in] objtype - null-terminated object type string. If NULL is passed, the object will - * be assumed to be of generic "User Synchronization" type - * @param[in] objname - null-terminated object name string. If NULL, no name will be assigned - * to the object -- you can use the __itt_sync_rename call later to assign - * the name - * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the - * exact semantics of how prepare/acquired/releasing calls work. - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -void ITTAPI __itt_sync_createA(void *addr, const char *objtype, const char *objname, int attribute); -void ITTAPI __itt_sync_createW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute); -#ifdef UNICODE -# define __itt_sync_create __itt_sync_createW -# define __itt_sync_create_ptr __itt_sync_createW_ptr -#else /* UNICODE */ -# define __itt_sync_create __itt_sync_createA -# define __itt_sync_create_ptr __itt_sync_createA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -void ITTAPI __itt_sync_create (void *addr, const char *objtype, const char *objname, int attribute); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char *objtype, const char *objname, int attribute), (addr, objtype, objname, attribute)) -ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (addr, objtype, objname, attribute)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, sync_create, (void *addr, const char* objtype, const char* objname, int attribute), (addr, objtype, objname, attribute)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_createA ITTNOTIFY_VOID(sync_createA) -#define __itt_sync_createA_ptr ITTNOTIFY_NAME(sync_createA) -#define __itt_sync_createW ITTNOTIFY_VOID(sync_createW) -#define __itt_sync_createW_ptr ITTNOTIFY_NAME(sync_createW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_create ITTNOTIFY_VOID(sync_create) -#define __itt_sync_create_ptr ITTNOTIFY_NAME(sync_create) -#endif /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_createA(addr, objtype, objname, attribute) -#define __itt_sync_createA_ptr 0 -#define __itt_sync_createW(addr, objtype, objname, attribute) -#define __itt_sync_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_create(addr, objtype, objname, attribute) -#define __itt_sync_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_createA_ptr 0 -#define __itt_sync_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Assign a name to a sync object using char or Unicode string. - * - * Sometimes you cannot assign the name to a sync object in the __itt_sync_set_name() call because it - * is not yet known there. In this case you should use the rename call which allows to assign the - * name after the creation has been registered. The renaming can be done multiple times. All waits - * after a new name has been assigned will be attributed to the sync object with this name. - * @param[in] addr - pointer to the sync object - * @param[in] name - null-terminated object name string - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -void ITTAPI __itt_sync_renameA(void *addr, const char *name); -void ITTAPI __itt_sync_renameW(void *addr, const wchar_t *name); -#ifdef UNICODE -# define __itt_sync_rename __itt_sync_renameW -# define __itt_sync_rename_ptr __itt_sync_renameW_ptr -#else /* UNICODE */ -# define __itt_sync_rename __itt_sync_renameA -# define __itt_sync_rename_ptr __itt_sync_renameA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -void ITTAPI __itt_sync_rename(void *addr, const char *name); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char *name), (addr, name)) -ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name), (addr, name)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, sync_rename, (void *addr, const char *name), (addr, name)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_renameA ITTNOTIFY_VOID(sync_renameA) -#define __itt_sync_renameA_ptr ITTNOTIFY_NAME(sync_renameA) -#define __itt_sync_renameW ITTNOTIFY_VOID(sync_renameW) -#define __itt_sync_renameW_ptr ITTNOTIFY_NAME(sync_renameW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_rename ITTNOTIFY_VOID(sync_rename) -#define __itt_sync_rename_ptr ITTNOTIFY_NAME(sync_rename) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_renameA(addr, name) -#define __itt_sync_renameA_ptr 0 -#define __itt_sync_renameW(addr, name) -#define __itt_sync_renameW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_rename(addr, name) -#define __itt_sync_rename_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_renameA_ptr 0 -#define __itt_sync_renameW_ptr 0 -#else /* 
ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_rename_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Is called when sync object is destroyed (needed to track lifetime of objects) - */ -void ITTAPI __itt_sync_destroy(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, sync_destroy, (void *addr), (addr)) -#define __itt_sync_destroy ITTNOTIFY_VOID(sync_destroy) -#define __itt_sync_destroy_ptr ITTNOTIFY_NAME(sync_destroy) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_sync_destroy(addr) -#define __itt_sync_destroy_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_sync_destroy_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/*****************************************************************//** - * @name group of functions is used for performance measurement tools - *********************************************************************/ -/** @{ */ -/** - * @brief Enter spin loop on user-defined sync object - */ -void ITTAPI __itt_sync_prepare(void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, sync_prepare, (void *addr), (addr)) -#define __itt_sync_prepare ITTNOTIFY_VOID(sync_prepare) -#define __itt_sync_prepare_ptr ITTNOTIFY_NAME(sync_prepare) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_sync_prepare(addr) -#define __itt_sync_prepare_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_sync_prepare_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Quit spin loop without acquiring spin object - */ -void ITTAPI __itt_sync_cancel(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, sync_cancel, (void *addr), (addr)) -#define __itt_sync_cancel ITTNOTIFY_VOID(sync_cancel) -#define __itt_sync_cancel_ptr ITTNOTIFY_NAME(sync_cancel) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_sync_cancel(addr) -#define __itt_sync_cancel_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_sync_cancel_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Successful spin loop completion (sync object acquired) - */ -void ITTAPI __itt_sync_acquired(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, sync_acquired, (void *addr), (addr)) -#define __itt_sync_acquired ITTNOTIFY_VOID(sync_acquired) -#define __itt_sync_acquired_ptr ITTNOTIFY_NAME(sync_acquired) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_sync_acquired(addr) -#define __itt_sync_acquired_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_sync_acquired_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Start sync object releasing code. Is called before the lock release call. 
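
A sketch of instrumenting a user-defined spin lock with the sync API above, assuming a non-UNICODE build (so the char entry points) and a GCC/Clang __sync builtin for the test-and-set; spin_lock_t and its helpers are hypothetical, and __itt_sync_releasing is declared just below:

    #include "ittnotify.h"   /* public header removed in this patch */

    typedef struct { volatile long locked; } spin_lock_t;

    static int spin_try_lock(spin_lock_t *l)
    {
        /* GCC/Clang builtin used purely for the sketch; any atomic
           test-and-set would do here. */
        return __sync_lock_test_and_set(&l->locked, 1L) == 0;
    }

    void spin_lock_init(spin_lock_t *l, const char *name)
    {
        l->locked = 0;
        /* The object's address identifies it from now on. */
        __itt_sync_create(l, "spin_lock_t", name, __itt_attr_mutex);
    }

    void spin_lock_acquire(spin_lock_t *l)
    {
        __itt_sync_prepare(l);            /* entering the spin loop on l */
        while (!spin_try_lock(l))
            ;                             /* spin */
        __itt_sync_acquired(l);           /* lock obtained */
    }

    void spin_lock_release(spin_lock_t *l)
    {
        __itt_sync_releasing(l);          /* declared just below; call before the release */
        __sync_lock_release(&l->locked);
    }

    void spin_lock_fini(spin_lock_t *l)
    {
        __itt_sync_destroy(l);            /* end of the object's lifetime */
    }
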
- */ -void ITTAPI __itt_sync_releasing(void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, sync_releasing, (void *addr), (addr)) -#define __itt_sync_releasing ITTNOTIFY_VOID(sync_releasing) -#define __itt_sync_releasing_ptr ITTNOTIFY_NAME(sync_releasing) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_sync_releasing(addr) -#define __itt_sync_releasing_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_sync_releasing_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} */ - -/**************************************************************//** - * @name group of functions is used for correctness checking tools - ******************************************************************/ -/** @{ */ -/** - * @brief Fast synchronization which does no require spinning. - * - This special function is to be used by TBB and OpenMP libraries only when they know - * there is no spin but they need to suppress TC warnings about shared variable modifications. - * - It only has corresponding pointers in static library and does not have corresponding function - * in dynamic library. - * @see void __itt_sync_prepare(void* addr); - */ -void ITTAPI __itt_fsync_prepare(void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, fsync_prepare, (void *addr), (addr)) -#define __itt_fsync_prepare ITTNOTIFY_VOID(fsync_prepare) -#define __itt_fsync_prepare_ptr ITTNOTIFY_NAME(fsync_prepare) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_fsync_prepare(addr) -#define __itt_fsync_prepare_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_fsync_prepare_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Fast synchronization which does no require spinning. - * - This special function is to be used by TBB and OpenMP libraries only when they know - * there is no spin but they need to suppress TC warnings about shared variable modifications. - * - It only has corresponding pointers in static library and does not have corresponding function - * in dynamic library. - * @see void __itt_sync_cancel(void *addr); - */ -void ITTAPI __itt_fsync_cancel(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr), (addr)) -#define __itt_fsync_cancel ITTNOTIFY_VOID(fsync_cancel) -#define __itt_fsync_cancel_ptr ITTNOTIFY_NAME(fsync_cancel) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_fsync_cancel(addr) -#define __itt_fsync_cancel_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_fsync_cancel_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Fast synchronization which does no require spinning. - * - This special function is to be used by TBB and OpenMP libraries only when they know - * there is no spin but they need to suppress TC warnings about shared variable modifications. - * - It only has corresponding pointers in static library and does not have corresponding function - * in dynamic library. 
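
As a rough sketch of the "fast" (no-spin) variants, here is a hypothetical lock-free flag handoff annotated so a correctness tool sees the synchronization; __itt_fsync_acquired and __itt_fsync_releasing are declared just below, and `ready` is a made-up shared variable:

    #include "ittnotify.h"   /* public header removed in this patch */

    static volatile int ready;   /* hypothetical shared flag */

    void producer_publish(void)
    {
        __itt_fsync_releasing((void *)&ready);   /* declared below: handoff is about to happen */
        ready = 1;
    }

    int consumer_poll(void)
    {
        if (ready) {
            __itt_fsync_acquired((void *)&ready);  /* declared below: handoff observed, no spin */
            return 1;
        }
        return 0;
    }
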
- * @see void __itt_sync_acquired(void *addr); - */ -void ITTAPI __itt_fsync_acquired(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, fsync_acquired, (void *addr), (addr)) -#define __itt_fsync_acquired ITTNOTIFY_VOID(fsync_acquired) -#define __itt_fsync_acquired_ptr ITTNOTIFY_NAME(fsync_acquired) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_fsync_acquired(addr) -#define __itt_fsync_acquired_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_fsync_acquired_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Fast synchronization which does no require spinning. - * - This special function is to be used by TBB and OpenMP libraries only when they know - * there is no spin but they need to suppress TC warnings about shared variable modifications. - * - It only has corresponding pointers in static library and does not have corresponding function - * in dynamic library. - * @see void __itt_sync_releasing(void* addr); - */ -void ITTAPI __itt_fsync_releasing(void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, fsync_releasing, (void *addr), (addr)) -#define __itt_fsync_releasing ITTNOTIFY_VOID(fsync_releasing) -#define __itt_fsync_releasing_ptr ITTNOTIFY_NAME(fsync_releasing) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_fsync_releasing(addr) -#define __itt_fsync_releasing_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_fsync_releasing_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} */ -/** @} sync group */ - -/** - * @defgroup model Modeling by Advisor - * @ingroup public - * This is the subset of itt used for modeling by Advisor. - * This API is called ONLY using annotate.h, by "Annotation" macros - * the user places in their sources during the parallelism modeling steps. - * - * The requirements, constraints, design and implementation - * for this interface are covered in: - * Shared%20Documents/Design%20Documents/AdvisorAnnotations.doc - * - * site_begin/end and task_begin/end take the address of handle variables, - * which are writeable by the API. Handles must be 0 initialized prior - * to the first call to begin, or may cause a run-time failure. - * The handles are initialized in a multi-thread safe way by the API if - * the handle is 0. The commonly expected idiom is one static handle to - * identify a site or task. If a site or task of the same name has already - * been started during this collection, the same handle MAY be returned, - * but is not required to be - it is unspecified if data merging is done - * based on name. These routines also take an instance variable. Like - * the lexical instance, these must be 0 initialized. Unlike the lexical - * instance, this is used to track a single dynamic instance. - * - * API used by the Intel Parallel Advisor to describe potential concurrency - * and related activities. User-added source annotations expand to calls - * to these procedures to enable modeling of a hypothetical concurrent - * execution serially. 
- * @{ - */ -typedef void* __itt_model_site; /*!< @brief handle for lexical site */ -typedef void* __itt_model_site_instance; /*!< @brief handle for dynamic instance */ -typedef void* __itt_model_task; /*!< @brief handle for lexical site */ -typedef void* __itt_model_task_instance; /*!< @brief handle for dynamic instance */ - -/** - * @enum __itt_model_disable - * @brief Enumerator for the disable methods - */ -typedef enum { - __itt_model_disable_observation, - __itt_model_disable_collection -} __itt_model_disable; - -/** - * @brief ANNOTATE_SITE_BEGIN/ANNOTATE_SITE_END support. - * - * site_begin/end model a potential concurrency site. - * site instances may be recursively nested with themselves. - * site_end exits the most recently started but unended site for the current - * thread. The handle passed to end may be used to validate structure. - * Instances of a site encountered on different threads concurrently - * are considered completely distinct. If the site name for two different - * lexical sites match, it is unspecified whether they are treated as the - * same or different for data presentation. - */ -void ITTAPI __itt_model_site_begin(__itt_model_site *site, __itt_model_site_instance *instance, const char *name); -void ITTAPI __itt_model_site_end (__itt_model_site *site, __itt_model_site_instance *instance); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_site_begin, (__itt_model_site *site, __itt_model_site_instance *instance, const char *name), (site, instance, name)) -ITT_STUBV(ITTAPI, void, model_site_end, (__itt_model_site *site, __itt_model_site_instance *instance), (site, instance)) -#define __itt_model_site_begin ITTNOTIFY_VOID(model_site_begin) -#define __itt_model_site_begin_ptr ITTNOTIFY_NAME(model_site_begin) -#define __itt_model_site_end ITTNOTIFY_VOID(model_site_end) -#define __itt_model_site_end_ptr ITTNOTIFY_NAME(model_site_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_site_begin(site, instance, name) -#define __itt_model_site_begin_ptr 0 -#define __itt_model_site_end(site, instance) -#define __itt_model_site_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_site_begin_ptr 0 -#define __itt_model_site_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_TASK_BEGIN/ANNOTATE_TASK_END support - * - * task_begin/end model a potential task, which is contained within the most - * closely enclosing dynamic site. task_end exits the most recently started - * but unended task. The handle passed to end may be used to validate - * structure. It is unspecified if bad dynamic nesting is detected. If it - * is, it should be encoded in the resulting data collection. The collector - * should not fail due to construct nesting issues, nor attempt to directly - * indicate the problem. 
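
A minimal sketch of site/task annotation as described above: one site around a hypothetical loop and one task per iteration, with static zero-initialized handles as the API requires; __itt_model_task_begin/_end are declared just below:

    #include "ittnotify.h"   /* public header removed in this patch */

    static __itt_model_site          site;       /* zero-initialized static handles */
    static __itt_model_site_instance site_inst;
    static __itt_model_task          task;
    static __itt_model_task_instance task_inst;

    void process_all(int n)
    {
        __itt_model_site_begin(&site, &site_inst, "process_all");
        for (int i = 0; i < n; ++i) {
            __itt_model_task_begin(&task, &task_inst, "process_one");  /* declared just below */
            /* ... iteration body that could become a parallel task ... */
            __itt_model_task_end(&task, &task_inst);
        }
        __itt_model_site_end(&site, &site_inst);
    }
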
- */ -void ITTAPI __itt_model_task_begin(__itt_model_task *task, __itt_model_task_instance *instance, const char *name); -void ITTAPI __itt_model_task_end (__itt_model_task *task, __itt_model_task_instance *instance); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_task_begin, (__itt_model_task *task, __itt_model_task_instance *instance, const char *name), (task, instance, name)) -ITT_STUBV(ITTAPI, void, model_task_end, (__itt_model_task *task, __itt_model_task_instance *instance), (task, instance)) -#define __itt_model_task_begin ITTNOTIFY_VOID(model_task_begin) -#define __itt_model_task_begin_ptr ITTNOTIFY_NAME(model_task_begin) -#define __itt_model_task_end ITTNOTIFY_VOID(model_task_end) -#define __itt_model_task_end_ptr ITTNOTIFY_NAME(model_task_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_task_begin(task, instance, name) -#define __itt_model_task_begin_ptr 0 -#define __itt_model_task_end(task, instance) -#define __itt_model_task_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_task_begin_ptr 0 -#define __itt_model_task_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_LOCK_ACQUIRE/ANNOTATE_LOCK_RELEASE support - * - * lock_acquire/release model a potential lock for both lockset and - * performance modeling. Each unique address is modeled as a separate - * lock, with invalid addresses being valid lock IDs. Specifically: - * no storage is accessed by the API at the specified address - it is only - * used for lock identification. Lock acquires may be self-nested and are - * unlocked by a corresponding number of releases. - * (These closely correspond to __itt_sync_acquired/__itt_sync_releasing, - * but may not have identical semantics.) - */ -void ITTAPI __itt_model_lock_acquire(void *lock); -void ITTAPI __itt_model_lock_release(void *lock); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock), (lock)) -ITT_STUBV(ITTAPI, void, model_lock_release, (void *lock), (lock)) -#define __itt_model_lock_acquire ITTNOTIFY_VOID(model_lock_acquire) -#define __itt_model_lock_acquire_ptr ITTNOTIFY_NAME(model_lock_acquire) -#define __itt_model_lock_release ITTNOTIFY_VOID(model_lock_release) -#define __itt_model_lock_release_ptr ITTNOTIFY_NAME(model_lock_release) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_lock_acquire(lock) -#define __itt_model_lock_acquire_ptr 0 -#define __itt_model_lock_release(lock) -#define __itt_model_lock_release_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_lock_acquire_ptr 0 -#define __itt_model_lock_release_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_RECORD_ALLOCATION/ANNOTATE_RECORD_DEALLOCATION support - * - * record_allocation/deallocation describe user-defined memory allocator - * behavior, which may be required for correctness modeling to understand - * when storage is not expected to be actually reused across threads. 
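
A sketch of the lock and allocator annotations above for a hypothetical pool allocator; take_from_pool()/return_to_pool() stand in for the real pool internals, and the record_allocation/deallocation calls are declared just below:

    #include <stddef.h>
    #include "ittnotify.h"   /* public header removed in this patch */

    void *take_from_pool(size_t size);   /* hypothetical pool internals */
    void  return_to_pool(void *p);

    static long pool_guard;   /* any unique address serves as the lock id */

    void *pool_alloc(size_t size)
    {
        void *p;
        __itt_model_lock_acquire(&pool_guard);
        p = take_from_pool(size);
        __itt_model_lock_release(&pool_guard);
        __itt_model_record_allocation(p, size);   /* declared just below */
        return p;
    }

    void pool_free(void *p)
    {
        __itt_model_record_deallocation(p);       /* declared just below */
        __itt_model_lock_acquire(&pool_guard);
        return_to_pool(p);
        __itt_model_lock_release(&pool_guard);
    }
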
- */ -void ITTAPI __itt_model_record_allocation (void *addr, size_t size); -void ITTAPI __itt_model_record_deallocation(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_record_allocation, (void *addr, size_t size), (addr, size)) -ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr), (addr)) -#define __itt_model_record_allocation ITTNOTIFY_VOID(model_record_allocation) -#define __itt_model_record_allocation_ptr ITTNOTIFY_NAME(model_record_allocation) -#define __itt_model_record_deallocation ITTNOTIFY_VOID(model_record_deallocation) -#define __itt_model_record_deallocation_ptr ITTNOTIFY_NAME(model_record_deallocation) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_record_allocation(addr, size) -#define __itt_model_record_allocation_ptr 0 -#define __itt_model_record_deallocation(addr) -#define __itt_model_record_deallocation_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_record_allocation_ptr 0 -#define __itt_model_record_deallocation_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_INDUCTION_USES support - * - * Note particular storage is inductive through the end of the current site - */ -void ITTAPI __itt_model_induction_uses(void* addr, size_t size); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_induction_uses, (void *addr, size_t size), (addr, size)) -#define __itt_model_induction_uses ITTNOTIFY_VOID(model_induction_uses) -#define __itt_model_induction_uses_ptr ITTNOTIFY_NAME(model_induction_uses) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_induction_uses(addr, size) -#define __itt_model_induction_uses_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_induction_uses_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_REDUCTION_USES support - * - * Note particular storage is used for reduction through the end - * of the current site - */ -void ITTAPI __itt_model_reduction_uses(void* addr, size_t size); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_reduction_uses, (void *addr, size_t size), (addr, size)) -#define __itt_model_reduction_uses ITTNOTIFY_VOID(model_reduction_uses) -#define __itt_model_reduction_uses_ptr ITTNOTIFY_NAME(model_reduction_uses) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_reduction_uses(addr, size) -#define __itt_model_reduction_uses_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_reduction_uses_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_OBSERVE_USES support - * - * Have correctness modeling record observations about uses of storage - * through the end of the current site - */ -void ITTAPI __itt_model_observe_uses(void* addr, size_t size); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_observe_uses, (void *addr, size_t size), (addr, size)) -#define __itt_model_observe_uses ITTNOTIFY_VOID(model_observe_uses) -#define __itt_model_observe_uses_ptr ITTNOTIFY_NAME(model_observe_uses) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_observe_uses(addr, size) -#define __itt_model_observe_uses_ptr 0 -#endif /* 
INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_observe_uses_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_CLEAR_USES support - * - * Clear the special handling of a piece of storage related to induction, - * reduction or observe_uses - */ -void ITTAPI __itt_model_clear_uses(void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_clear_uses, (void *addr), (addr)) -#define __itt_model_clear_uses ITTNOTIFY_VOID(model_clear_uses) -#define __itt_model_clear_uses_ptr ITTNOTIFY_NAME(model_clear_uses) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_clear_uses(addr) -#define __itt_model_clear_uses_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_clear_uses_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief ANNOTATE_DISABLE_*_PUSH/ANNOTATE_DISABLE_*_POP support - * - * disable_push/disable_pop push and pop disabling based on a parameter. - * Disabling observations stops processing of memory references during - * correctness modeling, and all annotations that occur in the disabled - * region. This allows description of code that is expected to be handled - * specially during conversion to parallelism or that is not recognized - * by tools (e.g. some kinds of synchronization operations.) - * This mechanism causes all annotations in the disabled region, other - * than disable_push and disable_pop, to be ignored. (For example, this - * might validly be used to disable an entire parallel site and the contained - * tasks and locking in it for data collection purposes.) - * The disable for collection is a more expensive operation, but reduces - * collector overhead significantly. This applies to BOTH correctness data - * collection and performance data collection. For example, a site - * containing a task might only enable data collection for the first 10 - * iterations. Both performance and correctness data should reflect this, - * and the program should run as close to full speed as possible when - * collection is disabled. 
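
A small sketch tying together the reduction/clear annotations above and the disable push/pop pair declared just below; it assumes the function runs inside an annotated site (see the earlier site/task sketch), and dump_stats() is hypothetical:

    #include "ittnotify.h"   /* public header removed in this patch */

    void dump_stats(double sum);   /* hypothetical, not interesting to model */

    double sum_array(const double *a, int n)
    {
        double sum = 0.0;
        int i;

        __itt_model_reduction_uses(&sum, sizeof(sum));  /* treat `sum` as a reduction */
        for (i = 0; i < n; ++i)
            sum += a[i];
        __itt_model_clear_uses(&sum);                   /* back to normal handling */

        /* Skip modeling and collection entirely for the diagnostics dump. */
        __itt_model_disable_push(__itt_model_disable_collection);  /* declared just below */
        dump_stats(sum);
        __itt_model_disable_pop();

        return sum;
    }
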
- */ -void ITTAPI __itt_model_disable_push(__itt_model_disable x); -void ITTAPI __itt_model_disable_pop(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x), (x)) -ITT_STUBV(ITTAPI, void, model_disable_pop, (void), ()) -#define __itt_model_disable_push ITTNOTIFY_VOID(model_disable_push) -#define __itt_model_disable_push_ptr ITTNOTIFY_NAME(model_disable_push) -#define __itt_model_disable_pop ITTNOTIFY_VOID(model_disable_pop) -#define __itt_model_disable_pop_ptr ITTNOTIFY_NAME(model_disable_pop) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_model_disable_push(x) -#define __itt_model_disable_push_ptr 0 -#define __itt_model_disable_pop() -#define __itt_model_disable_pop_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_model_disable_push_ptr 0 -#define __itt_model_disable_pop_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} model group */ - -/** - * @defgroup frames Frames - * @ingroup public - * Frames group - * @{ - */ -/** - * @brief opaque structure for frame identification - */ -typedef struct __itt_frame_t *__itt_frame; - -/** - * @brief Create a global frame with given domain - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -__itt_frame ITTAPI __itt_frame_createA(const char *domain); -__itt_frame ITTAPI __itt_frame_createW(const wchar_t *domain); -#ifdef UNICODE -# define __itt_frame_create __itt_frame_createW -# define __itt_frame_create_ptr __itt_frame_createW_ptr -#else /* UNICODE */ -# define __itt_frame_create __itt_frame_createA -# define __itt_frame_create_ptr __itt_frame_createA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -__itt_frame ITTAPI __itt_frame_create(const char *domain); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char *domain), (domain)) -ITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain), (domain)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_frame, frame_create, (const char *domain), (domain)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_frame_createA ITTNOTIFY_DATA(frame_createA) -#define __itt_frame_createA_ptr ITTNOTIFY_NAME(frame_createA) -#define __itt_frame_createW ITTNOTIFY_DATA(frame_createW) -#define __itt_frame_createW_ptr ITTNOTIFY_NAME(frame_createW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_frame_create ITTNOTIFY_DATA(frame_create) -#define __itt_frame_create_ptr ITTNOTIFY_NAME(frame_create) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_frame_createA(domain) -#define __itt_frame_createA_ptr 0 -#define __itt_frame_createW(domain) -#define __itt_frame_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_frame_create(domain) -#define __itt_frame_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_frame_createA_ptr 0 -#define __itt_frame_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_frame_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** 
@brief Record an frame begin occurrence. */ -void ITTAPI __itt_frame_begin(__itt_frame frame); -/** @brief Record an frame end occurrence. */ -void ITTAPI __itt_frame_end (__itt_frame frame); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame), (frame)) -ITT_STUBV(ITTAPI, void, frame_end, (__itt_frame frame), (frame)) -#define __itt_frame_begin ITTNOTIFY_VOID(frame_begin) -#define __itt_frame_begin_ptr ITTNOTIFY_NAME(frame_begin) -#define __itt_frame_end ITTNOTIFY_VOID(frame_end) -#define __itt_frame_end_ptr ITTNOTIFY_NAME(frame_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_frame_begin(frame) -#define __itt_frame_begin_ptr 0 -#define __itt_frame_end(frame) -#define __itt_frame_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_frame_begin_ptr 0 -#define __itt_frame_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} frames group */ - -/** - * @defgroup events Events - * @ingroup public - * Events group - * @{ - */ -/** @brief user event type */ -typedef int __itt_event; - -/** - * @brief Create an event notification - * @note name or namelen being null/name and namelen not matching, user event feature not enabled - * @return non-zero event identifier upon success and __itt_err otherwise - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -__itt_event LIBITTAPI __itt_event_createA(const char *name, int namelen); -__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen); -#ifdef UNICODE -# define __itt_event_create __itt_event_createW -# define __itt_event_create_ptr __itt_event_createW_ptr -#else -# define __itt_event_create __itt_event_createA -# define __itt_event_create_ptr __itt_event_createA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char *name, int namelen), (name, namelen)) -ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen), (name, namelen)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen), (name, namelen)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_event_createA ITTNOTIFY_DATA(event_createA) -#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA) -#define __itt_event_createW ITTNOTIFY_DATA(event_createW) -#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_event_create ITTNOTIFY_DATA(event_create) -#define __itt_event_create_ptr ITTNOTIFY_NAME(event_create) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_event_createA(name, namelen) (__itt_event)0 -#define __itt_event_createA_ptr 0 -#define __itt_event_createW(name, namelen) (__itt_event)0 -#define __itt_event_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_event_create(name, namelen) (__itt_event)0 -#define __itt_event_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if 
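
A sketch of the frame and event APIs together, assuming a non-UNICODE build (char entry points) and a hypothetical render loop; __itt_event_start/_end are declared just below:

    #include "ittnotify.h"   /* public header removed in this patch */

    void draw_scene(void);       /* hypothetical */
    void wait_for_vsync(void);   /* hypothetical */

    void render_loop(int nframes)
    {
        __itt_frame frame = __itt_frame_create("renderer");
        __itt_event vsync = __itt_event_create("vsync", 5);   /* namelen of "vsync" */
        int i;

        for (i = 0; i < nframes; ++i) {
            __itt_frame_begin(frame);
            draw_scene();
            __itt_frame_end(frame);

            __itt_event_start(vsync);     /* declared just below */
            wait_for_vsync();
            __itt_event_end(vsync);
        }
    }
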
ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_event_createA_ptr 0 -#define __itt_event_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_event_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record an event occurrence. - * @return __itt_err upon failure (invalid event id/user event feature not enabled) - */ -int LIBITTAPI __itt_event_start(__itt_event event); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event), (event)) -#define __itt_event_start ITTNOTIFY_DATA(event_start) -#define __itt_event_start_ptr ITTNOTIFY_NAME(event_start) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_event_start(event) (int)0 -#define __itt_event_start_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_event_start_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record an event end occurrence. - * @note It is optional if events do not have durations. - * @return __itt_err upon failure (invalid event id/user event feature not enabled) - */ -int LIBITTAPI __itt_event_end(__itt_event event); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event), (event)) -#define __itt_event_end ITTNOTIFY_DATA(event_end) -#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_event_end(event) (int)0 -#define __itt_event_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_event_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} events group */ - -/** - * @defgroup heap Heap - * @ingroup public - * Heap group - * @{ - */ - -typedef void* __itt_heap_function; - -/** - * @brief Create an identification for heap function - * @return non-zero identifier or NULL - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -__itt_heap_function ITTAPI __itt_heap_function_createA(const char* name, const char* domain); -__itt_heap_function ITTAPI __itt_heap_function_createW(const wchar_t* name, const wchar_t* domain); -#ifdef UNICODE -# define __itt_heap_function_create __itt_heap_function_createW -# define __itt_heap_function_create_ptr __itt_heap_function_createW_ptr -#else -# define __itt_heap_function_create __itt_heap_function_createA -# define __itt_heap_function_create_ptr __itt_heap_function_createA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -__itt_heap_function ITTAPI __itt_heap_function_create(const char* name, const char* domain); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char* name, const char* domain), (name, domain)) -ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t* name, const wchar_t* domain), (name, domain)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create, (const char* name, const char* domain), (name, domain)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_heap_function_createA ITTNOTIFY_DATA(heap_function_createA) -#define __itt_heap_function_createA_ptr 
ITTNOTIFY_NAME(heap_function_createA) -#define __itt_heap_function_createW ITTNOTIFY_DATA(heap_function_createW) -#define __itt_heap_function_createW_ptr ITTNOTIFY_NAME(heap_function_createW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_heap_function_create ITTNOTIFY_DATA(heap_function_create) -#define __itt_heap_function_create_ptr ITTNOTIFY_NAME(heap_function_create) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_heap_function_createA(name, domain) (__itt_heap_function)0 -#define __itt_heap_function_createA_ptr 0 -#define __itt_heap_function_createW(name, domain) (__itt_heap_function)0 -#define __itt_heap_function_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_heap_function_create(name, domain) (__itt_heap_function)0 -#define __itt_heap_function_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_heap_function_createA_ptr 0 -#define __itt_heap_function_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_heap_function_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record an allocation begin occurrence. - */ -void ITTAPI __itt_heap_allocate_begin(__itt_heap_function h, size_t size, int initialized); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized), (h, size, initialized)) -#define __itt_heap_allocate_begin ITTNOTIFY_VOID(heap_allocate_begin) -#define __itt_heap_allocate_begin_ptr ITTNOTIFY_NAME(heap_allocate_begin) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_allocate_begin(h, size, initialized) -#define __itt_heap_allocate_begin_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_allocate_begin_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record an allocation end occurrence. - */ -void ITTAPI __itt_heap_allocate_end(__itt_heap_function h, void* addr, size_t size, int initialized); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void* addr, size_t size, int initialized), (h, addr, size, initialized)) -#define __itt_heap_allocate_end ITTNOTIFY_VOID(heap_allocate_end) -#define __itt_heap_allocate_end_ptr ITTNOTIFY_NAME(heap_allocate_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_allocate_end(h, addr, size, initialized) -#define __itt_heap_allocate_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_allocate_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record a free begin occurrence. 
- */ -void ITTAPI __itt_heap_free_begin(__itt_heap_function h, void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr), (h, addr)) -#define __itt_heap_free_begin ITTNOTIFY_VOID(heap_free_begin) -#define __itt_heap_free_begin_ptr ITTNOTIFY_NAME(heap_free_begin) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_free_begin(h, addr) -#define __itt_heap_free_begin_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_free_begin_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record a free end occurrence. - */ -void ITTAPI __itt_heap_free_end(__itt_heap_function h, void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr), (h, addr)) -#define __itt_heap_free_end ITTNOTIFY_VOID(heap_free_end) -#define __itt_heap_free_end_ptr ITTNOTIFY_NAME(heap_free_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_free_end(h, addr) -#define __itt_heap_free_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_free_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record a reallocation begin occurrence. - */ -void ITTAPI __itt_heap_reallocate_begin(__itt_heap_function h, void* addr, size_t new_size, int initialized); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized), (h, addr, new_size, initialized)) -#define __itt_heap_reallocate_begin ITTNOTIFY_VOID(heap_reallocate_begin) -#define __itt_heap_reallocate_begin_ptr ITTNOTIFY_NAME(heap_reallocate_begin) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_reallocate_begin(h, addr, new_size, initialized) -#define __itt_heap_reallocate_begin_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_reallocate_begin_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record a reallocation end occurrence. 
- */ -void ITTAPI __itt_heap_reallocate_end(__itt_heap_function h, void* addr, void* new_addr, size_t new_size, int initialized); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void* new_addr, size_t new_size, int initialized), (h, addr, new_addr, new_size, initialized)) -#define __itt_heap_reallocate_end ITTNOTIFY_VOID(heap_reallocate_end) -#define __itt_heap_reallocate_end_ptr ITTNOTIFY_NAME(heap_reallocate_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_reallocate_end(h, addr, new_addr, new_size, initialized) -#define __itt_heap_reallocate_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_reallocate_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** @brief internal access begin */ -void ITTAPI __itt_heap_internal_access_begin(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_internal_access_begin, (void), ()) -#define __itt_heap_internal_access_begin ITTNOTIFY_VOID(heap_internal_access_begin) -#define __itt_heap_internal_access_begin_ptr ITTNOTIFY_NAME(heap_internal_access_begin) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_internal_access_begin() -#define __itt_heap_internal_access_begin_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_internal_access_begin_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** @brief internal access end */ -void ITTAPI __itt_heap_internal_access_end(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, heap_internal_access_end, (void), ()) -#define __itt_heap_internal_access_end ITTNOTIFY_VOID(heap_internal_access_end) -#define __itt_heap_internal_access_end_ptr ITTNOTIFY_NAME(heap_internal_access_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_heap_internal_access_end() -#define __itt_heap_internal_access_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_heap_internal_access_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} heap group */ - -/** @cond exclude_from_documentation */ -#ifdef __cplusplus -} -#endif /* __cplusplus */ -/** @endcond */ - -#endif /* _ITTNOTIFY_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_config.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_config.h deleted file mode 100644 index f02cc47f03..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_config.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _ITTNOTIFY_CONFIG_H_ -#define _ITTNOTIFY_CONFIG_H_ - -#ifndef ITT_OS_WIN -# define ITT_OS_WIN 1 -#endif /* ITT_OS_WIN */ - -#ifndef ITT_OS_LINUX -# define ITT_OS_LINUX 2 -#endif /* ITT_OS_LINUX */ - -#ifndef ITT_OS_MAC -# define ITT_OS_MAC 3 -#endif /* ITT_OS_MAC */ - -#ifndef ITT_OS -# if defined WIN32 || defined _WIN32 -# define ITT_OS ITT_OS_WIN -# elif defined( __APPLE__ ) && defined( __MACH__ ) -# define ITT_OS ITT_OS_MAC -# else -# define ITT_OS ITT_OS_LINUX -# endif -#endif /* ITT_OS */ - -#ifndef ITT_ARCH_IA32 -# define ITT_ARCH_IA32 1 -#endif /* ITT_ARCH_IA32 */ - -#ifndef ITT_ARCH_IA32E -# define ITT_ARCH_IA32E 2 -#endif /* ITT_ARCH_IA32E */ - -#ifndef ITT_ARCH_IA64 -# define ITT_ARCH_IA64 3 -#endif /* ITT_ARCH_IA64 */ - -#ifndef ITT_ARCH -# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__ -# define ITT_ARCH ITT_ARCH_IA32E -# elif defined _M_IA64 || defined __ia64 -# define ITT_ARCH ITT_ARCH_IA64 -# else -# define ITT_ARCH ITT_ARCH_IA32 -# endif -#endif - -#ifndef ITT_PLATFORM_WIN -# define ITT_PLATFORM_WIN 1 -#endif /* ITT_PLATFORM_WIN */ - -#ifndef ITT_PLATFORM_POSIX -# define ITT_PLATFORM_POSIX 2 -#endif /* ITT_PLATFORM_POSIX */ - -#ifndef ITT_PLATFORM -# if ITT_OS==ITT_OS_WIN -# define ITT_PLATFORM ITT_PLATFORM_WIN -# else -# define ITT_PLATFORM ITT_PLATFORM_POSIX -# endif /* _WIN32 */ -#endif /* ITT_PLATFORM */ - -#ifdef __cplusplus -# define ITT_EXTERN_C extern "C" -#else -# define ITT_EXTERN_C /* nothing */ -#endif /* __cplusplus */ - -#define ITT_TO_STR_AUX(x) #x -#define ITT_TO_STR(x) ITT_TO_STR_AUX(x) - -#define __ITT_BUILD_ASSERT(expr, suffix) do { static char __itt_build_check_##suffix[(expr) ? 1 : -1]; __itt_build_check_##suffix[0] = 0; } while(0) -#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix) -#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__) - -#endif /* _ITTNOTIFY_CONFIG_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.c b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.c deleted file mode 100644 index 85a0a6eb6a..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.c +++ /dev/null @@ -1,640 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "ittnotify_config.h" - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#include <windows.h> -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -#include <pthread.h> -#include <dlfcn.h> -#include <errno.h> -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#include <stdlib.h> -#include <stdio.h> -#include <string.h> - -#include "disable_warnings.h" - -#define INTEL_NO_MACRO_BODY -#include "ittnotify.h" -#include "legacy/ittnotify.h" -#include "internal/ittnotify.h" -#include "prototype/ittnotify.h" - -#include "ittnotify_types.h" - -#ifndef INTEL_ITTNOTIFY_PREFIX -#define INTEL_ITTNOTIFY_PREFIX __itt_ -#endif /* INTEL_ITTNOTIFY_PREFIX */ -#ifndef INTEL_ITTNOTIFY_POSTFIX -#define INTEL_ITTNOTIFY_POSTFIX _ptr_ -#endif /* INTEL_ITTNOTIFY_POSTFIX */ - -#define _N_(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) - -#ifndef CDECL -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define CDECL __cdecl -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define CDECL -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ - -#ifndef STDCALL -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define STDCALL __stdcall -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -#define STDCALL -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* STDCALL */ - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -typedef FARPROC FPTR; -typedef DWORD TIDT; -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -typedef void* FPTR; -typedef pthread_t TIDT; -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/* OS communication functions */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -typedef HMODULE lib_t; -typedef CRITICAL_SECTION mutex_t; -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -typedef void* lib_t; -typedef pthread_mutex_t mutex_t; -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -static volatile long ittnotify_init = 0; -static lib_t ittnotify_lib = NULL; -static __itt_error_notification_t* error_handler = NULL; - -#if ITT_OS==ITT_OS_WIN -static const char* ittnotify_lib_name = "libittnotify.dll"; -#elif ITT_OS==ITT_OS_LINUX -static const char* ittnotify_lib_name = "libittnotify.so"; -#elif ITT_OS==ITT_OS_MAC -static const char* ittnotify_lib_name = "libittnotify.dylib"; -#else -#error Unsupported or unknown OS. 
-#endif - -#ifndef LIB_VAR_NAME -#if ITT_ARCH==ITT_ARCH_IA32 -#define LIB_VAR_NAME INTEL_LIBITTNOTIFY32 -#else -#define LIB_VAR_NAME INTEL_LIBITTNOTIFY64 -#endif -#endif /* LIB_VAR_NAME */ - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_get_proc(lib, name) GetProcAddress(lib, name) -#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex) -#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex) -#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex) -#define __itt_load_lib(name) LoadLibraryA(name) -#define __itt_unload_lib(handle) FreeLibrary(handle) -#define __itt_system_error() (int)GetLastError() -#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2) -#define __itt_fstrlen(s) lstrlenA(s) -#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l) -#define __itt_thread_id() GetCurrentThreadId() -#define __itt_thread_yield() SwitchToThread() -#ifndef ITT_SIMPLE_INIT -static int __itt_interlocked_increment(volatile int* ptr) -{ - ITT_BUILD_ASSERT(sizeof(int) == sizeof(long)); - return InterlockedIncrement((volatile long *)ptr); -} -#endif /* ITT_SIMPLE_INIT */ -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -#define __itt_get_proc(lib, name) dlsym(lib, name) -#define __itt_mutex_init(mutex) \ - { \ - pthread_mutexattr_t mutex_attr; \ - int error_code = pthread_mutexattr_init(&mutex_attr); \ - if (error_code) \ - __itt_report_error(__itt_error_system, "pthread_mutexattr_init", error_code); \ - error_code = pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE); \ - if (error_code) \ - __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", error_code); \ - error_code = pthread_mutex_init(mutex, &mutex_attr); \ - if (error_code) \ - __itt_report_error(__itt_error_system, "pthread_mutex_init", error_code); \ - error_code = pthread_mutexattr_destroy(&mutex_attr); \ - if (error_code) \ - __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", error_code); \ - } -#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex) -#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex) -#define __itt_load_lib(name) dlopen(name, RTLD_LAZY) -#define __itt_unload_lib(handle) dlclose(handle) -#define __itt_system_error() errno -#define __itt_fstrcmp(s1, s2) strcmp(s1, s2) -#define __itt_fstrlen(s) strlen(s) -#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l) -#define __itt_thread_id() pthread_self() -#define __itt_thread_yield() sched_yield() -#if ITT_ARCH==ITT_ARCH_IA64 -#ifdef __INTEL_COMPILER -#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val) -#else /* __INTEL_COMPILER */ -// TODO: Add Support for not Intel compilers for IA64 -#endif /* __INTEL_COMPILER */ -#else /* ITT_ARCH!=ITT_ARCH_IA64 */ -#ifndef ITT_SIMPLE_INIT -static int __TBB_machine_fetchadd4(volatile void* ptr, int addend) -{ - int result; - __asm__ __volatile__("lock\nxaddl %0,%1" - : "=r"(result),"=m"(*(int *)ptr) - : "0"(addend), "m"(*(int *)ptr) - : "memory"); - return result; -} -#endif // ITT_SIMPLE_INIT -#endif /* ITT_ARCH==ITT_ARCH_IA64 */ -#ifndef ITT_SIMPLE_INIT -static int __itt_interlocked_increment(volatile int* ptr) -{ - return __TBB_machine_fetchadd4(ptr, 1) + 1; -} -#endif /* ITT_SIMPLE_INIT */ -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -const int _N_(err) = 0; - -typedef int (__itt_init_ittlib_t)(const char*, __itt_group_id); - -/* this define used to control initialization function name. 
*/ -#ifndef __itt_init_ittlib_name -static int _N_(init_ittlib)(const char*, __itt_group_id); -static __itt_init_ittlib_t* __itt_init_ittlib_ptr = _N_(init_ittlib); -#define __itt_init_ittlib_name __itt_init_ittlib_ptr -#endif /* __itt_init_ittlib_name */ - -/* building pointers to imported funcs */ -#undef ITT_STUBV -#undef ITT_STUB -#define ITT_STUB(api,type,name,args,params,ptr,group,format) \ - static type api ITT_JOIN(_N_(name),_init) args; \ - typedef type api name##_t args; \ - extern "C" name##_t* ITTNOTIFY_NAME(name); \ - name##_t* ITTNOTIFY_NAME(name) = ITT_JOIN(_N_(name),_init); \ - static type api ITT_JOIN(_N_(name),_init) args \ - { \ - if (__itt_init_ittlib_name(NULL, __itt_group_none) \ - && ITTNOTIFY_NAME(name) \ - && ITTNOTIFY_NAME(name) != ITT_JOIN(_N_(name),_init)) \ - return ITTNOTIFY_NAME(name) params; \ - else \ - return (type)0; \ - } - -#define ITT_STUBV(api,type,name,args,params,ptr,group,format) \ - static type api ITT_JOIN(_N_(name),_init) args; \ - typedef type api name##_t args; \ - extern "C" name##_t* ITTNOTIFY_NAME(name); \ - name##_t* ITTNOTIFY_NAME(name) = ITT_JOIN(_N_(name),_init); \ - static type api ITT_JOIN(_N_(name),_init) args \ - { \ - if (__itt_init_ittlib_name(NULL, __itt_group_none) \ - && ITTNOTIFY_NAME(name) \ - && ITTNOTIFY_NAME(name) != ITT_JOIN(_N_(name),_init)) \ - ITTNOTIFY_NAME(name) params; \ - else \ - return; \ - } - -/* Define types and *_init functions. */ -#include "ittnotify_static.h" - -ITT_GROUP_LIST(group_list); - -typedef struct __itt_group_alias_ -{ - const char* env_var; - __itt_group_id groups; -} __itt_group_alias; - -static __itt_group_alias group_alias[] = { - { "KMP_FOR_TPROFILE", (__itt_group_id)(__itt_group_control | __itt_group_thread | __itt_group_sync | __itt_group_mark) }, - { "KMP_FOR_TCHECK", (__itt_group_id)(__itt_group_control | __itt_group_thread | __itt_group_fsync | __itt_group_mark) }, - { NULL, (__itt_group_none) } -}; - -typedef struct __itt_func_map_ -{ - const char* name; - void** func_ptr; - __itt_group_id group; -} __itt_func_map; - -#define __ptr_(pname,name,group) {ITT_TO_STR(ITT_JOIN(__itt_,pname)), (void**)(void*)&ITTNOTIFY_NAME(name), (__itt_group_id)(group)}, -#undef ITT_STUB -#undef ITT_STUBV -#define ITT_STUB(api,type,name,args,params,nameindll,group,format) __ptr_(nameindll,name,group) -#define ITT_STUBV ITT_STUB - -static __itt_func_map func_map[] = { -#include "ittnotify_static.h" - {NULL, NULL, __itt_group_none} -}; - -#ifndef ITT_SIMPLE_INIT - -#undef ITT_STUBV -#undef ITT_STUB -#define ITT_STUBV(api,type,name,args,params,ptr,group,format) \ -ITT_EXTERN_C type api _N_(name) args \ -{ \ - if (ITTNOTIFY_NAME(name)) \ - ITTNOTIFY_NAME(name) params; \ - else \ - return; \ -} - -#define ITT_STUB(api,type,name,args,params,ptr,group,format) \ -ITT_EXTERN_C type api _N_(name) args \ -{ \ - if (ITTNOTIFY_NAME(name)) \ - return ITTNOTIFY_NAME(name) params; \ - else \ - return (type)0; \ -} - -/* Define ITT functions. 
*/ -#include "ittnotify_static.h" - -#endif /* ITT_SIMPLE_INIT */ - -static const char* __itt_fsplit(const char* s, const char* sep, const char** out, int* len) -{ - int i; - int j; - - if (!s || !sep || !out || !len) - return 0; - - for (i = 0; s[i]; i++) - { - int b = 0; - for (j = 0; sep[j]; j++) - if (s[i] == sep[j]) - { - b = 1; - break; - } - if (!b) - break; - } - - if (!s[i]) - return 0; - - *len = 0; - *out = s + i; - - for (; s[i]; i++, (*len)++) - { - int b = 0; - for (j = 0; sep[j]; j++) - if (s[i] == sep[j]) - { - b = 1; - break; - } - if (b) - break; - } - - for (; s[i]; i++) - { - int b = 0; - for (j = 0; sep[j]; j++) - if (s[i] == sep[j]) - { - b = 1; - break; - } - if (!b) - break; - } - - return s + i; -} - -#ifdef ITT_NOTIFY_EXT_REPORT -ITT_EXTERN_C void _N_(error_handler)(__itt_error_code, va_list args); -#endif /* ITT_NOTIFY_EXT_REPORT */ - -static void __itt_report_error(__itt_error_code code, ...) -{ - va_list args; - va_start( args, code ); - if (error_handler != NULL) - error_handler(code, args); -#ifdef ITT_NOTIFY_EXT_REPORT - _N_(error_handler)(code, args); -#endif /* ITT_NOTIFY_EXT_REPORT */ - va_end(args); -} - -static const char* __itt_get_env_var(const char* name) -{ -#define MAX_ENV_VALUE_SIZE 4086 - static char env_buff[MAX_ENV_VALUE_SIZE]; - static char* env_value = (char*)&env_buff; - - if (name != NULL) - { -#if ITT_PLATFORM==ITT_PLATFORM_WIN - size_t max_len = MAX_ENV_VALUE_SIZE - ((size_t)env_value - (size_t)&env_buff); - DWORD rc = GetEnvironmentVariableA(name, env_value, (DWORD)max_len); - if (rc >= max_len) - { - __itt_report_error(__itt_error_env_too_long, name, (size_t)rc - 1, (size_t)(max_len - 1)); - } - else if (rc > 0) - { - char* ret = env_value; - env_value += rc + 1; - return ret; - } - else - { - /* If environment variable is empty, GetEnvirornmentVariables() returns zero (number of */ - /* characters (not including terminating null), and GetLastError() returns ERROR_SUCCESS. */ - DWORD err = GetLastError(); - if (err == ERROR_SUCCESS) - return env_value; - - if (err != ERROR_ENVVAR_NOT_FOUND) - __itt_report_error(__itt_error_cant_read_env, name, (int)err); - } -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ - char* env = getenv(name); - if (env != NULL) - { - size_t len = strlen(env); - size_t max_len = MAX_ENV_VALUE_SIZE - ((size_t)env_value - (size_t)&env_buff); - if (len < max_len) - { - char* ret = env_value; - strncpy(env_value, env, len + 1); - env_value += len + 1; - return ret; - } else - __itt_report_error(__itt_error_env_too_long, name, (size_t)len, (size_t)(max_len - 1)); - } -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - } - return NULL; -} - -static const char* __itt_get_lib_name() -{ - const char* lib_name = __itt_get_env_var(ITT_TO_STR(LIB_VAR_NAME)); - return (lib_name == NULL) ? ittnotify_lib_name : lib_name; -} - -#ifndef min -#define min(a,b) (a) < (b) ? (a) : (b) -#endif /* min */ - -static __itt_group_id __itt_get_groups() -{ - int i; - __itt_group_id res = __itt_group_none; - - const char* var_name = "INTEL_ITTNOTIFY_GROUPS"; - const char* group_str = __itt_get_env_var(var_name); - if (group_str != NULL) - { - int len; - char gr[255]; - const char* chunk; - while ((group_str = __itt_fsplit(group_str, ",; ", &chunk, &len)) != NULL) - { - __itt_fstrcpyn(gr, chunk, sizeof(gr)); - - gr[min((size_t)len, sizeof(gr) - 1)] = 0; - - for (i = 0; group_list[i].name != NULL; i++) - { - if (!__itt_fstrcmp(gr, group_list[i].name)) - { - res = (__itt_group_id)(res | group_list[i].id); - break; - } - } - } - /* TODO: !!! 
Workaround for bug with warning for unknown group !!! - * Should be fixed in new initialization scheme. - * Now the following groups should be set always. - */ - for (i = 0; group_list[i].id != __itt_group_none; i++) - if (group_list[i].id != __itt_group_all && group_list[i].id > __itt_group_splitter) - res = (__itt_group_id)(res | group_list[i].id); - return res; - } - else - { - for (i = 0; group_alias[i].env_var != NULL; i++) - if (__itt_get_env_var(group_alias[i].env_var) != NULL) - return group_alias[i].groups; - } - - return res; -} - -static int __itt_is_legacy_lib(lib_t lib) -{ - if (lib == NULL) - return 0; // if unknown assume NO - - if (__itt_get_proc(lib, "__itt_api_version")) - return 0; // New interface - NO - return 1; // It's legacy otherwise -} - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#pragma warning(push) -#pragma warning(disable: 4054) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/* ITT_EXTERN_C - should be exported after agreament -static void _N_(fini_ittlib)(void) -{ - int i; - - if (ittnotify_init) - { - // Clear all pointers - for (i = 0; func_map[i].name != NULL; i++) - *func_map[i].func_ptr = NULL; - - if (ittnotify_lib != NULL) - __itt_unload_lib(ittnotify_lib); - - ittnotify_lib = NULL; - ittnotify_init = 0; - } -} -*/ - -static int _N_(init_ittlib)(const char* lib_name, __itt_group_id groups) -{ - int i, ret = 0; - static volatile TIDT current_thread = 0; - - if (!ittnotify_init) - { -#ifndef ITT_SIMPLE_INIT - static mutex_t mutex; - static volatile int inter_counter = 0; - static volatile int mutex_initialized = 0; - - if (!mutex_initialized) - { - if (__itt_interlocked_increment(&inter_counter) == 1) - { - __itt_mutex_init(&mutex); - mutex_initialized = 1; - } - else - while (!mutex_initialized) - __itt_thread_yield(); - } - - __itt_mutex_lock(&mutex); -#endif /* ITT_SIMPLE_INIT */ - - if (!ittnotify_init) - { - if (current_thread == 0) - { - current_thread = __itt_thread_id(); - if (groups == __itt_group_none) - groups = __itt_get_groups(); - if (groups == __itt_group_none) - { - // Clear all pointers - for (i = 0; func_map[i].name != NULL; i++ ) - *func_map[i].func_ptr = NULL; - } - else - { - __itt_group_id zero_group = __itt_group_none; - if (lib_name == NULL) - lib_name = __itt_get_lib_name(); - ittnotify_lib = __itt_load_lib(lib_name); - if (ittnotify_lib != NULL) - { - if (__itt_is_legacy_lib(ittnotify_lib)) - groups = __itt_group_legacy; - - for (i = 0; func_map[i].name != NULL; i++) - { - if (func_map[i].group & groups) - { - *func_map[i].func_ptr = (void*)__itt_get_proc(ittnotify_lib, func_map[i].name); - if (*func_map[i].func_ptr == NULL) - { - __itt_report_error(__itt_error_no_symbol, lib_name, func_map[i].name ); - zero_group = (__itt_group_id)(zero_group | func_map[i].group); - } - } - else - *func_map[i].func_ptr = NULL; - } - - if (groups == __itt_group_legacy) - { - // Compatibility with legacy tools - ITTNOTIFY_NAME(sync_prepare) = ITTNOTIFY_NAME(notify_sync_prepare); - ITTNOTIFY_NAME(sync_cancel) = ITTNOTIFY_NAME(notify_sync_cancel); - ITTNOTIFY_NAME(sync_acquired) = ITTNOTIFY_NAME(notify_sync_acquired); - ITTNOTIFY_NAME(sync_releasing) = ITTNOTIFY_NAME(notify_sync_releasing); - } - } - else - { - // Clear all pointers - for (i = 0; func_map[i].name != NULL; i++) - *func_map[i].func_ptr = NULL; - - __itt_report_error(__itt_error_no_module, lib_name, -#if ITT_PLATFORM==ITT_PLATFORM_WIN - __itt_system_error() -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - dlerror() -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - ); - } -#ifdef 
ITT_COMPLETE_GROUP - for (i = 0; func_map[i].name != NULL; i++) - if (func_map[i].group & zero_group) - *func_map[i].func_ptr = NULL; -#endif /* ITT_COMPLETE_GROUP */ - - /* evaluating if any function ptr is non empty */ - for (i = 0; func_map[i].name != NULL; i++) - { - if (*func_map[i].func_ptr != NULL) - { - ret = 1; - break; - } - } - } - - ittnotify_init = 1; - current_thread = 0; - } - } - -#ifndef ITT_SIMPLE_INIT - __itt_mutex_unlock(&mutex); -#endif /* ITT_SIMPLE_INIT */ - } - - return ret; -} - -ITT_EXTERN_C __itt_error_notification_t* _N_(set_error_handler)(__itt_error_notification_t* handler) -{ - __itt_error_notification_t* prev = error_handler; - error_handler = handler; - return prev; -} - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#pragma warning(pop) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.h deleted file mode 100644 index 109674fa7c..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_static.h +++ /dev/null @@ -1,231 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#include "ittnotify_config.h" - -#ifndef ITT_STUB -# define ITT_STUB ITT_STUBV -#endif /* ITT_STUB */ - -#ifndef ITTAPI -# define ITTAPI CDECL -#endif /* ITTAPI */ - -#ifndef LIBITTAPI -# define LIBITTAPI /* nothing */ -#endif /* LIBITTAPI */ - -#ifndef ITT_FORMAT_DEFINED -# ifndef ITT_FORMAT -# define ITT_FORMAT -# endif /* ITT_FORMAT */ -# ifndef ITT_NO_PARAMS -# define ITT_NO_PARAMS -# endif /* ITT_NO_PARAMS */ -#endif /* ITT_FORMAT_DEFINED */ - -/* - * parameters for macro expected: - * ITT_STUB(api, type, func_name, arguments, params, func_name_in_dll, group, printf_fmt) - */ -/* public */ -ITT_STUBV(ITTAPI, void, pause, (void), (ITT_NO_PARAMS), pause, __itt_group_control | __itt_group_legacy, "no args") -ITT_STUBV(ITTAPI, void, resume, (void), (ITT_NO_PARAMS), resume, __itt_group_control | __itt_group_legacy, "no args") - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUBV(ITTAPI, void, thread_set_nameA, (const char *name), (ITT_FORMAT name), thread_set_nameA, __itt_group_thread, "\"%s\"") -ITT_STUBV(ITTAPI, void, thread_set_nameW, (const wchar_t *name), (ITT_FORMAT name), thread_set_nameW, __itt_group_thread, "\"%S\"") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, thread_set_name, (const char *name), (ITT_FORMAT name), thread_set_name, __itt_group_thread, "\"%s\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, thread_ignore, (void), (ITT_NO_PARAMS), thread_ignore, __itt_group_thread, "no args") - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUBV(ITTAPI, void, sync_createA, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createA, __itt_group_sync | __itt_group_fsync, "%p, \"%s\", \"%s\", %x") -ITT_STUBV(ITTAPI, void, sync_createW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_createW, __itt_group_sync | __itt_group_fsync, "%p, \"%S\", \"%S\", %x") -ITT_STUBV(ITTAPI, void, sync_renameA, (void *addr, const char *name), (ITT_FORMAT addr, name), sync_renameA, __itt_group_sync | __itt_group_fsync, "%p, \"%s\"") -ITT_STUBV(ITTAPI, void, sync_renameW, (void *addr, const wchar_t *name), (ITT_FORMAT addr, name), sync_renameW, __itt_group_sync | __itt_group_fsync, "%p, \"%S\"") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, sync_create, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_create, __itt_group_sync | __itt_group_fsync, "%p, \"%s\", \"%s\", %x") -ITT_STUBV(ITTAPI, void, sync_rename, (void *addr, const char *name), (ITT_FORMAT addr, name), sync_rename, __itt_group_sync | __itt_group_fsync, "%p, \"%s\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, sync_destroy, (void *addr), (ITT_FORMAT addr), sync_destroy, __itt_group_sync | __itt_group_fsync, "%p") - -ITT_STUBV(ITTAPI, void, sync_prepare, (void* addr), (ITT_FORMAT addr), sync_prepare, __itt_group_sync, "%p") -ITT_STUBV(ITTAPI, void, sync_cancel, (void *addr), (ITT_FORMAT addr), sync_cancel, __itt_group_sync, "%p") -ITT_STUBV(ITTAPI, void, sync_acquired, (void *addr), (ITT_FORMAT addr), sync_acquired, __itt_group_sync, "%p") -ITT_STUBV(ITTAPI, void, sync_releasing, (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_sync, "%p") - -ITT_STUBV(ITTAPI, void, fsync_prepare, (void* addr), (ITT_FORMAT addr), sync_prepare, __itt_group_fsync, "%p") -ITT_STUBV(ITTAPI, void, fsync_cancel, (void *addr), (ITT_FORMAT 
addr), sync_cancel, __itt_group_fsync, "%p") -ITT_STUBV(ITTAPI, void, fsync_acquired, (void *addr), (ITT_FORMAT addr), sync_acquired, __itt_group_fsync, "%p") -ITT_STUBV(ITTAPI, void, fsync_releasing, (void* addr), (ITT_FORMAT addr), sync_releasing, __itt_group_fsync, "%p") - -ITT_STUBV(ITTAPI, void, model_site_begin, (__itt_model_site *site, __itt_model_site_instance *instance, const char *name), (ITT_FORMAT site, instance, name), model_site_begin, __itt_group_model, "%p, %p, \"%s\"") -ITT_STUBV(ITTAPI, void, model_site_end, (__itt_model_site *site, __itt_model_site_instance *instance), (ITT_FORMAT site, instance), model_site_end, __itt_group_model, "%p, %p") -ITT_STUBV(ITTAPI, void, model_task_begin, (__itt_model_task *task, __itt_model_task_instance *instance, const char *name), (ITT_FORMAT task, instance, name), model_task_begin, __itt_group_model, "%p, %p, \"%s\"") -ITT_STUBV(ITTAPI, void, model_task_end, (__itt_model_task *task, __itt_model_task_instance *instance), (ITT_FORMAT task, instance), model_task_end, __itt_group_model, "%p, %p") -ITT_STUBV(ITTAPI, void, model_lock_acquire, (void *lock), (ITT_FORMAT lock), model_lock_acquire, __itt_group_model, "%p") -ITT_STUBV(ITTAPI, void, model_lock_release, (void *lock), (ITT_FORMAT lock), model_lock_release, __itt_group_model, "%p") -ITT_STUBV(ITTAPI, void, model_record_allocation, (void *addr, size_t size), (ITT_FORMAT addr, size), model_record_allocation, __itt_group_model, "%p, %d") -ITT_STUBV(ITTAPI, void, model_record_deallocation, (void *addr), (ITT_FORMAT addr), model_record_deallocation, __itt_group_model, "%p") -ITT_STUBV(ITTAPI, void, model_induction_uses, (void* addr, size_t size), (ITT_FORMAT addr, size), model_induction_uses, __itt_group_model, "%p, %d") -ITT_STUBV(ITTAPI, void, model_reduction_uses, (void* addr, size_t size), (ITT_FORMAT addr, size), model_reduction_uses, __itt_group_model, "%p, %d") -ITT_STUBV(ITTAPI, void, model_observe_uses, (void* addr, size_t size), (ITT_FORMAT addr, size), model_observe_uses, __itt_group_model, "%p, %d") -ITT_STUBV(ITTAPI, void, model_clear_uses, (void* addr), (ITT_FORMAT addr), model_clear_uses, __itt_group_model, "%p") -ITT_STUBV(ITTAPI, void, model_disable_push, (__itt_model_disable x), (ITT_FORMAT x), model_disable_push, __itt_group_model, "%p") -ITT_STUBV(ITTAPI, void, model_disable_pop, (void), (ITT_NO_PARAMS), model_disable_pop, __itt_group_model, "no args") - -#ifndef __ITT_INTERNAL_BODY -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_counter, counter_createA, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_createA, __itt_group_counter, "\"%s\", \"%s\"") -ITT_STUB(ITTAPI, __itt_counter, counter_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), counter_createW, __itt_group_counter, "\"%s\", \"%s\"") -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_counter, counter_create, (const char *name, const char *domain), (ITT_FORMAT name, domain), counter_create, __itt_group_counter, "\"%s\", \"%s\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* __ITT_INTERNAL_BODY */ -ITT_STUBV(ITTAPI, void, counter_destroy, (__itt_counter id), (ITT_FORMAT id), counter_destroy, __itt_group_counter, "%p") -ITT_STUBV(ITTAPI, void, counter_inc, (__itt_counter id), (ITT_FORMAT id), counter_inc, __itt_group_counter, "%p") -ITT_STUBV(ITTAPI, void, counter_inc_delta, (__itt_counter id, unsigned long long value), (ITT_FORMAT id, value), counter_inc_delta, __itt_group_counter, "%p, %lu") - -#ifndef 
__ITT_INTERNAL_BODY -ITT_STUB(ITTAPI, __itt_caller, stack_caller_create, (void), (ITT_NO_PARAMS), stack_caller_create, __itt_group_stitch, "no args") -#endif /* __ITT_INTERNAL_BODY */ -ITT_STUBV(ITTAPI, void, stack_caller_destroy, (__itt_caller id), (ITT_FORMAT id), stack_caller_destroy, __itt_group_stitch, "%p") -ITT_STUBV(ITTAPI, void, stack_callee_enter, (__itt_caller id), (ITT_FORMAT id), stack_callee_enter, __itt_group_stitch, "%p") -ITT_STUBV(ITTAPI, void, stack_callee_leave, (__itt_caller id), (ITT_FORMAT id), stack_callee_leave, __itt_group_stitch, "%p") - -#ifndef __ITT_INTERNAL_BODY -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_frame, frame_createA, (const char *domain), (ITT_FORMAT domain), frame_createA, __itt_group_frame, "\"%s\"") -ITT_STUB(ITTAPI, __itt_frame, frame_createW, (const wchar_t *domain), (ITT_FORMAT domain), frame_createW, __itt_group_frame, "\"%s\"") -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_frame, frame_create, (const char *domain), (ITT_FORMAT domain), frame_create, __itt_group_frame, "\"%s\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* __ITT_INTERNAL_BODY */ -ITT_STUBV(ITTAPI, void, frame_begin, (__itt_frame frame), (ITT_FORMAT frame), frame_begin, __itt_group_frame, "%p") -ITT_STUBV(ITTAPI, void, frame_end, (__itt_frame frame), (ITT_FORMAT frame), frame_end, __itt_group_frame, "%p") - -#ifndef __ITT_INTERNAL_BODY -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char *name, int namelen), (ITT_FORMAT name, namelen), event_createA, __itt_group_mark | __itt_group_legacy, "\"%s\", %d") -ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), event_createW, __itt_group_mark | __itt_group_legacy, "\"%S\", %d") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen), (ITT_FORMAT name, namelen), event_create, __itt_group_mark | __itt_group_legacy, "\"%s\", %d") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event), (ITT_FORMAT event), event_start, __itt_group_mark | __itt_group_legacy, "%d") -ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event), (ITT_FORMAT event), event_end, __itt_group_mark | __itt_group_legacy, "%d") -#endif /* __ITT_INTERNAL_BODY */ - -#ifndef __ITT_INTERNAL_BODY -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createA, (const char *name, const char *domain), (ITT_FORMAT name, domain), heap_function_createA, __itt_group_heap, "\"%s\", \"%s\"") -ITT_STUB(ITTAPI, __itt_heap_function, heap_function_createW, (const wchar_t *name, const wchar_t *domain), (ITT_FORMAT name, domain), heap_function_createW, __itt_group_heap, "\"%s\", \"%s\"") -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_heap_function, heap_function_create, (const char *name, const char *domain), (ITT_FORMAT name, domain), heap_function_create, __itt_group_heap, "\"%s\", \"%s\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* __ITT_INTERNAL_BODY */ -ITT_STUBV(ITTAPI, void, heap_allocate_begin, (__itt_heap_function h, size_t size, int initialized), (ITT_FORMAT h, size, initialized), heap_allocate_begin, __itt_group_heap, "%p, %lu, %d") -ITT_STUBV(ITTAPI, void, heap_allocate_end, (__itt_heap_function h, void* addr, size_t size, int initialized), (ITT_FORMAT h, addr, size, initialized), heap_allocate_end, __itt_group_heap, "%p, %p, %lu, %d") 
-ITT_STUBV(ITTAPI, void, heap_free_begin, (__itt_heap_function h, void* addr), (ITT_FORMAT h, addr), heap_free_begin, __itt_group_heap, "%p, %p") -ITT_STUBV(ITTAPI, void, heap_free_end, (__itt_heap_function h, void* addr), (ITT_FORMAT h, addr), heap_free_end, __itt_group_heap, "%p, %p") -ITT_STUBV(ITTAPI, void, heap_reallocate_begin, (__itt_heap_function h, void* addr, size_t new_size, int initialized), (ITT_FORMAT h, addr, new_size, initialized), heap_reallocate_begin, __itt_group_heap, "%p, %p, %lu, %d") -ITT_STUBV(ITTAPI, void, heap_reallocate_end, (__itt_heap_function h, void* addr, void* new_addr, size_t new_size, int initialized), (ITT_FORMAT h, addr, new_addr, new_size, initialized), heap_reallocate_end, __itt_group_heap, "%p, %p, %p, %lu, %d") -ITT_STUBV(ITTAPI, void, heap_internal_access_begin, (void), (ITT_NO_PARAMS), heap_internal_access_begin, __itt_group_heap, "no args") -ITT_STUBV(ITTAPI, void, heap_internal_access_end, (void), (ITT_NO_PARAMS), heap_internal_access_end, __itt_group_heap, "no args") - -/* legacy */ -#ifndef __ITT_INTERNAL_BODY -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(LIBITTAPI, int, thr_name_setA, (const char *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setA, __itt_group_thread | __itt_group_legacy, "\"%s\", %d") -ITT_STUB(LIBITTAPI, int, thr_name_setW, (const wchar_t *name, int namelen), (ITT_FORMAT name, namelen), thr_name_setW, __itt_group_thread | __itt_group_legacy, "\"%S\", %d") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, int, thr_name_set, (const char *name, int namelen), (ITT_FORMAT name, namelen), thr_name_set, __itt_group_thread | __itt_group_legacy, "\"%s\", %d") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUBV(LIBITTAPI, void, thr_ignore, (void), (ITT_NO_PARAMS), thr_ignore, __itt_group_thread | __itt_group_legacy, "no args") - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", \"%s\", %x") -ITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%S\", \"%S\", %x") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, sync_set_name, (void *addr, const char *objtype, const char *objname, int attribute), (ITT_FORMAT addr, objtype, objname, attribute), sync_set_name, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "p, \"%s\", \"%s\", %x") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *p, const char *objtype, int typelen, const char *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_nameA, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", %d, \"%s\", %d, %x") -ITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *p, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_nameW, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%S\", %d, \"%S\", %d, %x") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, int, notify_sync_name, (void *p, const char *objtype, int 
typelen, const char *objname, int namelen, int attribute), (ITT_FORMAT p, objtype, typelen, objname, namelen, attribute), notify_sync_name, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p, \"%s\", %d, \"%s\", %d, %x") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -ITT_STUBV(LIBITTAPI, void, notify_sync_prepare, (void *p), (ITT_FORMAT p), notify_sync_prepare, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p") -ITT_STUBV(LIBITTAPI, void, notify_sync_cancel, (void *p), (ITT_FORMAT p), notify_sync_cancel, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p") -ITT_STUBV(LIBITTAPI, void, notify_sync_acquired, (void *p), (ITT_FORMAT p), notify_sync_acquired, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p") -ITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *p), (ITT_FORMAT p), notify_sync_releasing, __itt_group_sync | __itt_group_fsync | __itt_group_legacy, "%p") -#endif /* __ITT_INTERNAL_BODY */ - -ITT_STUBV(LIBITTAPI, void, memory_read, (void *addr, size_t size), (ITT_FORMAT addr, size), memory_read, __itt_group_legacy, "%p, %lu") -ITT_STUBV(LIBITTAPI, void, memory_write, (void *addr, size_t size), (ITT_FORMAT addr, size), memory_write, __itt_group_legacy, "%p, %lu") -ITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size), (ITT_FORMAT addr, size), memory_update, __itt_group_legacy, "%p, %lu") - -ITT_STUB(LIBITTAPI, __itt_state_t, state_get, (void), (ITT_NO_PARAMS), state_get, __itt_group_legacy, "no args") -ITT_STUB(LIBITTAPI, __itt_state_t, state_set, (__itt_state_t s), (ITT_FORMAT s), state_set, __itt_group_legacy, "%d") -ITT_STUB(LIBITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s), (ITT_FORMAT p, s), obj_mode_set, __itt_group_legacy, "%d, %d") -ITT_STUB(LIBITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s), (ITT_FORMAT p, s), thr_mode_set, __itt_group_legacy, "%d, %d") - -/* internal */ -#ifndef __ITT_INTERNAL_BODY -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, __itt_mark_type, mark_createA, (const char *name), (ITT_FORMAT name), mark_createA, __itt_group_mark, "\"%s\"") -ITT_STUB(ITTAPI, __itt_mark_type, mark_createW, (const wchar_t *name), (ITT_FORMAT name), mark_createW, __itt_group_mark, "\"%S\"") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, __itt_mark_type, mark_create, (const char *name), (ITT_FORMAT name), mark_create, __itt_group_mark, "\"%s\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* __ITT_INTERNAL_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, int, markA, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), markA, __itt_group_mark, "%d, \"%s\"") -ITT_STUB(ITTAPI, int, markW, (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), markW, __itt_group_mark, "%d, \"%S\"") -#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, int, mark, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), mark, __itt_group_mark, "%d, \"%s\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, int, mark_off, (__itt_mark_type mt), (ITT_FORMAT mt), mark_off, __itt_group_mark, "%d") -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(ITTAPI, int, mark_globalA, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), mark_globalA, __itt_group_mark, "%d, \"%s\"") -ITT_STUB(ITTAPI, int, mark_globalW, (__itt_mark_type mt, const wchar_t *parameter), (ITT_FORMAT mt, parameter), mark_globalW, __itt_group_mark, "%d, \"%S\"") -#else 
/* ITT_PLATFORM!=ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, int, mark_global, (__itt_mark_type mt, const char *parameter), (ITT_FORMAT mt, parameter), mark_global, __itt_group_mark, "%d, \"%S\"") -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(ITTAPI, int, mark_global_off, (__itt_mark_type mt), (ITT_FORMAT mt), mark_global_off, __itt_group_mark, "%d") - -/* prototype */ -/* empty so far */ - -/* hidden */ -#ifndef __ITT_INTERNAL_BODY -ITT_STUB(ITTAPI, const char*, api_version, (void), (ITT_NO_PARAMS), api_version, __itt_group_all & ~__itt_group_legacy, "no args") -#endif /* __ITT_INTERNAL_BODY */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_types.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_types.h deleted file mode 100644 index ec0d696de9..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/ittnotify_types.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _ITTNOTIFY_TYPES_H_ -#define _ITTNOTIFY_TYPES_H_ - -typedef enum __itt_group_id_ -{ - __itt_group_none = 0, - __itt_group_legacy = 1<<0, - __itt_group_control = 1<<1, - __itt_group_thread = 1<<2, - __itt_group_mark = 1<<3, - __itt_group_sync = 1<<4, - __itt_group_fsync = 1<<5, - __itt_group_jit = 1<<6, - __itt_group_model = 1<<7, - __itt_group_splitter= 1<<7, -//----------------------------- - __itt_group_counter = 1<<8, - __itt_group_frame = 1<<9, - __itt_group_stitch = 1<<10, - __itt_group_heap = 1<<11, - __itt_group_all = -1 -} __itt_group_id; - -typedef struct __itt_group_list_ -{ - __itt_group_id id; - const char* name; -} __itt_group_list; - -#define ITT_GROUP_LIST(varname) \ - static __itt_group_list varname[] = { \ - { __itt_group_all, "all" }, \ - { __itt_group_control, "control" }, \ - { __itt_group_thread, "thread" }, \ - { __itt_group_mark, "mark" }, \ - { __itt_group_sync, "sync" }, \ - { __itt_group_fsync, "fsync" }, \ - { __itt_group_jit, "jit" }, \ - { __itt_group_model, "model" }, \ - { __itt_group_counter, "counter" }, \ - { __itt_group_frame, "frame" }, \ - { __itt_group_stitch, "stitch" }, \ - { __itt_group_heap, "heap" }, \ - { __itt_group_none, NULL } \ - } - -#endif /* _ITTNOTIFY_TYPES_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/legacy/ittnotify.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/legacy/ittnotify.h deleted file mode 100644 index dff5bbcf26..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/legacy/ittnotify.h +++ /dev/null @@ -1,817 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _LEGACY_ITTNOTIFY_H_ -#define _LEGACY_ITTNOTIFY_H_ -/** - * @file - * @brief Legacy User API functions and types - */ - -/** @cond exclude_from_documentation */ -#ifndef ITT_OS_WIN -# define ITT_OS_WIN 1 -#endif /* ITT_OS_WIN */ - -#ifndef ITT_OS_LINUX -# define ITT_OS_LINUX 2 -#endif /* ITT_OS_LINUX */ - -#ifndef ITT_OS_MAC -# define ITT_OS_MAC 3 -#endif /* ITT_OS_MAC */ - -#ifndef ITT_OS -# if defined WIN32 || defined _WIN32 -# define ITT_OS ITT_OS_WIN -# elif defined( __APPLE__ ) && defined( __MACH__ ) -# define ITT_OS ITT_OS_MAC -# else -# define ITT_OS ITT_OS_LINUX -# endif -#endif /* ITT_OS */ - -#ifndef ITT_PLATFORM_WIN -# define ITT_PLATFORM_WIN 1 -#endif /* ITT_PLATFORM_WIN */ - -#ifndef ITT_PLATFORM_POSIX -# define ITT_PLATFORM_POSIX 2 -#endif /* ITT_PLATFORM_POSIX */ - -#ifndef ITT_PLATFORM -# if ITT_OS==ITT_OS_WIN -# define ITT_PLATFORM ITT_PLATFORM_WIN -# else -# define ITT_PLATFORM ITT_PLATFORM_POSIX -# endif /* _WIN32 */ -#endif /* ITT_PLATFORM */ - -#include <stddef.h> -#include <stdarg.h> -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#include <tchar.h> -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#ifndef CDECL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define CDECL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ - -#ifndef STDCALL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define STDCALL __stdcall -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define STDCALL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* STDCALL */ - -#define ITTAPI CDECL -#define LIBITTAPI /* nothing */ - -#define ITT_JOIN_AUX(p,n) p##n -#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) - -#ifndef INTEL_ITTNOTIFY_PREFIX -# define INTEL_ITTNOTIFY_PREFIX __itt_ -#endif /* INTEL_ITTNOTIFY_PREFIX */ -#ifndef INTEL_ITTNOTIFY_POSTFIX -# define INTEL_ITTNOTIFY_POSTFIX _ptr_ -#endif /* INTEL_ITTNOTIFY_POSTFIX */ - -#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) -#define ITTNOTIFY_NAME(n) ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)) - -#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) - -#ifdef ITT_STUB -#undef ITT_STUB -#endif -#ifdef ITT_STUBV -#undef ITT_STUBV -#endif -#define ITT_STUBV(api,type,name,args,params) \ - typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ - extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); -#define ITT_STUB ITT_STUBV - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ -/** @endcond */ - -/** - * @defgroup legacy Legacy API - * @{ - * @} - */ - -/** - * @defgroup legacy_control Collection Control - * @ingroup legacy - * General behavior: application continues to run, but no profiling information is being collected - * - * Pausing occurs not only for the current thread but for all process as well as spawned processes - * - Intel(R) Parallel Inspector: - * - Does not analyze or report errors that involve memory access. - * - Other errors are reported as usual. Pausing data collection in - * Intel(R) Parallel Inspector only pauses tracing and analyzing - * memory access. It does not pause tracing or analyzing threading APIs. - * . - * - Intel(R) Parallel Amplifier: - * - Does continue to record when new threads are started. - * . - * - Other effects: - * - Possible reduction of runtime overhead. - * . 
- * @{ - */ -#ifndef _ITTNOTIFY_H_ -/** @brief Pause collection */ -void ITTAPI __itt_pause(void); -/** @brief Resume collection */ -void ITTAPI __itt_resume(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(ITTAPI, void, pause, (void), ()) -ITT_STUBV(ITTAPI, void, resume, (void), ()) -#define __itt_pause ITTNOTIFY_VOID(pause) -#define __itt_pause_ptr ITTNOTIFY_NAME(pause) -#define __itt_resume ITTNOTIFY_VOID(resume) -#define __itt_resume_ptr ITTNOTIFY_NAME(resume) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_pause() -#define __itt_pause_ptr 0 -#define __itt_resume() -#define __itt_resume_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_pause_ptr 0 -#define __itt_resume_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -#endif /* _ITTNOTIFY_H_ */ -/** @} legacy_control group */ - -/** - * @defgroup legacy_threads Threads - * @ingroup legacy - * Threads group - * @warning Legacy API - * @{ - */ -/** - * @deprecated Legacy API - * @brief Set name to be associated with thread in analysis GUI. - * @return __itt_err upon failure (name or namelen being null,name and namelen mismatched) - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -int LIBITTAPI __itt_thr_name_setA(const char *name, int namelen); -int LIBITTAPI __itt_thr_name_setW(const wchar_t *name, int namelen); -#ifdef UNICODE -# define __itt_thr_name_set __itt_thr_name_setW -# define __itt_thr_name_set_ptr __itt_thr_name_setW_ptr -#else -# define __itt_thr_name_set __itt_thr_name_setA -# define __itt_thr_name_set_ptr __itt_thr_name_setA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -int LIBITTAPI __itt_thr_name_set(const char *name, int namelen); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(LIBITTAPI, int, thr_name_setA, (const char *name, int namelen), (name, namelen)) -ITT_STUB(LIBITTAPI, int, thr_name_setW, (const wchar_t *name, int namelen), (name, namelen)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, int, thr_name_set, (const char *name, int namelen), (name, namelen)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_thr_name_setA ITTNOTIFY_DATA(thr_name_setA) -#define __itt_thr_name_setA_ptr ITTNOTIFY_NAME(thr_name_setA) -#define __itt_thr_name_setW ITTNOTIFY_DATA(thr_name_setW) -#define __itt_thr_name_setW_ptr ITTNOTIFY_NAME(thr_name_setW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_thr_name_set ITTNOTIFY_DATA(thr_name_set) -#define __itt_thr_name_set_ptr ITTNOTIFY_NAME(thr_name_set) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_thr_name_setA(name, namelen) -#define __itt_thr_name_setA_ptr 0 -#define __itt_thr_name_setW(name, namelen) -#define __itt_thr_name_setW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_thr_name_set(name, namelen) -#define __itt_thr_name_set_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_thr_name_setA_ptr 0 -#define __itt_thr_name_setW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_thr_name_set_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - 
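For context: the legacy entry points declared above are reached through the ITT_STUB / ITTNOTIFY_VOID indirection quoted earlier, so a call such as __itt_pause() expands, roughly, to (!__itt_pause_ptr_) ? (void)0 : __itt_pause_ptr_() and remains a cheap null-pointer check unless a profiling tool fills in the pointer at run time. Below is a minimal usage sketch of the collection-control and thread-naming calls, assuming the POSIX signatures shown above; it is not part of the diff, and the include path and the "setup" work are illustrative only:

    #include <string.h>
    #include "ittnotify.h"   /* legacy header removed by this patch; path is illustrative */

    static void run_instrumented_phase(void)
    {
        const char *name = "worker-thread";

        /* Name the current thread for the analysis GUI; returns __itt_err on failure
           and is effectively a no-op when no tool is attached. */
        __itt_thr_name_set(name, (int)strlen(name));

        __itt_pause();           /* suspend data collection around uninteresting setup work */
        /* ... setup work that should not be profiled ... */
        __itt_resume();          /* resume collection for the region of interest */
    }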
-/** - * @deprecated Legacy API - * @brief Mark current thread as ignored from this point on, for the duration of its existence. - */ -void LIBITTAPI __itt_thr_ignore(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, thr_ignore, (void), ()) -#define __itt_thr_ignore ITTNOTIFY_VOID(thr_ignore) -#define __itt_thr_ignore_ptr ITTNOTIFY_NAME(thr_ignore) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_thr_ignore() -#define __itt_thr_ignore_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_thr_ignore_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} legacy_threads group */ - -/** - * @defgroup legacy_sync Synchronization - * @ingroup legacy - * Synchronization group - * @warning Legacy API - * @{ - */ -/** - * @hideinitializer - * @brief possible value of attribute argument for sync object type - */ -#define __itt_attr_barrier 1 - -/** - * @hideinitializer - * @brief possible value of attribute argument for sync object type - */ -#define __itt_attr_mutex 2 - -/** - * @deprecated Legacy API - * @brief Assign a name to a sync object using char or Unicode string - * @param[in] addr - pointer to the sync object. You should use a real pointer to your object - * to make sure that the values don't clash with other object addresses - * @param[in] objtype - null-terminated object type string. If NULL is passed, the object will - * be assumed to be of generic "User Synchronization" type - * @param[in] objname - null-terminated object name string. If NULL, no name will be assigned - * to the object -- you can use the __itt_sync_rename call later to assign - * the name - * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the - * exact semantics of how prepare/acquired/releasing calls work. 
- */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -void ITTAPI __itt_sync_set_nameA(void *addr, const char *objtype, const char *objname, int attribute); -void ITTAPI __itt_sync_set_nameW(void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute); -#ifdef UNICODE -# define __itt_sync_set_name __itt_sync_set_nameW -# define __itt_sync_set_name_ptr __itt_sync_set_nameW_ptr -#else /* UNICODE */ -# define __itt_sync_set_name __itt_sync_set_nameA -# define __itt_sync_set_name_ptr __itt_sync_set_nameA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -void ITTAPI __itt_sync_set_name(void *addr, const char* objtype, const char* objname, int attribute); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUBV(ITTAPI, void, sync_set_nameA, (void *addr, const char *objtype, const char *objname, int attribute), (addr, objtype, objname, attribute)) -ITT_STUBV(ITTAPI, void, sync_set_nameW, (void *addr, const wchar_t *objtype, const wchar_t *objname, int attribute), (addr, objtype, objname, attribute)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUBV(ITTAPI, void, sync_set_name, (void *addr, const char *objtype, const char *objname, int attribute), (addr, objtype, objname, attribute)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_set_nameA ITTNOTIFY_VOID(sync_set_nameA) -#define __itt_sync_set_nameA_ptr ITTNOTIFY_NAME(sync_set_nameA) -#define __itt_sync_set_nameW ITTNOTIFY_VOID(sync_set_nameW) -#define __itt_sync_set_nameW_ptr ITTNOTIFY_NAME(sync_set_nameW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_set_name ITTNOTIFY_VOID(sync_set_name) -#define __itt_sync_set_name_ptr ITTNOTIFY_NAME(sync_set_name) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_set_nameA(addr, objtype, objname, attribute) -#define __itt_sync_set_nameA_ptr 0 -#define __itt_sync_set_nameW(addr, objtype, objname, attribute) -#define __itt_sync_set_nameW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_set_name(addr, objtype, objname, attribute) -#define __itt_sync_set_name_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_sync_set_nameA_ptr 0 -#define __itt_sync_set_nameW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_sync_set_name_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief Assign a name and type to a sync object using char or Unicode string - * @param[in] addr - pointer to the sync object. You should use a real pointer to your object - * to make sure that the values don't clash with other object addresses - * @param[in] objtype - null-terminated object type string. If NULL is passed, the object will - * be assumed to be of generic "User Synchronization" type - * @param[in] objname - null-terminated object name string. 
If NULL, no name will be assigned - * to the object -- you can use the __itt_sync_rename call later to assign - * the name - * @param[in] typelen, namelen - a lenght of string for appropriate objtype and objname parameter - * @param[in] attribute - one of [#__itt_attr_barrier, #__itt_attr_mutex] values which defines the - * exact semantics of how prepare/acquired/releasing calls work. - * @return __itt_err upon failure (name or namelen being null,name and namelen mismatched) - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -int LIBITTAPI __itt_notify_sync_nameA(void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute); -int LIBITTAPI __itt_notify_sync_nameW(void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute); -#ifdef UNICODE -# define __itt_notify_sync_name __itt_notify_sync_nameW -#else -# define __itt_notify_sync_name __itt_notify_sync_nameA -#endif -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -int LIBITTAPI __itt_notify_sync_name(void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(LIBITTAPI, int, notify_sync_nameA, (void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute), (addr, objtype, typelen, objname, namelen, attribute)) -ITT_STUB(LIBITTAPI, int, notify_sync_nameW, (void *addr, const wchar_t *objtype, int typelen, const wchar_t *objname, int namelen, int attribute), (addr, objtype, typelen, objname, namelen, attribute)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, int, notify_sync_name, (void *addr, const char *objtype, int typelen, const char *objname, int namelen, int attribute), (addr, objtype, typelen, objname, namelen, attribute)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_notify_sync_nameA ITTNOTIFY_DATA(notify_sync_nameA) -#define __itt_notify_sync_nameA_ptr ITTNOTIFY_NAME(notify_sync_nameA) -#define __itt_notify_sync_nameW ITTNOTIFY_DATA(notify_sync_nameW) -#define __itt_notify_sync_nameW_ptr ITTNOTIFY_NAME(notify_sync_nameW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_notify_sync_name ITTNOTIFY_DATA(notify_sync_name) -#define __itt_notify_sync_name_ptr ITTNOTIFY_NAME(notify_sync_name) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_notify_sync_nameA(addr, objtype, typelen, objname, namelen, attribute) -#define __itt_notify_sync_nameA_ptr 0 -#define __itt_notify_sync_nameW(addr, objtype, typelen, objname, namelen, attribute) -#define __itt_notify_sync_nameW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_notify_sync_name(addr, objtype, typelen, objname, namelen, attribute) -#define __itt_notify_sync_name_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_notify_sync_nameA_ptr 0 -#define __itt_notify_sync_nameW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_notify_sync_name_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief Enter spin loop on user-defined sync object - */ -void LIBITTAPI 
__itt_notify_sync_prepare(void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, notify_sync_prepare, (void *addr), (addr)) -#define __itt_notify_sync_prepare ITTNOTIFY_VOID(notify_sync_prepare) -#define __itt_notify_sync_prepare_ptr ITTNOTIFY_NAME(notify_sync_prepare) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_notify_sync_prepare(addr) -#define __itt_notify_sync_prepare_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_notify_sync_prepare_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief Quit spin loop without acquiring spin object - */ -void LIBITTAPI __itt_notify_sync_cancel(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, notify_sync_cancel, (void *addr), (addr)) -#define __itt_notify_sync_cancel ITTNOTIFY_VOID(notify_sync_cancel) -#define __itt_notify_sync_cancel_ptr ITTNOTIFY_NAME(notify_sync_cancel) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_notify_sync_cancel(addr) -#define __itt_notify_sync_cancel_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_notify_sync_cancel_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief Successful spin loop completion (sync object acquired) - */ -void LIBITTAPI __itt_notify_sync_acquired(void *addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, notify_sync_acquired, (void *addr), (addr)) -#define __itt_notify_sync_acquired ITTNOTIFY_VOID(notify_sync_acquired) -#define __itt_notify_sync_acquired_ptr ITTNOTIFY_NAME(notify_sync_acquired) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_notify_sync_acquired(addr) -#define __itt_notify_sync_acquired_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_notify_sync_acquired_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief Start sync object releasing code. Is called before the lock release call. 
- */ -void LIBITTAPI __itt_notify_sync_releasing(void* addr); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, notify_sync_releasing, (void *addr), (addr)) -#define __itt_notify_sync_releasing ITTNOTIFY_VOID(notify_sync_releasing) -#define __itt_notify_sync_releasing_ptr ITTNOTIFY_NAME(notify_sync_releasing) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_notify_sync_releasing(addr) -#define __itt_notify_sync_releasing_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_notify_sync_releasing_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} legacy_sync group */ - -#ifndef _ITTNOTIFY_H_ -/** - * @defgroup legacy_events Events - * @ingroup legacy - * Events group - * @{ - */ - -/** @brief user event type */ -typedef int __itt_event; - -/** - * @brief Create an event notification - * @note name or namelen being null/name and namelen not matching, user event feature not enabled - * @return non-zero event identifier upon success and __itt_err otherwise - */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -__itt_event LIBITTAPI __itt_event_createA(const char *name, int namelen); -__itt_event LIBITTAPI __itt_event_createW(const wchar_t *name, int namelen); -#ifdef UNICODE -# define __itt_event_create __itt_event_createW -# define __itt_event_create_ptr __itt_event_createW_ptr -#else -# define __itt_event_create __itt_event_createA -# define __itt_event_create_ptr __itt_event_createA_ptr -#endif /* UNICODE */ -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -__itt_event LIBITTAPI __itt_event_create(const char *name, int namelen); -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -#if ITT_PLATFORM==ITT_PLATFORM_WIN -ITT_STUB(LIBITTAPI, __itt_event, event_createA, (const char *name, int namelen), (name, namelen)) -ITT_STUB(LIBITTAPI, __itt_event, event_createW, (const wchar_t *name, int namelen), (name, namelen)) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -ITT_STUB(LIBITTAPI, __itt_event, event_create, (const char *name, int namelen), (name, namelen)) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_event_createA ITTNOTIFY_DATA(event_createA) -#define __itt_event_createA_ptr ITTNOTIFY_NAME(event_createA) -#define __itt_event_createW ITTNOTIFY_DATA(event_createW) -#define __itt_event_createW_ptr ITTNOTIFY_NAME(event_createW) -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_event_create ITTNOTIFY_DATA(event_create) -#define __itt_event_create_ptr ITTNOTIFY_NAME(event_create) -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#else /* INTEL_NO_ITTNOTIFY_API */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_event_createA(name, namelen) (__itt_event)0 -#define __itt_event_createA_ptr 0 -#define __itt_event_createW(name, namelen) (__itt_event)0 -#define __itt_event_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_event_create(name, namelen) (__itt_event)0 -#define __itt_event_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#define __itt_event_createA_ptr 0 -#define __itt_event_createW_ptr 0 -#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#define __itt_event_create_ptr 0 -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record an 
event occurrence. - * @return __itt_err upon failure (invalid event id/user event feature not enabled) - */ -int LIBITTAPI __itt_event_start(__itt_event event); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(LIBITTAPI, int, event_start, (__itt_event event), (event)) -#define __itt_event_start ITTNOTIFY_DATA(event_start) -#define __itt_event_start_ptr ITTNOTIFY_NAME(event_start) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_event_start(event) (int)0 -#define __itt_event_start_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_event_start_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @brief Record an event end occurrence. - * @note It is optional if events do not have durations. - * @return __itt_err upon failure (invalid event id/user event feature not enabled) - */ -int LIBITTAPI __itt_event_end(__itt_event event); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event), (event)) -#define __itt_event_end ITTNOTIFY_DATA(event_end) -#define __itt_event_end_ptr ITTNOTIFY_NAME(event_end) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_event_end(event) (int)0 -#define __itt_event_end_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_event_end_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} legacy_events group */ -#endif /* _ITTNOTIFY_H_ */ - -/** - * @defgroup legacy_memory Memory Accesses - * @ingroup legacy - */ - -/** - * @deprecated Legacy API - * @brief Inform the tool of memory accesses on reading - */ -void LIBITTAPI __itt_memory_read(void *addr, size_t size); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, memory_read, (void *addr, size_t size), (addr, size)) -#define __itt_memory_read ITTNOTIFY_VOID(memory_read) -#define __itt_memory_read_ptr ITTNOTIFY_NAME(memory_read) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_memory_read(addr, size) -#define __itt_memory_read_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_memory_read_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief Inform the tool of memory accesses on writing - */ -void LIBITTAPI __itt_memory_write(void *addr, size_t size); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, memory_write, (void *addr, size_t size), (addr, size)) -#define __itt_memory_write ITTNOTIFY_VOID(memory_write) -#define __itt_memory_write_ptr ITTNOTIFY_NAME(memory_write) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_memory_write(addr, size) -#define __itt_memory_write_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_memory_write_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief Inform the tool of memory accesses on updating - */ -void LIBITTAPI __itt_memory_update(void *address, size_t size); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUBV(LIBITTAPI, void, memory_update, (void *addr, size_t size), (addr, size)) -#define __itt_memory_update ITTNOTIFY_VOID(memory_update) -#define __itt_memory_update_ptr ITTNOTIFY_NAME(memory_update) -#else 
/* INTEL_NO_ITTNOTIFY_API */ -#define __itt_memory_update(addr, size) -#define __itt_memory_update_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_memory_update_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} legacy_memory group */ - -/** - * @defgroup legacy_state Thread and Object States - * @ingroup legacy - */ - -/** @brief state type */ -typedef int __itt_state_t; - -/** @cond exclude_from_documentation */ -typedef enum __itt_obj_state { - __itt_obj_state_err = 0, - __itt_obj_state_clr = 1, - __itt_obj_state_set = 2, - __itt_obj_state_use = 3 -} __itt_obj_state_t; - -typedef enum __itt_thr_state { - __itt_thr_state_err = 0, - __itt_thr_state_clr = 1, - __itt_thr_state_set = 2 -} __itt_thr_state_t; - -typedef enum __itt_obj_prop { - __itt_obj_prop_watch = 1, - __itt_obj_prop_ignore = 2, - __itt_obj_prop_sharable = 3 -} __itt_obj_prop_t; - -typedef enum __itt_thr_prop { - __itt_thr_prop_quiet = 1 -} __itt_thr_prop_t; -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief managing thread and object states - */ -__itt_state_t LIBITTAPI __itt_state_get(void); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(ITTAPI, __itt_state_t, state_get, (void), ()) -#define __itt_state_get ITTNOTIFY_DATA(state_get) -#define __itt_state_get_ptr ITTNOTIFY_NAME(state_get) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_state_get(void) (__itt_state_t)0 -#define __itt_state_get_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_state_get_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief managing thread and object states - */ -__itt_state_t LIBITTAPI __itt_state_set(__itt_state_t s); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(ITTAPI, __itt_state_t, state_set, (__itt_state_t s), (s)) -#define __itt_state_set ITTNOTIFY_DATA(state_set) -#define __itt_state_set_ptr ITTNOTIFY_NAME(state_set) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_state_set(s) (__itt_state_t)0 -#define __itt_state_set_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_state_set_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief managing thread and object modes - */ -__itt_thr_state_t LIBITTAPI __itt_thr_mode_set(__itt_thr_prop_t p, __itt_thr_state_t s); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(ITTAPI, __itt_thr_state_t, thr_mode_set, (__itt_thr_prop_t p, __itt_thr_state_t s), (p, s)) -#define __itt_thr_mode_set ITTNOTIFY_DATA(thr_mode_set) -#define __itt_thr_mode_set_ptr ITTNOTIFY_NAME(thr_mode_set) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_thr_mode_set(p, s) (__itt_thr_state_t)0 -#define __itt_thr_mode_set_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_thr_mode_set_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ - -/** - * @deprecated Legacy API - * @brief managing thread and object modes - */ -__itt_obj_state_t LIBITTAPI __itt_obj_mode_set(__itt_obj_prop_t p, __itt_obj_state_t s); - -/** @cond exclude_from_documentation */ -#ifndef INTEL_NO_MACRO_BODY -#ifndef INTEL_NO_ITTNOTIFY_API -ITT_STUB(ITTAPI, __itt_obj_state_t, obj_mode_set, (__itt_obj_prop_t p, __itt_obj_state_t s), (p, s)) -#define __itt_obj_mode_set 
ITTNOTIFY_DATA(obj_mode_set) -#define __itt_obj_mode_set_ptr ITTNOTIFY_NAME(obj_mode_set) -#else /* INTEL_NO_ITTNOTIFY_API */ -#define __itt_obj_mode_set(p, s) (__itt_obj_state_t)0 -#define __itt_obj_mode_set_ptr 0 -#endif /* INTEL_NO_ITTNOTIFY_API */ -#else /* INTEL_NO_MACRO_BODY */ -#define __itt_obj_mode_set_ptr 0 -#endif /* INTEL_NO_MACRO_BODY */ -/** @endcond */ -/** @} legacy_state group */ - -/** @cond exclude_from_documentation */ -#ifdef __cplusplus -} -#endif /* __cplusplus */ -/** @endcond */ - -#endif /* _LEGACY_ITTNOTIFY_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/prototype/ittnotify.h b/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/prototype/ittnotify.h deleted file mode 100644 index 89fb5cfc20..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/tools_api/prototype/ittnotify.h +++ /dev/null @@ -1,148 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _PROTOTYPE_ITTNOTIFY_H_ -#define _PROTOTYPE_ITTNOTIFY_H_ -/** - * @file - * @brief Prototype User API functions and types - */ - -/** @cond exclude_from_documentation */ -#ifndef ITT_OS_WIN -# define ITT_OS_WIN 1 -#endif /* ITT_OS_WIN */ - -#ifndef ITT_OS_LINUX -# define ITT_OS_LINUX 2 -#endif /* ITT_OS_LINUX */ - -#ifndef ITT_OS_MAC -# define ITT_OS_MAC 3 -#endif /* ITT_OS_MAC */ - -#ifndef ITT_OS -# if defined WIN32 || defined _WIN32 -# define ITT_OS ITT_OS_WIN -# elif defined( __APPLE__ ) && defined( __MACH__ ) -# define ITT_OS ITT_OS_MAC -# else -# define ITT_OS ITT_OS_LINUX -# endif -#endif /* ITT_OS */ - -#ifndef ITT_PLATFORM_WIN -# define ITT_PLATFORM_WIN 1 -#endif /* ITT_PLATFORM_WIN */ - -#ifndef ITT_PLATFORM_POSIX -# define ITT_PLATFORM_POSIX 2 -#endif /* ITT_PLATFORM_POSIX */ - -#ifndef ITT_PLATFORM -# if ITT_OS==ITT_OS_WIN -# define ITT_PLATFORM ITT_PLATFORM_WIN -# else -# define ITT_PLATFORM ITT_PLATFORM_POSIX -# endif /* _WIN32 */ -#endif /* ITT_PLATFORM */ - -#include <stddef.h> -#include <stdarg.h> -#if ITT_PLATFORM==ITT_PLATFORM_WIN -#include <tchar.h> -#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ - -#ifndef CDECL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define CDECL __cdecl -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define CDECL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* CDECL */ - -#ifndef STDCALL -# if ITT_PLATFORM==ITT_PLATFORM_WIN -# define STDCALL __stdcall -# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -# define STDCALL /* nothing */ -# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */ -#endif /* STDCALL */ - -#define ITTAPI_CALL CDECL -#define LIBITTAPI_CALL /* nothing */ - -#define ITT_JOIN_AUX(p,n) p##n -#define ITT_JOIN(p,n) ITT_JOIN_AUX(p,n) - -#ifndef INTEL_ITTNOTIFY_PREFIX -# define INTEL_ITTNOTIFY_PREFIX __itt_ -#endif /* INTEL_ITTNOTIFY_PREFIX */ -#ifndef INTEL_ITTNOTIFY_POSTFIX -# define INTEL_ITTNOTIFY_POSTFIX _ptr_ -#endif /* INTEL_ITTNOTIFY_POSTFIX */ - -#define ITTNOTIFY_NAME_AUX(n) ITT_JOIN(INTEL_ITTNOTIFY_PREFIX,n) -#define ITTNOTIFY_NAME(n) ITTNOTIFY_NAME_AUX(ITT_JOIN(n,INTEL_ITTNOTIFY_POSTFIX)) - -#define ITTNOTIFY_VOID(n) (!ITTNOTIFY_NAME(n)) ? (void)0 : ITTNOTIFY_NAME(n) -#define ITTNOTIFY_DATA(n) (!ITTNOTIFY_NAME(n)) ? 0 : ITTNOTIFY_NAME(n) - -#ifdef ITT_STUB -#undef ITT_STUB -#endif -#ifdef ITT_STUBV -#undef ITT_STUBV -#endif -#define ITT_STUBV(api,type,name,args,params) \ - typedef type (api* ITT_JOIN(ITTNOTIFY_NAME(name),_t)) args; \ - extern ITT_JOIN(ITTNOTIFY_NAME(name),_t) ITTNOTIFY_NAME(name); -#define ITT_STUB ITT_STUBV - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ -/** @endcond */ - -/** - * @defgroup prototype Prototype API - * @{ - * @} - */ - -/**************************************************************************** - * ??? group - ****************************************************************************/ - -/** @cond exclude_from_documentation */ -#ifdef __cplusplus -} -#endif /* __cplusplus */ -/** @endcond */ - -#endif /* _PROTOTYPE_ITTNOTIFY_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/win32-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/win32-tbb-export.def deleted file mode 100644 index 8a8ead2819..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/win32-tbb-export.def +++ /dev/null @@ -1,297 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. 
-; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -#include "tbb/tbb_config.h" - -EXPORTS - -; Assembly-language support that is called directly by clients -;__TBB_machine_cmpswp1 -;__TBB_machine_cmpswp2 -;__TBB_machine_cmpswp4 -__TBB_machine_cmpswp8 -;__TBB_machine_fetchadd1 -;__TBB_machine_fetchadd2 -;__TBB_machine_fetchadd4 -__TBB_machine_fetchadd8 -;__TBB_machine_fetchstore1 -;__TBB_machine_fetchstore2 -;__TBB_machine_fetchstore4 -__TBB_machine_fetchstore8 -__TBB_machine_store8 -__TBB_machine_load8 -__TBB_machine_trylockbyte - -; cache_aligned_allocator.cpp -?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z -?NFS_GetLineSize@internal@tbb@@YAIXZ -?NFS_Free@internal@tbb@@YAXPAX@Z -?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z -?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z -?is_malloc_used_v3@internal@tbb@@YA_NXZ - -; task.cpp v3 -?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBEAAVtask@3@I@Z -?allocate@allocate_child_proxy@internal@tbb@@QBEAAVtask@3@I@Z -?allocate@allocate_continuation_proxy@internal@tbb@@QBEAAVtask@3@I@Z -?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z -?destroy@task_base@internal@interface5@tbb@@SAXAAVtask@4@@Z -?free@allocate_additional_child_of_proxy@internal@tbb@@QBEXAAVtask@3@@Z -?free@allocate_child_proxy@internal@tbb@@QBEXAAVtask@3@@Z -?free@allocate_continuation_proxy@internal@tbb@@QBEXAAVtask@3@@Z -?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z -?internal_set_ref_count@task@tbb@@AAEXH@Z -?internal_decrement_ref_count@task@tbb@@AAEHXZ -?is_owned_by_current_thread@task@tbb@@QBE_NXZ -?note_affinity@task@tbb@@UAEXG@Z -?resize@affinity_partitioner_base_v3@internal@tbb@@AAEXI@Z -?self@task@tbb@@SAAAV12@XZ -?spawn_and_wait_for_all@task@tbb@@QAEXAAVtask_list@2@@Z -?default_num_threads@task_scheduler_init@tbb@@SAHXZ -?initialize@task_scheduler_init@tbb@@QAEXHI@Z -?initialize@task_scheduler_init@tbb@@QAEXH@Z -?terminate@task_scheduler_init@tbb@@QAEXXZ -?observe@task_scheduler_observer_v3@internal@tbb@@QAEX_N@Z - -#if !TBB_NO_LEGACY -; task_v2.cpp -?destroy@task@tbb@@QAEXAAV12@@Z -#endif - -; exception handling support -#if __TBB_TASK_GROUP_CONTEXT -?allocate@allocate_root_with_context_proxy@internal@tbb@@QBEAAVtask@3@I@Z -?free@allocate_root_with_context_proxy@internal@tbb@@QBEXAAVtask@3@@Z -?is_group_execution_cancelled@task_group_context@tbb@@QBE_NXZ 
-?cancel_group_execution@task_group_context@tbb@@QAE_NXZ -?reset@task_group_context@tbb@@QAEXXZ -?init@task_group_context@tbb@@IAEXXZ -?register_pending_exception@task_group_context@tbb@@QAEXXZ -??1task_group_context@tbb@@QAE@XZ -?name@captured_exception@tbb@@UBEPBDXZ -?what@captured_exception@tbb@@UBEPBDXZ -??1captured_exception@tbb@@UAE@XZ -?move@captured_exception@tbb@@UAEPAV12@XZ -?destroy@captured_exception@tbb@@UAEXXZ -?set@captured_exception@tbb@@QAEXPBD0@Z -?clear@captured_exception@tbb@@QAEXXZ -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -; Symbols for exceptions thrown from TBB -?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ -?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z -?what@bad_last_alloc@tbb@@UBEPBDXZ -?what@missing_wait@tbb@@UBEPBDXZ -?what@invalid_multiple_scheduling@tbb@@UBEPBDXZ -?what@improper_lock@tbb@@UBEPBDXZ - -; tbb_misc.cpp -?assertion_failure@tbb@@YAXPBDH00@Z -?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ -?handle_perror@internal@tbb@@YAXHPBD@Z -?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z -?runtime_warning@internal@tbb@@YAXPBDZZ -TBB_runtime_interface_version - -; itt_notify.cpp -?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z -?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z -?itt_set_sync_name_v3@internal@tbb@@YAXPAXPB_W@Z -?itt_load_pointer_v3@internal@tbb@@YAPAXPBX@Z - -; pipeline.cpp -??0pipeline@tbb@@QAE@XZ -??1filter@tbb@@UAE@XZ -??1pipeline@tbb@@UAE@XZ -??_7pipeline@tbb@@6B@ -?add_filter@pipeline@tbb@@QAEXAAVfilter@2@@Z -?clear@pipeline@tbb@@QAEXXZ -?inject_token@pipeline@tbb@@AAEXAAVtask@2@@Z -?run@pipeline@tbb@@QAEXI@Z -#if __TBB_TASK_GROUP_CONTEXT -?run@pipeline@tbb@@QAEXIAAVtask_group_context@2@@Z -#endif -?process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ -?try_process_item@thread_bound_filter@tbb@@QAE?AW4result_type@12@XZ - -; queuing_rw_mutex.cpp -?internal_construct@queuing_rw_mutex@tbb@@QAEXXZ -?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAEXAAV23@_N@Z -?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ -?release@scoped_lock@queuing_rw_mutex@tbb@@QAEXXZ -?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAE_NXZ -?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAE_NAAV23@_N@Z - -; reader_writer_lock.cpp -?try_lock_read@reader_writer_lock@interface5@tbb@@QAE_NXZ -?try_lock@reader_writer_lock@interface5@tbb@@QAE_NXZ -?unlock@reader_writer_lock@interface5@tbb@@QAEXXZ -?lock_read@reader_writer_lock@interface5@tbb@@QAEXXZ -?lock@reader_writer_lock@interface5@tbb@@QAEXXZ -?internal_construct@reader_writer_lock@interface5@tbb@@AAEXXZ -?internal_destroy@reader_writer_lock@interface5@tbb@@AAEXXZ -?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z -?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AAEXXZ -?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXAAV234@@Z -?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AAEXXZ - -#if !TBB_NO_LEGACY -; spin_rw_mutex.cpp v2 -?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z -?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z -?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z -?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z -?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z -?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z -?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z -?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z -?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z -#endif - -; 
spin_rw_mutex v3 -?internal_construct@spin_rw_mutex_v3@tbb@@AAEXXZ -?internal_upgrade@spin_rw_mutex_v3@tbb@@AAE_NXZ -?internal_downgrade@spin_rw_mutex_v3@tbb@@AAEXXZ -?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAEXXZ -?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ -?internal_release_reader@spin_rw_mutex_v3@tbb@@AAEXXZ -?internal_release_writer@spin_rw_mutex_v3@tbb@@AAEXXZ -?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAE_NXZ -?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAE_NXZ - -; spin_mutex.cpp -?internal_construct@spin_mutex@tbb@@QAEXXZ -?internal_acquire@scoped_lock@spin_mutex@tbb@@AAEXAAV23@@Z -?internal_release@scoped_lock@spin_mutex@tbb@@AAEXXZ -?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAE_NAAV23@@Z - -; mutex.cpp -?internal_acquire@scoped_lock@mutex@tbb@@AAEXAAV23@@Z -?internal_release@scoped_lock@mutex@tbb@@AAEXXZ -?internal_try_acquire@scoped_lock@mutex@tbb@@AAE_NAAV23@@Z -?internal_construct@mutex@tbb@@AAEXXZ -?internal_destroy@mutex@tbb@@AAEXXZ - -; recursive_mutex.cpp -?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAEXAAV23@@Z -?internal_release@scoped_lock@recursive_mutex@tbb@@AAEXXZ -?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAE_NAAV23@@Z -?internal_construct@recursive_mutex@tbb@@AAEXXZ -?internal_destroy@recursive_mutex@tbb@@AAEXXZ - -; queuing_mutex.cpp -?internal_construct@queuing_mutex@tbb@@QAEXXZ -?acquire@scoped_lock@queuing_mutex@tbb@@QAEXAAV23@@Z -?release@scoped_lock@queuing_mutex@tbb@@QAEXXZ -?try_acquire@scoped_lock@queuing_mutex@tbb@@QAE_NAAV23@@Z - -; critical_section.cpp -?internal_construct@critical_section_v4@internal@tbb@@QAEXXZ - -#if !TBB_NO_LEGACY -; concurrent_hash_map.cpp -?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBE_NXZ - -; concurrent_queue.cpp v2 -?advance@concurrent_queue_iterator_base@internal@tbb@@IAEXXZ -?assign@concurrent_queue_iterator_base@internal@tbb@@IAEXABV123@@Z -?internal_size@concurrent_queue_base@internal@tbb@@IBEHXZ -??0concurrent_queue_base@internal@tbb@@IAE@I@Z -??0concurrent_queue_iterator_base@internal@tbb@@IAE@ABVconcurrent_queue_base@12@@Z -??1concurrent_queue_base@internal@tbb@@MAE@XZ -??1concurrent_queue_iterator_base@internal@tbb@@IAE@XZ -?internal_pop@concurrent_queue_base@internal@tbb@@IAEXPAX@Z -?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAE_NPAX@Z -?internal_push@concurrent_queue_base@internal@tbb@@IAEXPBX@Z -?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAE_NPBX@Z -?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAEXHI@Z -#endif - -; concurrent_queue v3 -??1concurrent_queue_iterator_base_v3@internal@tbb@@IAE@XZ -??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@@Z -??0concurrent_queue_iterator_base_v3@internal@tbb@@IAE@ABVconcurrent_queue_base_v3@12@I@Z -?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXXZ -?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAEXABV123@@Z -??0concurrent_queue_base_v3@internal@tbb@@IAE@I@Z -??1concurrent_queue_base_v3@internal@tbb@@MAE@XZ -?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAEXPAX@Z -?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAE_NPAX@Z -?internal_push@concurrent_queue_base_v3@internal@tbb@@IAEXPBX@Z -?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAE_NPBX@Z -?internal_size@concurrent_queue_base_v3@internal@tbb@@IBEHXZ -?internal_empty@concurrent_queue_base_v3@internal@tbb@@IBE_NXZ -?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAEXHI@Z 
-?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAEXXZ -?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBEXXZ -?assign@concurrent_queue_base_v3@internal@tbb@@IAEXABV123@@Z - -#if !TBB_NO_LEGACY -; concurrent_vector.cpp v2 -?internal_assign@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z -?internal_capacity@concurrent_vector_base@internal@tbb@@IBEIXZ -?internal_clear@concurrent_vector_base@internal@tbb@@IAEXP6AXPAXI@Z_N@Z -?internal_copy@concurrent_vector_base@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z -?internal_grow_by@concurrent_vector_base@internal@tbb@@IAEIIIP6AXPAXI@Z@Z -?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAEXIIP6AXPAXI@Z@Z -?internal_push_back@concurrent_vector_base@internal@tbb@@IAEPAXIAAI@Z -?internal_reserve@concurrent_vector_base@internal@tbb@@IAEXIII@Z -#endif - -; concurrent_vector v3 -??1concurrent_vector_base_v3@internal@tbb@@IAE@XZ -?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z -?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBEIXZ -?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAEIP6AXPAXI@Z@Z -?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAEXABV123@IP6AXPAXPBXI@Z@Z -?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z -?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAEXIIP6AXPAXPBXI@Z1@Z -?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAEPAXIAAI@Z -?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAEXIII@Z -?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAEPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z -?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAEXAAV123@@Z -?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBEXI@Z -?internal_resize@concurrent_vector_base_v3@internal@tbb@@IAEXIIIPBXP6AXPAXI@ZP6AX10I@Z@Z -?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IAEIIIP6AXPAXPBXI@Z1@Z - -; tbb_thread -?join@tbb_thread_v3@internal@tbb@@QAEXXZ -?detach@tbb_thread_v3@internal@tbb@@QAEXXZ -?internal_start@tbb_thread_v3@internal@tbb@@AAEXP6GIPAX@Z0@Z -?allocate_closure_v3@internal@tbb@@YAPAXI@Z -?free_closure_v3@internal@tbb@@YAXPAX@Z -?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ -?thread_yield_v3@internal@tbb@@YAXXZ -?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z -?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z -?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ - -; condition_variable -?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z -?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAATcondvar_impl_t@123@PAVmutex@3@PBVinterval_t@tick_count@3@@Z -?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z -?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z -?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAATcondvar_impl_t@123@@Z diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/win64-gcc-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/win64-gcc-tbb-export.def deleted file mode 100644 index d95078deb3..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/win64-gcc-tbb-export.def +++ /dev/null @@ -1,365 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - - -#include "tbb/tbb_config.h" - -{ -global: - -/* cache_aligned_allocator.cpp */ -_ZN3tbb8internal12NFS_AllocateEyyPv; // MODIFIED LINUX ENTRY -_ZN3tbb8internal15NFS_GetLineSizeEv; -_ZN3tbb8internal8NFS_FreeEPv; -_ZN3tbb8internal23allocate_via_handler_v3Ey; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25deallocate_via_handler_v3EPv; -_ZN3tbb8internal17is_malloc_used_v3Ev; - -/* task.cpp v3 */ -_ZN3tbb4task13note_affinityEt; -_ZN3tbb4task22internal_set_ref_countEi; -_ZN3tbb4task28internal_decrement_ref_countEv; -_ZN3tbb4task22spawn_and_wait_for_allERNS_9task_listE; -_ZN3tbb4task4selfEv; -_ZN3tbb10interface58internal9task_base7destroyERNS_4taskE; -_ZNK3tbb4task26is_owned_by_current_threadEv; -_ZN3tbb8internal19allocate_root_proxy4freeERNS_4taskE; -_ZN3tbb8internal19allocate_root_proxy8allocateEy; // MODIFIED LINUX ENTRY -_ZN3tbb8internal28affinity_partitioner_base_v36resizeEj; -_ZNK3tbb8internal20allocate_child_proxy4freeERNS_4taskE; -_ZNK3tbb8internal20allocate_child_proxy8allocateEy; // MODIFIED LINUX ENTRY -_ZNK3tbb8internal27allocate_continuation_proxy4freeERNS_4taskE; -_ZNK3tbb8internal27allocate_continuation_proxy8allocateEy; // MODIFIED LINUX ENTRY -_ZNK3tbb8internal34allocate_additional_child_of_proxy4freeERNS_4taskE; -_ZNK3tbb8internal34allocate_additional_child_of_proxy8allocateEy; // MODIFIED LINUX ENTRY -_ZTIN3tbb4taskE; -_ZTSN3tbb4taskE; -_ZTVN3tbb4taskE; -_ZN3tbb19task_scheduler_init19default_num_threadsEv; -_ZN3tbb19task_scheduler_init10initializeEiy; // MODIFIED LINUX ENTRY -_ZN3tbb19task_scheduler_init10initializeEi; -_ZN3tbb19task_scheduler_init9terminateEv; -_ZN3tbb8internal26task_scheduler_observer_v37observeEb; -_ZN3tbb10empty_task7executeEv; -_ZN3tbb10empty_taskD0Ev; -_ZN3tbb10empty_taskD1Ev; -_ZTIN3tbb10empty_taskE; -_ZTSN3tbb10empty_taskE; -_ZTVN3tbb10empty_taskE; - -#if !TBB_NO_LEGACY -/* task_v2.cpp */ -_ZN3tbb4task7destroyERS0_; -#endif /* !TBB_NO_LEGACY */ - -/* Exception handling in task scheduler */ -#if __TBB_TASK_GROUP_CONTEXT -_ZNK3tbb8internal32allocate_root_with_context_proxy8allocateEy; // MODIFIED LINUX ENTRY -_ZNK3tbb8internal32allocate_root_with_context_proxy4freeERNS_4taskE; -_ZNK3tbb18task_group_context28is_group_execution_cancelledEv; -_ZN3tbb18task_group_context22cancel_group_executionEv; -_ZN3tbb18task_group_context26register_pending_exceptionEv; 
-_ZN3tbb18task_group_context5resetEv; -_ZN3tbb18task_group_context4initEv; -_ZN3tbb18task_group_contextD1Ev; -_ZN3tbb18task_group_contextD2Ev; -_ZNK3tbb18captured_exception4nameEv; -_ZNK3tbb18captured_exception4whatEv; -_ZN3tbb18captured_exception10throw_selfEv; -_ZN3tbb18captured_exception3setEPKcS2_; -_ZN3tbb18captured_exception4moveEv; -_ZN3tbb18captured_exception5clearEv; -_ZN3tbb18captured_exception7destroyEv; -_ZN3tbb18captured_exception8allocateEPKcS2_; -_ZN3tbb18captured_exceptionD0Ev; -_ZN3tbb18captured_exceptionD1Ev; -_ZTIN3tbb18captured_exceptionE; -_ZTSN3tbb18captured_exceptionE; -_ZTVN3tbb18captured_exceptionE; -_ZN3tbb13tbb_exceptionD2Ev; -_ZTIN3tbb13tbb_exceptionE; -_ZTSN3tbb13tbb_exceptionE; -_ZTVN3tbb13tbb_exceptionE; -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -/* Symbols for exceptions thrown from TBB */ -_ZN3tbb8internal33throw_bad_last_alloc_exception_v4Ev; -_ZN3tbb8internal18throw_exception_v4ENS0_12exception_idE; -_ZN3tbb14bad_last_allocD0Ev; -_ZN3tbb14bad_last_allocD1Ev; -_ZNK3tbb14bad_last_alloc4whatEv; -_ZTIN3tbb14bad_last_allocE; -_ZTSN3tbb14bad_last_allocE; -_ZTVN3tbb14bad_last_allocE; -_ZN3tbb12missing_waitD0Ev; -_ZN3tbb12missing_waitD1Ev; -_ZNK3tbb12missing_wait4whatEv; -_ZTIN3tbb12missing_waitE; -_ZTSN3tbb12missing_waitE; -_ZTVN3tbb12missing_waitE; -_ZN3tbb27invalid_multiple_schedulingD0Ev; -_ZN3tbb27invalid_multiple_schedulingD1Ev; -_ZNK3tbb27invalid_multiple_scheduling4whatEv; -_ZTIN3tbb27invalid_multiple_schedulingE; -_ZTSN3tbb27invalid_multiple_schedulingE; -_ZTVN3tbb27invalid_multiple_schedulingE; -_ZN3tbb13improper_lockD0Ev; -_ZN3tbb13improper_lockD1Ev; -_ZNK3tbb13improper_lock4whatEv; -_ZTIN3tbb13improper_lockE; -_ZTSN3tbb13improper_lockE; -_ZTVN3tbb13improper_lockE; - -/* tbb_misc.cpp */ -_ZN3tbb17assertion_failureEPKciS1_S1_; -_ZN3tbb21set_assertion_handlerEPFvPKciS1_S1_E; -_ZN3tbb8internal36get_initial_auto_partitioner_divisorEv; -_ZN3tbb8internal13handle_perrorEiPKc; -_ZN3tbb8internal15runtime_warningEPKcz; -TBB_runtime_interface_version; - -/* itt_notify.cpp */ -_ZN3tbb8internal32itt_load_pointer_with_acquire_v3EPKv; -_ZN3tbb8internal33itt_store_pointer_with_release_v3EPvS1_; -_ZN3tbb8internal20itt_set_sync_name_v3EPvPKc; // MODIFIED LINUX ENTRY -_ZN3tbb8internal19itt_load_pointer_v3EPKv; - -/* pipeline.cpp */ -_ZTIN3tbb6filterE; -_ZTSN3tbb6filterE; -_ZTVN3tbb6filterE; -_ZN3tbb6filterD2Ev; -_ZN3tbb8pipeline10add_filterERNS_6filterE; -_ZN3tbb8pipeline12inject_tokenERNS_4taskE; -_ZN3tbb8pipeline13remove_filterERNS_6filterE; -_ZN3tbb8pipeline3runEy; // MODIFIED LINUX ENTRY -#if __TBB_TASK_GROUP_CONTEXT -_ZN3tbb8pipeline3runEyRNS_18task_group_contextE; // MODIFIED LINUX ENTRY -#endif -_ZN3tbb8pipeline5clearEv; -_ZN3tbb19thread_bound_filter12process_itemEv; -_ZN3tbb19thread_bound_filter16try_process_itemEv; -_ZTIN3tbb8pipelineE; -_ZTSN3tbb8pipelineE; -_ZTVN3tbb8pipelineE; -_ZN3tbb8pipelineC1Ev; -_ZN3tbb8pipelineC2Ev; -_ZN3tbb8pipelineD0Ev; -_ZN3tbb8pipelineD1Ev; -_ZN3tbb8pipelineD2Ev; - -/* queuing_rw_mutex.cpp */ -_ZN3tbb16queuing_rw_mutex18internal_constructEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock17upgrade_to_writerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock19downgrade_to_readerEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock7acquireERS0_b; -_ZN3tbb16queuing_rw_mutex11scoped_lock7releaseEv; -_ZN3tbb16queuing_rw_mutex11scoped_lock11try_acquireERS0_b; - -/* reader_writer_lock.cpp */ -_ZN3tbb10interface518reader_writer_lock11scoped_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock11scoped_lock18internal_constructERS1_; 
-_ZN3tbb10interface518reader_writer_lock13try_lock_readEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock16scoped_lock_read18internal_constructERS1_; -_ZN3tbb10interface518reader_writer_lock16internal_destroyEv; -_ZN3tbb10interface518reader_writer_lock18internal_constructEv; -_ZN3tbb10interface518reader_writer_lock4lockEv; -_ZN3tbb10interface518reader_writer_lock6unlockEv; -_ZN3tbb10interface518reader_writer_lock8try_lockEv; -_ZN3tbb10interface518reader_writer_lock9lock_readEv; - -#if !TBB_NO_LEGACY -/* spin_rw_mutex.cpp v2 */ -_ZN3tbb13spin_rw_mutex16internal_upgradeEPS0_; -_ZN3tbb13spin_rw_mutex22internal_itt_releasingEPS0_; -_ZN3tbb13spin_rw_mutex23internal_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_acquire_writerEPS0_; -_ZN3tbb13spin_rw_mutex18internal_downgradeEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_readerEPS0_; -_ZN3tbb13spin_rw_mutex23internal_release_writerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_readerEPS0_; -_ZN3tbb13spin_rw_mutex27internal_try_acquire_writerEPS0_; -#endif - -/* spin_rw_mutex v3 */ -_ZN3tbb16spin_rw_mutex_v318internal_constructEv; -_ZN3tbb16spin_rw_mutex_v316internal_upgradeEv; -_ZN3tbb16spin_rw_mutex_v318internal_downgradeEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v323internal_acquire_writerEv; -_ZN3tbb16spin_rw_mutex_v323internal_release_readerEv; -_ZN3tbb16spin_rw_mutex_v323internal_release_writerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_readerEv; -_ZN3tbb16spin_rw_mutex_v327internal_try_acquire_writerEv; - -/* spin_mutex.cpp */ -_ZN3tbb10spin_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb10spin_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb10spin_mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb10spin_mutex18internal_constructEv; - -/* mutex.cpp */ -_ZN3tbb5mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb5mutex11scoped_lock16internal_releaseEv; -_ZN3tbb5mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb5mutex16internal_destroyEv; -_ZN3tbb5mutex18internal_constructEv; - -/* recursive_mutex.cpp */ -_ZN3tbb15recursive_mutex11scoped_lock16internal_acquireERS0_; -_ZN3tbb15recursive_mutex11scoped_lock16internal_releaseEv; -_ZN3tbb15recursive_mutex11scoped_lock20internal_try_acquireERS0_; -_ZN3tbb15recursive_mutex16internal_destroyEv; -_ZN3tbb15recursive_mutex18internal_constructEv; - -/* QueuingMutex.cpp */ -_ZN3tbb13queuing_mutex18internal_constructEv; -_ZN3tbb13queuing_mutex11scoped_lock7acquireERS0_; -_ZN3tbb13queuing_mutex11scoped_lock7releaseEv; -_ZN3tbb13queuing_mutex11scoped_lock11try_acquireERS0_; - -/* critical_section.cpp */ -_ZN3tbb8internal19critical_section_v418internal_constructEv; - -#if !TBB_NO_LEGACY -/* concurrent_hash_map */ -_ZNK3tbb8internal21hash_map_segment_base23internal_grow_predicateEv; - -/* concurrent_queue.cpp v2 */ -_ZN3tbb8internal21concurrent_queue_base12internal_popEPv; -_ZN3tbb8internal21concurrent_queue_base13internal_pushEPKv; -_ZN3tbb8internal21concurrent_queue_base21internal_set_capacityExy; // MODIFIED LINUX ENTRY -_ZN3tbb8internal21concurrent_queue_base23internal_pop_if_presentEPv; -_ZN3tbb8internal21concurrent_queue_base25internal_push_if_not_fullEPKv; -_ZN3tbb8internal21concurrent_queue_baseC2Ey; // MODIFIED LINUX ENTRY -_ZN3tbb8internal21concurrent_queue_baseD2Ev; -_ZTIN3tbb8internal21concurrent_queue_baseE; -_ZTSN3tbb8internal21concurrent_queue_baseE; -_ZTVN3tbb8internal21concurrent_queue_baseE; 
-_ZN3tbb8internal30concurrent_queue_iterator_base6assignERKS1_; -_ZN3tbb8internal30concurrent_queue_iterator_base7advanceEv; -_ZN3tbb8internal30concurrent_queue_iterator_baseC2ERKNS0_21concurrent_queue_baseE; -_ZN3tbb8internal30concurrent_queue_iterator_baseD2Ev; -_ZNK3tbb8internal21concurrent_queue_base13internal_sizeEv; -#endif - -/* concurrent_queue v3 */ -/* constructors */ -_ZN3tbb8internal24concurrent_queue_base_v3C2Ey; // MODIFIED LINUX ENTRY -_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3E; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3C2ERKNS0_24concurrent_queue_base_v3Ey; // MODIFIED LINUX ENTRY -/* destructors */ -_ZN3tbb8internal24concurrent_queue_base_v3D2Ev; -_ZN3tbb8internal33concurrent_queue_iterator_base_v3D2Ev; -/* typeinfo */ -_ZTIN3tbb8internal24concurrent_queue_base_v3E; -_ZTSN3tbb8internal24concurrent_queue_base_v3E; -/* vtable */ -_ZTVN3tbb8internal24concurrent_queue_base_v3E; -/* methods */ -_ZN3tbb8internal33concurrent_queue_iterator_base_v36assignERKS1_; -_ZN3tbb8internal33concurrent_queue_iterator_base_v37advanceEv; -_ZN3tbb8internal24concurrent_queue_base_v313internal_pushEPKv; -_ZN3tbb8internal24concurrent_queue_base_v325internal_push_if_not_fullEPKv; -_ZN3tbb8internal24concurrent_queue_base_v312internal_popEPv; -_ZN3tbb8internal24concurrent_queue_base_v323internal_pop_if_presentEPv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_finish_clearEv; -_ZN3tbb8internal24concurrent_queue_base_v321internal_set_capacityExy; // MODIFIED LINUX ENTRY -_ZNK3tbb8internal24concurrent_queue_base_v313internal_sizeEv; -_ZNK3tbb8internal24concurrent_queue_base_v314internal_emptyEv; -_ZNK3tbb8internal24concurrent_queue_base_v324internal_throw_exceptionEv; -_ZN3tbb8internal24concurrent_queue_base_v36assignERKS1_; - -#if !TBB_NO_LEGACY -/* concurrent_vector.cpp v2 */ -_ZN3tbb8internal22concurrent_vector_base13internal_copyERKS1_yPFvPvPKvyE; // MODIFIED LINUX ENTRY -_ZN3tbb8internal22concurrent_vector_base14internal_clearEPFvPvyEb; // MODIFIED LINUX ENTRY -_ZN3tbb8internal22concurrent_vector_base15internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_; // MODIFIED LINUX ENTRY -_ZN3tbb8internal22concurrent_vector_base16internal_grow_byEyyPFvPvyE; // MODIFIED LINUX ENTRY -_ZN3tbb8internal22concurrent_vector_base16internal_reserveEyyy; // MODIFIED LINUX ENTRY -_ZN3tbb8internal22concurrent_vector_base18internal_push_backEyRy; // MODIFIED LINUX ENTRY -_ZN3tbb8internal22concurrent_vector_base25internal_grow_to_at_leastEyyPFvPvyE; // MODIFIED LINUX ENTRY -_ZNK3tbb8internal22concurrent_vector_base17internal_capacityEv; -#endif - -/* concurrent_vector v3 */ -_ZN3tbb8internal25concurrent_vector_base_v313internal_copyERKS1_yPFvPvPKvyE; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v314internal_clearEPFvPvyE; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v315internal_assignERKS1_yPFvPvyEPFvS4_PKvyESA_; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v316internal_grow_byEyyPFvPvPKvyES4_; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v316internal_reserveEyyy; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v318internal_push_backEyRy; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v325internal_grow_to_at_leastEyyPFvPvPKvyES4_; // MODIFIED LINUX ENTRY -_ZNK3tbb8internal25concurrent_vector_base_v317internal_capacityEv; -_ZN3tbb8internal25concurrent_vector_base_v316internal_compactEyPvPFvS2_yEPFvS2_PKvyE; // MODIFIED LINUX ENTRY 
-_ZN3tbb8internal25concurrent_vector_base_v313internal_swapERS1_; -_ZNK3tbb8internal25concurrent_vector_base_v324internal_throw_exceptionEy; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v3D2Ev; -_ZN3tbb8internal25concurrent_vector_base_v315internal_resizeEyyyPKvPFvPvyEPFvS4_S3_yE; // MODIFIED LINUX ENTRY -_ZN3tbb8internal25concurrent_vector_base_v337internal_grow_to_at_least_with_resultEyyPFvPvPKvyES4_; // MODIFIED LINUX ENTRY - -/* tbb_thread */ -_ZN3tbb8internal13tbb_thread_v320hardware_concurrencyEv; -_ZN3tbb8internal13tbb_thread_v36detachEv; -_ZN3tbb8internal16thread_get_id_v3Ev; -_ZN3tbb8internal15free_closure_v3EPv; -_ZN3tbb8internal13tbb_thread_v34joinEv; -_ZN3tbb8internal13tbb_thread_v314internal_startEPFjPvES2_; // MODIFIED LINUX ENTRY -_ZN3tbb8internal19allocate_closure_v3Ey; // MODIFIED LINUX ENTRY -_ZN3tbb8internal7move_v3ERNS0_13tbb_thread_v3ES2_; -_ZN3tbb8internal15thread_yield_v3Ev; -_ZN3tbb8internal15thread_sleep_v3ERKNS_10tick_count10interval_tE; - -/* condition_variable */ -_ZN3tbb10interface58internal32internal_condition_variable_waitERNS1_14condvar_impl_tEPNS_5mutexEPKNS_10tick_count10interval_tE; -_ZN3tbb10interface58internal35internal_destroy_condition_variableERNS1_14condvar_impl_tE; -_ZN3tbb10interface58internal38internal_condition_variable_notify_allERNS1_14condvar_impl_tE; -_ZN3tbb10interface58internal38internal_condition_variable_notify_oneERNS1_14condvar_impl_tE; -_ZN3tbb10interface58internal38internal_initialize_condition_variableERNS1_14condvar_impl_tE; - -local: - -/* TBB symbols */ -*3tbb*; -*__TBB*; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_msg_buf; -get_text_buf; -message_catalog; -print_buf; -irc__get_msg; -irc__print; - -}; - - - diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/win64-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/win64-tbb-export.def deleted file mode 100644 index 1ca8ed7072..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/win64-tbb-export.def +++ /dev/null @@ -1,293 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -; This file is organized with a section for each .cpp file. -; Each of these sections is in alphabetical order. 
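The deleted Linux export list above is laid out like a GNU ld version script: the mangled Itanium-ABI C++ names to be exported, followed by a local: section whose wildcards (*3tbb*, *__TBB*, and the Intel compiler helpers) hide everything else. The Windows .def files that follow list the same entry points under MSVC name decoration. As a small standalone sketch (not part of the bundled sources), the GCC/Clang runtime demangler abi::__cxa_demangle can decode any of the Linux entries back to a readable C++ signature:

// Illustrative only: decode one mangled entry from the export list above.
#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
    const char* mangled = "_ZN3tbb18task_group_context5resetEv"; // entry taken from the list above
    int status = 0;
    // abi::__cxa_demangle returns a malloc()-allocated string; status 0 means success.
    char* readable = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
    std::printf("%s -> %s\n", mangled, status == 0 ? readable : "(demangle failed)");
    std::free(readable);
    return 0;
}

Compiled with g++ or clang++, this prints tbb::task_group_context::reset(), which makes it easier to map each export entry back to the public TBB API it exposes.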
- -#include "tbb/tbb_config.h" - -EXPORTS - -; Assembly-language support that is called directly by clients -__TBB_machine_cmpswp1 -__TBB_machine_fetchadd1 -__TBB_machine_fetchstore1 -__TBB_machine_cmpswp2 -__TBB_machine_fetchadd2 -__TBB_machine_fetchstore2 -__TBB_machine_pause - -; cache_aligned_allocator.cpp -?NFS_Allocate@internal@tbb@@YAPEAX_K0PEAX@Z -?NFS_GetLineSize@internal@tbb@@YA_KXZ -?NFS_Free@internal@tbb@@YAXPEAX@Z -?allocate_via_handler_v3@internal@tbb@@YAPEAX_K@Z -?deallocate_via_handler_v3@internal@tbb@@YAXPEAX@Z -?is_malloc_used_v3@internal@tbb@@YA_NXZ - - -; task.cpp v3 -?resize@affinity_partitioner_base_v3@internal@tbb@@AEAAXI@Z -?allocate@allocate_additional_child_of_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z -?allocate@allocate_child_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z -?allocate@allocate_continuation_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z -?allocate@allocate_root_proxy@internal@tbb@@SAAEAVtask@3@_K@Z -?destroy@task_base@internal@interface5@tbb@@SAXAEAVtask@4@@Z -?free@allocate_additional_child_of_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z -?free@allocate_child_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z -?free@allocate_continuation_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z -?free@allocate_root_proxy@internal@tbb@@SAXAEAVtask@3@@Z -?internal_set_ref_count@task@tbb@@AEAAXH@Z -?internal_decrement_ref_count@task@tbb@@AEAA_JXZ -?is_owned_by_current_thread@task@tbb@@QEBA_NXZ -?note_affinity@task@tbb@@UEAAXG@Z -?self@task@tbb@@SAAEAV12@XZ -?spawn_and_wait_for_all@task@tbb@@QEAAXAEAVtask_list@2@@Z -?default_num_threads@task_scheduler_init@tbb@@SAHXZ -?initialize@task_scheduler_init@tbb@@QEAAXH_K@Z -?initialize@task_scheduler_init@tbb@@QEAAXH@Z -?terminate@task_scheduler_init@tbb@@QEAAXXZ -?observe@task_scheduler_observer_v3@internal@tbb@@QEAAX_N@Z - -#if !TBB_NO_LEGACY -; task_v2.cpp -?destroy@task@tbb@@QEAAXAEAV12@@Z -#endif - -; Exception handling in task scheduler -#if __TBB_TASK_GROUP_CONTEXT -?allocate@allocate_root_with_context_proxy@internal@tbb@@QEBAAEAVtask@3@_K@Z -?free@allocate_root_with_context_proxy@internal@tbb@@QEBAXAEAVtask@3@@Z -?is_group_execution_cancelled@task_group_context@tbb@@QEBA_NXZ -?cancel_group_execution@task_group_context@tbb@@QEAA_NXZ -?reset@task_group_context@tbb@@QEAAXXZ -?init@task_group_context@tbb@@IEAAXXZ -?register_pending_exception@task_group_context@tbb@@QEAAXXZ -??1task_group_context@tbb@@QEAA@XZ -?name@captured_exception@tbb@@UEBAPEBDXZ -?what@captured_exception@tbb@@UEBAPEBDXZ -??1captured_exception@tbb@@UEAA@XZ -?move@captured_exception@tbb@@UEAAPEAV12@XZ -?destroy@captured_exception@tbb@@UEAAXXZ -?set@captured_exception@tbb@@QEAAXPEBD0@Z -?clear@captured_exception@tbb@@QEAAXXZ -#endif /* __TBB_TASK_GROUP_CONTEXT */ - -; Symbols for exceptions thrown from TBB -?throw_bad_last_alloc_exception_v4@internal@tbb@@YAXXZ -?throw_exception_v4@internal@tbb@@YAXW4exception_id@12@@Z -?what@bad_last_alloc@tbb@@UEBAPEBDXZ -?what@missing_wait@tbb@@UEBAPEBDXZ -?what@invalid_multiple_scheduling@tbb@@UEBAPEBDXZ -?what@improper_lock@tbb@@UEBAPEBDXZ - -; tbb_misc.cpp -?assertion_failure@tbb@@YAXPEBDH00@Z -?get_initial_auto_partitioner_divisor@internal@tbb@@YA_KXZ -?handle_perror@internal@tbb@@YAXHPEBD@Z -?set_assertion_handler@tbb@@YAP6AXPEBDH00@ZP6AX0H00@Z@Z -?runtime_warning@internal@tbb@@YAXPEBDZZ -TBB_runtime_interface_version - -; itt_notify.cpp -?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPEAXPEBX@Z -?itt_store_pointer_with_release_v3@internal@tbb@@YAXPEAX0@Z -?itt_load_pointer_v3@internal@tbb@@YAPEAXPEBX@Z 
-?itt_set_sync_name_v3@internal@tbb@@YAXPEAXPEB_W@Z - -; pipeline.cpp -??_7pipeline@tbb@@6B@ -??0pipeline@tbb@@QEAA@XZ -??1filter@tbb@@UEAA@XZ -??1pipeline@tbb@@UEAA@XZ -?add_filter@pipeline@tbb@@QEAAXAEAVfilter@2@@Z -?clear@pipeline@tbb@@QEAAXXZ -?inject_token@pipeline@tbb@@AEAAXAEAVtask@2@@Z -?run@pipeline@tbb@@QEAAX_K@Z -#if __TBB_TASK_GROUP_CONTEXT -?run@pipeline@tbb@@QEAAX_KAEAVtask_group_context@2@@Z -#endif -?process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ -?try_process_item@thread_bound_filter@tbb@@QEAA?AW4result_type@12@XZ - -; queuing_rw_mutex.cpp -?internal_construct@queuing_rw_mutex@tbb@@QEAAXXZ -?acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAAXAEAV23@_N@Z -?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ -?release@scoped_lock@queuing_rw_mutex@tbb@@QEAAXXZ -?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NXZ -?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QEAA_NAEAV23@_N@Z - -; reader_writer_lock.cpp -?try_lock_read@reader_writer_lock@interface5@tbb@@QEAA_NXZ -?try_lock@reader_writer_lock@interface5@tbb@@QEAA_NXZ -?unlock@reader_writer_lock@interface5@tbb@@QEAAXXZ -?lock_read@reader_writer_lock@interface5@tbb@@QEAAXXZ -?lock@reader_writer_lock@interface5@tbb@@QEAAXXZ -?internal_construct@reader_writer_lock@interface5@tbb@@AEAAXXZ -?internal_destroy@reader_writer_lock@interface5@tbb@@AEAAXXZ -?internal_construct@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z -?internal_destroy@scoped_lock@reader_writer_lock@interface5@tbb@@AEAAXXZ -?internal_construct@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXAEAV234@@Z -?internal_destroy@scoped_lock_read@reader_writer_lock@interface5@tbb@@AEAAXXZ - -#if !TBB_NO_LEGACY -; spin_rw_mutex.cpp v2 -?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPEAV12@@Z -?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z -?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z -?internal_downgrade@spin_rw_mutex@tbb@@CAXPEAV12@@Z -?internal_upgrade@spin_rw_mutex@tbb@@CA_NPEAV12@@Z -?internal_release_reader@spin_rw_mutex@tbb@@CAXPEAV12@@Z -?internal_release_writer@spin_rw_mutex@tbb@@CAXPEAV12@@Z -?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPEAV12@@Z -?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPEAV12@@Z -#endif - -; spin_rw_mutex v3 -?internal_construct@spin_rw_mutex_v3@tbb@@AEAAXXZ -?internal_upgrade@spin_rw_mutex_v3@tbb@@AEAA_NXZ -?internal_downgrade@spin_rw_mutex_v3@tbb@@AEAAXXZ -?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ -?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ -?internal_release_reader@spin_rw_mutex_v3@tbb@@AEAAXXZ -?internal_release_writer@spin_rw_mutex_v3@tbb@@AEAAXXZ -?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AEAA_NXZ -?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AEAA_NXZ - -; spin_mutex.cpp -?internal_construct@spin_mutex@tbb@@QEAAXXZ -?internal_acquire@scoped_lock@spin_mutex@tbb@@AEAAXAEAV23@@Z -?internal_release@scoped_lock@spin_mutex@tbb@@AEAAXXZ -?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AEAA_NAEAV23@@Z - -; mutex.cpp -?internal_acquire@scoped_lock@mutex@tbb@@AEAAXAEAV23@@Z -?internal_release@scoped_lock@mutex@tbb@@AEAAXXZ -?internal_try_acquire@scoped_lock@mutex@tbb@@AEAA_NAEAV23@@Z -?internal_construct@mutex@tbb@@AEAAXXZ -?internal_destroy@mutex@tbb@@AEAAXXZ - -; recursive_mutex.cpp -?internal_construct@recursive_mutex@tbb@@AEAAXXZ -?internal_destroy@recursive_mutex@tbb@@AEAAXXZ -?internal_acquire@scoped_lock@recursive_mutex@tbb@@AEAAXAEAV23@@Z 
-?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AEAA_NAEAV23@@Z -?internal_release@scoped_lock@recursive_mutex@tbb@@AEAAXXZ - -; queuing_mutex.cpp -?internal_construct@queuing_mutex@tbb@@QEAAXXZ -?acquire@scoped_lock@queuing_mutex@tbb@@QEAAXAEAV23@@Z -?release@scoped_lock@queuing_mutex@tbb@@QEAAXXZ -?try_acquire@scoped_lock@queuing_mutex@tbb@@QEAA_NAEAV23@@Z - -;critical_section.cpp -?internal_construct@critical_section_v4@internal@tbb@@QEAAXXZ - -#if !TBB_NO_LEGACY -; concurrent_hash_map.cpp -?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QEBA_NXZ - -; concurrent_queue.cpp v2 -??0concurrent_queue_base@internal@tbb@@IEAA@_K@Z -??0concurrent_queue_iterator_base@internal@tbb@@IEAA@AEBVconcurrent_queue_base@12@@Z -??1concurrent_queue_base@internal@tbb@@MEAA@XZ -??1concurrent_queue_iterator_base@internal@tbb@@IEAA@XZ -?advance@concurrent_queue_iterator_base@internal@tbb@@IEAAXXZ -?assign@concurrent_queue_iterator_base@internal@tbb@@IEAAXAEBV123@@Z -?internal_pop@concurrent_queue_base@internal@tbb@@IEAAXPEAX@Z -?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IEAA_NPEAX@Z -?internal_push@concurrent_queue_base@internal@tbb@@IEAAXPEBX@Z -?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IEAA_NPEBX@Z -?internal_set_capacity@concurrent_queue_base@internal@tbb@@IEAAX_J_K@Z -?internal_size@concurrent_queue_base@internal@tbb@@IEBA_JXZ -#endif - -; concurrent_queue v3 -??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@@Z -??0concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@AEBVconcurrent_queue_base_v3@12@_K@Z -??1concurrent_queue_iterator_base_v3@internal@tbb@@IEAA@XZ -?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXAEBV123@@Z -?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IEAAXXZ -??0concurrent_queue_base_v3@internal@tbb@@IEAA@_K@Z -??1concurrent_queue_base_v3@internal@tbb@@MEAA@XZ -?internal_push@concurrent_queue_base_v3@internal@tbb@@IEAAXPEBX@Z -?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEBX@Z -?internal_pop@concurrent_queue_base_v3@internal@tbb@@IEAAXPEAX@Z -?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IEAA_NPEAX@Z -?internal_size@concurrent_queue_base_v3@internal@tbb@@IEBA_JXZ -?internal_empty@concurrent_queue_base_v3@internal@tbb@@IEBA_NXZ -?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IEAAXXZ -?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IEAAX_J_K@Z -?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IEBAXXZ -?assign@concurrent_queue_base_v3@internal@tbb@@IEAAXAEBV123@@Z - -#if !TBB_NO_LEGACY -; concurrent_vector.cpp v2 -?internal_assign@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z -?internal_capacity@concurrent_vector_base@internal@tbb@@IEBA_KXZ -?internal_clear@concurrent_vector_base@internal@tbb@@IEAAXP6AXPEAX_K@Z_N@Z -?internal_copy@concurrent_vector_base@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z -?internal_grow_by@concurrent_vector_base@internal@tbb@@IEAA_K_K0P6AXPEAX0@Z@Z -?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IEAAX_K0P6AXPEAX0@Z@Z -?internal_push_back@concurrent_vector_base@internal@tbb@@IEAAPEAX_KAEA_K@Z -?internal_reserve@concurrent_vector_base@internal@tbb@@IEAAX_K00@Z -#endif - -; concurrent_vector v3 -??1concurrent_vector_base_v3@internal@tbb@@IEAA@XZ -?internal_assign@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAX1@ZP6AX2PEBX1@Z5@Z 
-?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IEBA_KXZ -?internal_clear@concurrent_vector_base_v3@internal@tbb@@IEAA_KP6AXPEAX_K@Z@Z -?internal_copy@concurrent_vector_base_v3@internal@tbb@@IEAAXAEBV123@_KP6AXPEAXPEBX1@Z@Z -?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z -?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IEAAX_K0P6AXPEAXPEBX0@Z2@Z -?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KAEA_K@Z -?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00@Z -?internal_compact@concurrent_vector_base_v3@internal@tbb@@IEAAPEAX_KPEAXP6AX10@ZP6AX1PEBX0@Z@Z -?internal_swap@concurrent_vector_base_v3@internal@tbb@@IEAAXAEAV123@@Z -?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IEBAX_K@Z -?internal_resize@concurrent_vector_base_v3@internal@tbb@@IEAAX_K00PEBXP6AXPEAX0@ZP6AX210@Z@Z -?internal_grow_to_at_least_with_result@concurrent_vector_base_v3@internal@tbb@@IEAA_K_K0P6AXPEAXPEBX0@Z2@Z - -; tbb_thread -?allocate_closure_v3@internal@tbb@@YAPEAX_K@Z -?detach@tbb_thread_v3@internal@tbb@@QEAAXXZ -?free_closure_v3@internal@tbb@@YAXPEAX@Z -?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ -?internal_start@tbb_thread_v3@internal@tbb@@AEAAXP6AIPEAX@Z0@Z -?join@tbb_thread_v3@internal@tbb@@QEAAXXZ -?move_v3@internal@tbb@@YAXAEAVtbb_thread_v3@12@0@Z -?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ -?thread_sleep_v3@internal@tbb@@YAXAEBVinterval_t@tick_count@2@@Z -?thread_yield_v3@internal@tbb@@YAXXZ - -; condition_variable -?internal_initialize_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z -?internal_condition_variable_wait@internal@interface5@tbb@@YA_NAEATcondvar_impl_t@123@PEAVmutex@3@PEBVinterval_t@tick_count@3@@Z -?internal_condition_variable_notify_one@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z -?internal_condition_variable_notify_all@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z -?internal_destroy_condition_variable@internal@interface5@tbb@@YAXAEATcondvar_impl_t@123@@Z diff --git a/deal.II/bundled/tbb30_104oss/src/tbb/xbox360-tbb-export.def b/deal.II/bundled/tbb30_104oss/src/tbb/xbox360-tbb-export.def deleted file mode 100644 index ac5f14ee8b..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbb/xbox360-tbb-export.def +++ /dev/null @@ -1,234 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. 
Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -EXPORTS - -; Assembly-language support that is called directly by clients -;__TBB_machine_cmpswp1 -;__TBB_machine_cmpswp2 -;__TBB_machine_cmpswp4 -;__TBB_machine_cmpswp8 -;__TBB_machine_fetchadd1 -;__TBB_machine_fetchadd2 -;__TBB_machine_fetchadd4 -;__TBB_machine_fetchadd8 -;__TBB_machine_fetchstore1 -;__TBB_machine_fetchstore2 -;__TBB_machine_fetchstore4 -;__TBB_machine_fetchstore8 -;__TBB_machine_store8 -;__TBB_machine_load8 -;__TBB_machine_trylockbyte - -; cache_aligned_allocator.cpp -?NFS_Allocate@internal@tbb@@YAPAXIIPAX@Z @1 -?NFS_GetLineSize@internal@tbb@@YAIXZ @2 -?NFS_Free@internal@tbb@@YAXPAX@Z @3 -?allocate_via_handler_v3@internal@tbb@@YAPAXI@Z @4 -?deallocate_via_handler_v3@internal@tbb@@YAXPAX@Z @5 -?is_malloc_used_v3@internal@tbb@@YA_NXZ @6 - -; task.cpp v3 -?allocate@allocate_additional_child_of_proxy@internal@tbb@@QBAAAVtask@3@I@Z @7 -?allocate@allocate_child_proxy@internal@tbb@@QBAAAVtask@3@I@Z @8 -?allocate@allocate_continuation_proxy@internal@tbb@@QBAAAVtask@3@I@Z @9 -?allocate@allocate_root_proxy@internal@tbb@@SAAAVtask@3@I@Z @10 -?destroy@task@tbb@@QAAXAAV12@@Z @11 -?free@allocate_additional_child_of_proxy@internal@tbb@@QBAXAAVtask@3@@Z @12 -?free@allocate_child_proxy@internal@tbb@@QBAXAAVtask@3@@Z @13 -?free@allocate_continuation_proxy@internal@tbb@@QBAXAAVtask@3@@Z @14 -?free@allocate_root_proxy@internal@tbb@@SAXAAVtask@3@@Z @15 -?internal_set_ref_count@task@tbb@@AAAXH@Z @16 -?is_owned_by_current_thread@task@tbb@@QBA_NXZ @17 -?note_affinity@task@tbb@@UAAXG@Z @18 -?resize@affinity_partitioner_base_v3@internal@tbb@@AAAXI@Z @19 -?self@task@tbb@@SAAAV12@XZ @20 -?spawn_and_wait_for_all@task@tbb@@QAAXAAVtask_list@2@@Z @21 -?default_num_threads@task_scheduler_init@tbb@@SAHXZ @22 -?initialize@task_scheduler_init@tbb@@QAAXHI@Z @23 -?initialize@task_scheduler_init@tbb@@QAAXH@Z @24 -?terminate@task_scheduler_init@tbb@@QAAXXZ @25 -?observe@task_scheduler_observer_v3@internal@tbb@@QAAX_N@Z @26 - -; exception handling support -?allocate@allocate_root_with_context_proxy@internal@tbb@@QBAAAVtask@3@I@Z @27 -?free@allocate_root_with_context_proxy@internal@tbb@@QBAXAAVtask@3@@Z @28 -?is_group_execution_cancelled@task_group_context@tbb@@QBA_NXZ @29 -?cancel_group_execution@task_group_context@tbb@@QAA_NXZ @30 -?reset@task_group_context@tbb@@QAAXXZ @31 -?init@task_group_context@tbb@@IAAXXZ @32 -??1task_group_context@tbb@@QAA@XZ @33 -?name@captured_exception@tbb@@UBAPBDXZ @34 -?what@captured_exception@tbb@@UBAPBDXZ @35 -??1captured_exception@tbb@@UAA@XZ @36 - -; tbb_misc.cpp -?assertion_failure@tbb@@YAXPBDH00@Z @37 -?get_initial_auto_partitioner_divisor@internal@tbb@@YAIXZ @38 -?handle_perror@internal@tbb@@YAXHPBD@Z @39 -?set_assertion_handler@tbb@@YAP6AXPBDH00@ZP6AX0H00@Z@Z @40 -?runtime_warning@internal@tbb@@YAXPBDZZ @41 - -; itt_notify.cpp -?itt_load_pointer_with_acquire_v3@internal@tbb@@YAPAXPBX@Z @42 -?itt_store_pointer_with_release_v3@internal@tbb@@YAXPAX0@Z @43 - -; pipeline.cpp -??0pipeline@tbb@@QAA@XZ @44 -??1filter@tbb@@UAA@XZ @45 -??1pipeline@tbb@@UAA@XZ @46 -??_7pipeline@tbb@@6B@ @47 -?add_filter@pipeline@tbb@@QAAXAAVfilter@2@@Z @48 
-?clear@pipeline@tbb@@QAAXXZ @49 -?inject_token@pipeline@tbb@@AAAXAAVtask@2@@Z @50 -?run@pipeline@tbb@@QAAXI@Z @51 - -; queuing_rw_mutex.cpp -?acquire@scoped_lock@queuing_rw_mutex@tbb@@QAAXAAV23@_N@Z @52 -?downgrade_to_reader@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @53 -?release@scoped_lock@queuing_rw_mutex@tbb@@QAAXXZ @54 -?upgrade_to_writer@scoped_lock@queuing_rw_mutex@tbb@@QAA_NXZ @55 -?try_acquire@scoped_lock@queuing_rw_mutex@tbb@@QAA_NAAV23@_N@Z @56 - -#if !TBB_NO_LEGACY -; spin_rw_mutex.cpp v2 -?internal_acquire_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @57 -?internal_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @58 -?internal_downgrade@spin_rw_mutex@tbb@@CAXPAV12@@Z @59 -?internal_itt_releasing@spin_rw_mutex@tbb@@CAXPAV12@@Z @60 -?internal_release_reader@spin_rw_mutex@tbb@@CAXPAV12@@Z @61 -?internal_release_writer@spin_rw_mutex@tbb@@CAXPAV12@@Z @62 -?internal_upgrade@spin_rw_mutex@tbb@@CA_NPAV12@@Z @63 -?internal_try_acquire_writer@spin_rw_mutex@tbb@@CA_NPAV12@@Z @64 -?internal_try_acquire_reader@spin_rw_mutex@tbb@@CA_NPAV12@@Z @65 -#endif - -; spin_rw_mutex v3 -?internal_upgrade@spin_rw_mutex_v3@tbb@@AAA_NXZ @66 -?internal_downgrade@spin_rw_mutex_v3@tbb@@AAAXXZ @67 -?internal_acquire_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @68 -?internal_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @69 -?internal_release_reader@spin_rw_mutex_v3@tbb@@AAAXXZ @70 -?internal_release_writer@spin_rw_mutex_v3@tbb@@AAAXXZ @71 -?internal_try_acquire_reader@spin_rw_mutex_v3@tbb@@AAA_NXZ @72 -?internal_try_acquire_writer@spin_rw_mutex_v3@tbb@@AAA_NXZ @73 - -; spin_mutex.cpp -?internal_acquire@scoped_lock@spin_mutex@tbb@@AAAXAAV23@@Z @74 -?internal_release@scoped_lock@spin_mutex@tbb@@AAAXXZ @75 -?internal_try_acquire@scoped_lock@spin_mutex@tbb@@AAA_NAAV23@@Z @76 - -; mutex.cpp -?internal_acquire@scoped_lock@mutex@tbb@@AAAXAAV23@@Z @77 -?internal_release@scoped_lock@mutex@tbb@@AAAXXZ @78 -?internal_try_acquire@scoped_lock@mutex@tbb@@AAA_NAAV23@@Z @79 -?internal_construct@mutex@tbb@@AAAXXZ @80 -?internal_destroy@mutex@tbb@@AAAXXZ @81 - -; recursive_mutex.cpp -?internal_acquire@scoped_lock@recursive_mutex@tbb@@AAAXAAV23@@Z @82 -?internal_release@scoped_lock@recursive_mutex@tbb@@AAAXXZ @83 -?internal_try_acquire@scoped_lock@recursive_mutex@tbb@@AAA_NAAV23@@Z @84 -?internal_construct@recursive_mutex@tbb@@AAAXXZ @85 -?internal_destroy@recursive_mutex@tbb@@AAAXXZ @86 - -; queuing_mutex.cpp -?acquire@scoped_lock@queuing_mutex@tbb@@QAAXAAV23@@Z @87 -?release@scoped_lock@queuing_mutex@tbb@@QAAXXZ @88 -?try_acquire@scoped_lock@queuing_mutex@tbb@@QAA_NAAV23@@Z @89 - -; concurrent_hash_map.cpp -?internal_grow_predicate@hash_map_segment_base@internal@tbb@@QBA_NXZ @90 - -#if !TBB_NO_LEGACY -; concurrent_queue.cpp v2 -?advance@concurrent_queue_iterator_base@internal@tbb@@IAAXXZ @91 -?assign@concurrent_queue_iterator_base@internal@tbb@@IAAXABV123@@Z @92 -?internal_size@concurrent_queue_base@internal@tbb@@IBAHXZ @93 -??0concurrent_queue_base@internal@tbb@@IAA@I@Z @94 -??0concurrent_queue_iterator_base@internal@tbb@@IAA@ABVconcurrent_queue_base@12@@Z @95 -??1concurrent_queue_base@internal@tbb@@MAA@XZ @96 -??1concurrent_queue_iterator_base@internal@tbb@@IAA@XZ @97 -?internal_pop@concurrent_queue_base@internal@tbb@@IAAXPAX@Z @98 -?internal_pop_if_present@concurrent_queue_base@internal@tbb@@IAA_NPAX@Z @99 -?internal_push@concurrent_queue_base@internal@tbb@@IAAXPBX@Z @100 -?internal_push_if_not_full@concurrent_queue_base@internal@tbb@@IAA_NPBX@Z @101 -?internal_set_capacity@concurrent_queue_base@internal@tbb@@IAAXHI@Z @102 -#endif - -; 
concurrent_queue v3 -??1concurrent_queue_iterator_base_v3@internal@tbb@@IAA@XZ @103 -??0concurrent_queue_iterator_base_v3@internal@tbb@@IAA@ABVconcurrent_queue_base_v3@12@@Z @104 -?advance@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXXZ @105 -?assign@concurrent_queue_iterator_base_v3@internal@tbb@@IAAXABV123@@Z @106 -??0concurrent_queue_base_v3@internal@tbb@@IAA@I@Z @107 -??1concurrent_queue_base_v3@internal@tbb@@MAA@XZ @108 -?internal_pop@concurrent_queue_base_v3@internal@tbb@@IAAXPAX@Z @109 -?internal_pop_if_present@concurrent_queue_base_v3@internal@tbb@@IAA_NPAX@Z @110 -?internal_push@concurrent_queue_base_v3@internal@tbb@@IAAXPBX@Z @111 -?internal_push_if_not_full@concurrent_queue_base_v3@internal@tbb@@IAA_NPBX@Z @112 -?internal_size@concurrent_queue_base_v3@internal@tbb@@IBAHXZ @113 -?internal_set_capacity@concurrent_queue_base_v3@internal@tbb@@IAAXHI@Z @114 -?internal_finish_clear@concurrent_queue_base_v3@internal@tbb@@IAAXXZ @115 -?internal_throw_exception@concurrent_queue_base_v3@internal@tbb@@IBAXXZ @116 - -#if !TBB_NO_LEGACY -; concurrent_vector.cpp v2 -?internal_assign@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @117 -?internal_capacity@concurrent_vector_base@internal@tbb@@IBAIXZ @118 -?internal_clear@concurrent_vector_base@internal@tbb@@IAAXP6AXPAXI@Z_N@Z @119 -?internal_copy@concurrent_vector_base@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @120 -?internal_grow_by@concurrent_vector_base@internal@tbb@@IAAIIIP6AXPAXI@Z@Z @121 -?internal_grow_to_at_least@concurrent_vector_base@internal@tbb@@IAAXIIP6AXPAXI@Z@Z @122 -?internal_push_back@concurrent_vector_base@internal@tbb@@IAAPAXIAAI@Z @123 -?internal_reserve@concurrent_vector_base@internal@tbb@@IAAXIII@Z @124 -#endif - -; concurrent_vector v3 -??1concurrent_vector_base_v3@internal@tbb@@IAA@XZ @125 -?internal_assign@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXI@ZP6AX1PBXI@Z4@Z @126 -?internal_capacity@concurrent_vector_base_v3@internal@tbb@@IBAIXZ @127 -?internal_clear@concurrent_vector_base_v3@internal@tbb@@IAAIP6AXPAXI@Z@Z @128 -?internal_copy@concurrent_vector_base_v3@internal@tbb@@IAAXABV123@IP6AXPAXPBXI@Z@Z @129 -?internal_grow_by@concurrent_vector_base_v3@internal@tbb@@IAAIIIP6AXPAXPBXI@Z1@Z @130 -?internal_grow_to_at_least@concurrent_vector_base_v3@internal@tbb@@IAAXIIP6AXPAXPBXI@Z1@Z @131 -?internal_push_back@concurrent_vector_base_v3@internal@tbb@@IAAPAXIAAI@Z @132 -?internal_reserve@concurrent_vector_base_v3@internal@tbb@@IAAXIII@Z @133 -?internal_compact@concurrent_vector_base_v3@internal@tbb@@IAAPAXIPAXP6AX0I@ZP6AX0PBXI@Z@Z @134 -?internal_swap@concurrent_vector_base_v3@internal@tbb@@IAAXAAV123@@Z @135 -?internal_throw_exception@concurrent_vector_base_v3@internal@tbb@@IBAXI@Z @136 - -; tbb_thread -?join@tbb_thread_v3@internal@tbb@@QAAXXZ @137 -?detach@tbb_thread_v3@internal@tbb@@QAAXXZ @138 -?internal_start@tbb_thread_v3@internal@tbb@@AAAXP6AIPAX@Z0@Z @139 -?allocate_closure_v3@internal@tbb@@YAPAXI@Z @140 -?free_closure_v3@internal@tbb@@YAXPAX@Z @141 -?hardware_concurrency@tbb_thread_v3@internal@tbb@@SAIXZ @142 -?thread_yield_v3@internal@tbb@@YAXXZ @143 -?thread_sleep_v3@internal@tbb@@YAXABVinterval_t@tick_count@2@@Z @144 -?move_v3@internal@tbb@@YAXAAVtbb_thread_v3@12@0@Z @145 -?thread_get_id_v3@internal@tbb@@YA?AVid@tbb_thread_v3@12@XZ @146 diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/Customize.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/Customize.h deleted file mode 100644 index 2ad17a99d3..0000000000 --- 
a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/Customize.h +++ /dev/null @@ -1,128 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _TBB_malloc_Customize_H_ -#define _TBB_malloc_Customize_H_ - -/* Thread shutdown notification callback */ -/* redefine the name of the callback to meet TBB requirements - for externally visible names of service functions */ -#define mallocThreadShutdownNotification __TBB_mallocThreadShutdownNotification -#define mallocProcessShutdownNotification __TBB_mallocProcessShutdownNotification - -extern "C" void mallocThreadShutdownNotification(void *); -extern "C" void mallocProcessShutdownNotification(void); - -// customizing MALLOC_ASSERT macro -#include "tbb/tbb_stddef.h" -#define MALLOC_ASSERT(assertion, message) __TBB_ASSERT(assertion, message) - -#ifndef MALLOC_DEBUG -#define MALLOC_DEBUG TBB_USE_DEBUG -#endif - -#include "tbb/tbb_machine.h" - -#if DO_ITT_NOTIFY -#include "tbb/itt_notify.h" -#define MALLOC_ITT_SYNC_PREPARE(pointer) ITT_NOTIFY(sync_prepare, (pointer)) -#define MALLOC_ITT_SYNC_ACQUIRED(pointer) ITT_NOTIFY(sync_acquired, (pointer)) -#define MALLOC_ITT_SYNC_RELEASING(pointer) ITT_NOTIFY(sync_releasing, (pointer)) -#define MALLOC_ITT_SYNC_CANCEL(pointer) ITT_NOTIFY(sync_cancel, (pointer)) -#else -#define MALLOC_ITT_SYNC_PREPARE(pointer) ((void)0) -#define MALLOC_ITT_SYNC_ACQUIRED(pointer) ((void)0) -#define MALLOC_ITT_SYNC_RELEASING(pointer) ((void)0) -#define MALLOC_ITT_SYNC_CANCEL(pointer) ((void)0) -#endif - -//! Stripped down version of spin_mutex. -/** Instances of MallocMutex must be declared in memory that is zero-initialized. - There are no constructors. This is a feature that lets it be - used in situations where the mutex might be used while file-scope constructors - are running. - - There are no methods "acquire" or "release". The scoped_lock must be used - in a strict block-scoped locking pattern. Omitting these methods permitted - further simplication. */ -class MallocMutex { - unsigned char value; - - //! 
Deny assignment - void operator=( MallocMutex& MallocMutex ); -public: - class scoped_lock { - const unsigned char value; - MallocMutex& mutex; - public: - scoped_lock( MallocMutex& m ) : value( __TBB_LockByte(m.value)), mutex(m) {} - ~scoped_lock() { __TBB_store_with_release(mutex.value, value); } - }; - friend class scoped_lock; -}; - -inline intptr_t AtomicIncrement( volatile intptr_t& counter ) { - return __TBB_FetchAndAddW( &counter, 1 )+1; -} - -inline uintptr_t AtomicAdd( volatile uintptr_t& counter, uintptr_t value ) { - return __TBB_FetchAndAddW( &counter, value ); -} - -inline intptr_t AtomicCompareExchange( volatile intptr_t& location, intptr_t new_value, intptr_t comparand) { - return __TBB_CompareAndSwapW( &location, new_value, comparand ); -} - -inline intptr_t FencedLoad( const volatile intptr_t &location ) { - return __TBB_load_with_acquire(location); -} - -inline void FencedStore( volatile intptr_t &location, intptr_t value ) { - __TBB_store_with_release(location, value); -} - -#define USE_DEFAULT_MEMORY_MAPPING 1 - -// To support malloc replacement with LD_PRELOAD -#include "proxy.h" - -#if MALLOC_LD_PRELOAD -#define malloc_proxy __TBB_malloc_proxy -extern "C" void * __TBB_malloc_proxy(size_t) __attribute__ ((weak)); -#else -const bool malloc_proxy = false; -#endif - -namespace rml { -namespace internal { - void init_tbbmalloc(); -} } // namespaces - -#define MALLOC_EXTRA_INITIALIZATION rml::internal::init_tbbmalloc() - -#endif /* _TBB_malloc_Customize_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/LifoList.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/LifoList.h deleted file mode 100644 index e45934c11d..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/LifoList.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _itt_common_malloc_LifoList_H_ -#define _itt_common_malloc_LifoList_H_ - -#include "TypeDefinitions.h" -#include <string.h> // for memset() - -//! 
Checking the synchronization method -/** FINE_GRAIN_LOCKS is the only variant for now; should be defined for LifoList */ -#ifndef FINE_GRAIN_LOCKS -#define FINE_GRAIN_LOCKS -#endif - -namespace rml { - -namespace internal { - -class LifoList { -public: - inline LifoList(); - inline void push(void** ptr); - inline void* pop(void); - inline void pushList(void **head, void **tail); - -private: - void * top; -#ifdef FINE_GRAIN_LOCKS - MallocMutex lock; -#endif /* FINE_GRAIN_LOCKS */ -}; - -#ifdef FINE_GRAIN_LOCKS -/* LifoList assumes zero initialization so a vector of it can be created - * by just allocating some space with no call to constructor. - * On Linux, it seems to be necessary to avoid linking with C++ libraries. - * - * By usage convention there is no race on the initialization. */ -LifoList::LifoList( ) : top(NULL) -{ - // MallocMutex assumes zero initialization - memset(&lock, 0, sizeof(MallocMutex)); -} - -void LifoList::push( void **ptr ) -{ - MallocMutex::scoped_lock scoped_cs(lock); - *ptr = top; - top = ptr; -} - -void LifoList::pushList( void **head, void **tail ) -{ - MallocMutex::scoped_lock scoped_cs(lock); - *tail = top; - top = head; -} - -void * LifoList::pop( ) -{ - void **result=NULL; - if (!top) goto done; - { - MallocMutex::scoped_lock scoped_cs(lock); - if (!top) goto done; - result = (void **) top; - top = *result; - } - *result = NULL; -done: - return result; -} - -#endif /* FINE_GRAIN_LOCKS */ - -} // namespace internal -} // namespace rml - -#endif /* _itt_common_malloc_LifoList_H_ */ - diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/MapMemory.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/MapMemory.h deleted file mode 100644 index 26d85f2d0d..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/MapMemory.h +++ /dev/null @@ -1,101 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _itt_shared_malloc_MapMemory_H -#define _itt_shared_malloc_MapMemory_H - -#if __linux__ || __APPLE__ || __sun || __FreeBSD__ - -#if __sun && !defined(_XPG4_2) - // To have void* as mmap's 1st argument - #define _XPG4_2 1 - #define XPG4_WAS_DEFINED 1 -#endif - -#include <sys/mman.h> - -#if XPG4_WAS_DEFINED - #undef _XPG4_2 - #undef XPG4_WAS_DEFINED -#endif - -#define MEMORY_MAPPING_USES_MALLOC 0 -void* MapMemory (size_t bytes) -{ - void* result = 0; -#ifndef MAP_ANONYMOUS -// Mac OS* X defines MAP_ANON, which is deprecated in Linux. -#define MAP_ANONYMOUS MAP_ANON -#endif /* MAP_ANONYMOUS */ - result = mmap(result, bytes, (PROT_READ | PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); - return result==MAP_FAILED? 0: result; -} - -int UnmapMemory(void *area, size_t bytes) -{ - return munmap(area, bytes); -} - -#elif (_WIN32 || _WIN64) && !_XBOX -#include <windows.h> - -#define MEMORY_MAPPING_USES_MALLOC 0 -void* MapMemory (size_t bytes) -{ - /* Is VirtualAlloc thread safe? */ - return VirtualAlloc(NULL, bytes, (MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN), PAGE_READWRITE); -} - -int UnmapMemory(void *area, size_t bytes) -{ - BOOL result = VirtualFree(area, 0, MEM_RELEASE); - return !result; -} - -#else -#include <stdlib.h> - -#define MEMORY_MAPPING_USES_MALLOC 1 -void* MapMemory (size_t bytes) -{ - return malloc( bytes ); -} - -int UnmapMemory(void *area, size_t bytes) -{ - free( area ); - return 0; -} - -#endif /* OS dependent */ - -#if MALLOC_CHECK_RECURSION && MEMORY_MAPPING_USES_MALLOC -#error Impossible to protect against malloc recursion when memory mapping uses malloc. -#endif - -#endif /* _itt_shared_malloc_MapMemory_H */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/Statistics.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/Statistics.h deleted file mode 100644 index 9b59d0e9f3..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/Statistics.h +++ /dev/null @@ -1,137 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#define MAX_THREADS 1024 -#define NUM_OF_BINS 30 -#define ThreadCommonCounters NUM_OF_BINS - -enum counter_type { - allocBlockNew = 0, - allocBlockPublic, - allocBumpPtrUsed, - allocFreeListUsed, - allocPrivatized, - examineEmptyEnough, - examineNotEmpty, - freeRestoreBumpPtr, - freeByOtherThread, - freeToActiveBlock, - freeToInactiveBlock, - freeBlockPublic, - freeBlockBack, - MaxCounters -}; -enum common_counter_type { - allocNewLargeObj = 0, - allocCachedLargeObj, - cacheLargeObj, - freeLargeObj, - lockPublicFreeList, - freeToOtherThread -}; - -#if COLLECT_STATISTICS -/* Statistics reporting callback registred via a static object dtor - on Posix or DLL_PROCESS_DETACH on Windows. - */ - -static bool reportAllocationStatistics; - -struct bin_counters { - int counter[MaxCounters]; -}; - -static bin_counters statistic[MAX_THREADS][NUM_OF_BINS+1]; //zero-initialized; - -static inline int STAT_increment(int thread, int bin, int ctr) -{ - return reportAllocationStatistics && thread < MAX_THREADS ? ++(statistic[thread][bin].counter[ctr]) : 0; -} - -static inline void initStatisticsCollection() { -#if defined(MALLOCENV_COLLECT_STATISTICS) - if (NULL != getenv(MALLOCENV_COLLECT_STATISTICS)) - reportAllocationStatistics = true; -#endif -} - -#else -#define STAT_increment(a,b,c) ((void)0) -#endif /* COLLECT_STATISTICS */ - -static inline void STAT_print(int thread) -{ -#if COLLECT_STATISTICS - if (!reportAllocationStatistics) - return; - - char filename[100]; -#if USE_PTHREAD - sprintf(filename, "stat_ScalableMalloc_proc%04d_thr%04d.log", getpid(), thread); -#else - sprintf(filename, "stat_ScalableMalloc_thr%04d.log", thread); -#endif - FILE* outfile = fopen(filename, "w"); - for(int i=0; i<NUM_OF_BINS; ++i) - { - bin_counters& ctrs = statistic[thread][i]; - fprintf(outfile, "Thr%04d Bin%02d", thread, i); - fprintf(outfile, ": allocNewBlocks %5d", ctrs.counter[allocBlockNew]); - fprintf(outfile, ", allocPublicBlocks %5d", ctrs.counter[allocBlockPublic]); - fprintf(outfile, ", restoreBumpPtr %5d", ctrs.counter[freeRestoreBumpPtr]); - fprintf(outfile, ", privatizeCalled %10d", ctrs.counter[allocPrivatized]); - fprintf(outfile, ", emptyEnough %10d", ctrs.counter[examineEmptyEnough]); - fprintf(outfile, ", notEmptyEnough %10d", ctrs.counter[examineNotEmpty]); - fprintf(outfile, ", freeBlocksPublic %5d", ctrs.counter[freeBlockPublic]); - fprintf(outfile, ", freeBlocksBack %5d", ctrs.counter[freeBlockBack]); - fprintf(outfile, "\n"); - } - for(int i=0; i<NUM_OF_BINS; ++i) - { - bin_counters& ctrs = statistic[thread][i]; - fprintf(outfile, "Thr%04d Bin%02d", thread, i); - fprintf(outfile, ": allocBumpPtr %10d", ctrs.counter[allocBumpPtrUsed]); - fprintf(outfile, ", allocFreeList %10d", ctrs.counter[allocFreeListUsed]); - fprintf(outfile, ", freeToActiveBlk %10d", ctrs.counter[freeToActiveBlock]); - fprintf(outfile, ", freeToInactive %10d", ctrs.counter[freeToInactiveBlock]); - fprintf(outfile, ", freedByOther %10d", ctrs.counter[freeByOtherThread]); - fprintf(outfile, "\n"); - } - bin_counters& ctrs = statistic[thread][ThreadCommonCounters]; - fprintf(outfile, "Thr%04d common counters", thread); - fprintf(outfile, ": allocNewLargeObject %5d", ctrs.counter[allocNewLargeObj]); - fprintf(outfile, ": allocCachedLargeObject %5d", ctrs.counter[allocCachedLargeObj]); - fprintf(outfile, ", cacheLargeObject %5d", ctrs.counter[cacheLargeObj]); - fprintf(outfile, ", freeLargeObject %5d", ctrs.counter[freeLargeObj]); - fprintf(outfile, ", lockPublicFreeList %5d", ctrs.counter[lockPublicFreeList]); - 
fprintf(outfile, ", freeToOtherThread %10d", ctrs.counter[freeToOtherThread]); - fprintf(outfile, "\n"); - - fclose(outfile); -#endif -} diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/TypeDefinitions.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/TypeDefinitions.h deleted file mode 100644 index 4442d96f81..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/TypeDefinitions.h +++ /dev/null @@ -1,105 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef _itt_shared_malloc_TypeDefinitions_H_ -#define _itt_shared_malloc_TypeDefinitions_H_ - -// Define preprocessor symbols used to determine architecture -#if _WIN32||_WIN64 -# if defined(_M_AMD64)||defined(__MINGW64__) // the latter for MinGW support -# define __ARCH_x86_64 1 -# elif defined(_M_IA64) -# define __ARCH_ipf 1 -# elif defined(_M_IX86)||defined(__i386__) // the latter for MinGW support -# define __ARCH_x86_32 1 -# else -# error Unknown processor architecture for Windows -# endif -# define USE_WINTHREAD 1 -#else /* Assume generic Unix */ -# if __x86_64__ -# define __ARCH_x86_64 1 -# elif __ia64__ -# define __ARCH_ipf 1 -# elif __i386__ || __i386 -# define __ARCH_x86_32 1 -# else -# define __ARCH_other 1 -# endif -# define USE_PTHREAD 1 -#endif - -// Include files containing declarations of intptr_t and uintptr_t -#include <stddef.h> // size_t -#if _MSC_VER -typedef unsigned __int16 uint16_t; -typedef unsigned __int32 uint32_t; -typedef unsigned __int64 uint64_t; -#else -#include <stdint.h> -#endif - -namespace rml { -namespace internal { - -extern bool original_malloc_found; -extern void* (*original_malloc_ptr)(size_t); -extern void (*original_free_ptr)(void*); - -} } // namespaces - -//! PROVIDE YOUR OWN Customize.h IF YOU FEEL NECESSARY -#include "Customize.h" - -/* - * Functions to align an integer down or up to the given power of two, - * and test for such an alignment, and for power of two. 
- */ -template<typename T> -static inline T alignDown(T arg, uintptr_t alignment) { - return T( (uintptr_t)arg & ~(alignment-1)); -} -template<typename T> -static inline T alignUp (T arg, uintptr_t alignment) { - return T(((uintptr_t)arg+(alignment-1)) & ~(alignment-1)); - // /*is this better?*/ return (((uintptr_t)arg-1) | (alignment-1)) + 1; -} -template<typename T> -static inline bool isAligned(T arg, uintptr_t alignment) { - return 0==((uintptr_t)arg & (alignment-1)); -} -static inline bool isPowerOfTwo(uintptr_t arg) { - return arg && (0==(arg & (arg-1))); -} -static inline bool isPowerOfTwoMultiple(uintptr_t arg, uintptr_t divisor) { - // Divisor is assumed to be a power of two (which is valid for current uses). - MALLOC_ASSERT( isPowerOfTwo(divisor), "Divisor should be a power of two" ); - return arg && (0==(arg & (arg-divisor))); -} - -#endif /* _itt_shared_malloc_TypeDefinitions_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/backend.cpp b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/backend.cpp deleted file mode 100644 index 7db8b65df0..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/backend.cpp +++ /dev/null @@ -1,275 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -// intrin.h available since VS2005 -#if defined(_MSC_VER) && _MSC_VER >= 1400 -#define __TBB_HAS_INTRIN_H 1 -#else -#define __TBB_HAS_INTRIN_H 0 -#endif - -#if __TBB_HAS_INTRIN_H -#include <intrin.h> /* for __cpuid */ -#endif - -#include "tbbmalloc_internal.h" -//! Define the main synchronization method -/** It should be specified before including LifoList.h */ -#define FINE_GRAIN_LOCKS -#include "LifoList.h" - - -namespace rml { -namespace internal { - -// If USE_MALLOC_FOR_LARGE_OBJECT is nonzero, then large allocations are done via malloc. -// Otherwise large allocations are done using the scalable allocator's block allocator. -// As of 06.Jun.17, using malloc is about 10x faster on Linux. 
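The TypeDefinitions.h helpers above (alignDown, alignUp, isAligned, isPowerOfTwo) rely on the usual power-of-two mask arithmetic. A minimal standalone sketch (not part of the bundled sources) with a worked example:

// Illustrative only: the rounding behaviour of the alignment helpers above.
#include <cassert>
#include <cstdint>

static inline std::uintptr_t alignDown(std::uintptr_t v, std::uintptr_t a) { return v & ~(a - 1); }
static inline std::uintptr_t alignUp  (std::uintptr_t v, std::uintptr_t a) { return (v + a - 1) & ~(a - 1); }

int main() {
    // With a 16-byte alignment, 100 rounds down to 96 and up to 112,
    // while an already-aligned value such as 112 maps to itself.
    assert(alignDown(100, 16) == 96);
    assert(alignUp(100, 16) == 112);
    assert(alignUp(112, 16) == 112);
    return 0;
}

The mask trick is only valid when the alignment is a power of two, which is why the deleted header asserts isPowerOfTwo(divisor) inside isPowerOfTwoMultiple.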
-#if !_WIN32 -#define USE_MALLOC_FOR_LARGE_OBJECT 1 -#endif - -/*********** Code to acquire memory from the OS or other executive ****************/ - -#if USE_DEFAULT_MEMORY_MAPPING -#include "MapMemory.h" -#else -/* assume MapMemory and UnmapMemory are customized */ -#endif - -#if USE_MALLOC_FOR_LARGE_OBJECT - -// (get|free)RawMemory only necessary for the USE_MALLOC_FOR_LARGE_OBJECT case -void* getRawMemory (size_t size, bool useMapMem = false) -{ - void *object; - - if (useMapMem) - object = MapMemory(size); - else -#if MALLOC_CHECK_RECURSION - if (RecursiveMallocCallProtector::noRecursion()) - object = malloc(size); - else if ( rml::internal::original_malloc_found ) - object = (*rml::internal::original_malloc_ptr)(size); - else - object = MapMemory(size); -#else - object = malloc(size); -#endif /* MALLOC_CHECK_RECURSION */ - return object; -} - -void freeRawMemory (void *object, size_t size, bool useMapMem) -{ - if (useMapMem) - UnmapMemory(object, size); - else -#if MALLOC_CHECK_RECURSION - if (RecursiveMallocCallProtector::noRecursion()) - free(object); - else if ( rml::internal::original_malloc_found ) - (*rml::internal::original_free_ptr)(object); - else - UnmapMemory(object, size); -#else - free(object); -#endif /* MALLOC_CHECK_RECURSION */ -} - -#else /* USE_MALLOC_FOR_LARGE_OBJECT */ - -void* getRawMemory (size_t size, bool = false) { return MapMemory(size); } - -void freeRawMemory (void *object, size_t size, bool) { - UnmapMemory(object, size); -} - -#endif /* USE_MALLOC_FOR_LARGE_OBJECT */ - -/********* End memory acquisition code ********************************/ - -static unsigned int getCPUid() -{ - unsigned int id; - -#if (__ARCH_x86_32||__ARCH_x86_64) && (__linux__||__APPLE__||__FreeBSD__||__sun||__MINGW32__) - int res; - #if __ARCH_x86_32 - /* EBX used for PIC support. Having EAX in output operands - prevents ICC from crash like in __TBB_ICC_ASM_VOLATILE_BROKEN. */ - int _eax, _ecx, _edx; - __asm__ ("xchgl %%ebx, %1\n\t" - "cpuid\n\t" - "xchgl %%ebx, %1\n\t" - : "=a" (_eax), "=r" (res) - : "a" (1) : "ecx", "edx"); - #else - __asm__ ("cpuid\n\t" - : "=b" (res) - : "a" (1) ); - #endif // __ARCH_x86_32 - id = (res >> 24) & 0xff; -#elif _WIN32 || _WIN64 - #if __TBB_HAS_INTRIN_H - int CPUInfo[4]; - __cpuid(CPUInfo, 1); - id = (CPUInfo[1] >> 24) & 0xff; - #else - int res; - _asm { - push ebx - push ecx - mov eax,1 - cpuid - mov res,ebx - pop ecx - pop ebx - } - id = (res >> 24) & 0xff; - #endif -# else - id = getThreadId(); -#endif - return id; -} - - -/* - * To decrease contention for free blocks, free blocks are split, and access - * to them is based on process number. - */ -const int numOfFreeBlockLists = 4; - -/* - * This is a LIFO linked list that one can init, push or pop from - */ -static LifoList freeBlockList[numOfFreeBlockLists]; - -FreeBlocks freeBlocks; - -bool FreeBlocks::bootstrap(RawAlloc myAlloc, RawFree myFree, size_t /*myReqSize*/) -{ - if (!myAlloc && !myFree) { - rawAlloc = getRawMemory; - rawFree = freeRawMemory; - // Get virtual memory in pieces of this size: 0x0100000 is 1 megabyte decimal - memReqSize = 0x0100000; - } else - MALLOC_ASSERT(0, "Not implemented yet."); - return mallocBigBlock(); -} - -BlockI *FreeBlocks::get(bool startup) -{ - BlockI *bigBlock; - // must not call getCPUid during malloc initialization - // because getCPUid can call malloc - const unsigned myFreeList = startup? 
0 : getCPUid()%numOfFreeBlockLists; - unsigned currListIdx = myFreeList; - - do { - if (bigBlock = (BlockI *) freeBlockList[currListIdx].pop()) { - MALLOC_ITT_SYNC_ACQUIRED(freeBlockList+currListIdx); - break; - } - currListIdx = (currListIdx+1) % numOfFreeBlockLists; - } while (currListIdx != myFreeList); - - while (!bigBlock) { - /* We are out of blocks so go to the OS and get another one */ - if (!mallocBigBlock()) return NULL; - - bigBlock = (BlockI *) freeBlockList[myFreeList].pop(); - if (bigBlock) - MALLOC_ITT_SYNC_ACQUIRED(freeBlockList+myFreeList); - } - - return bigBlock; -} - -void FreeBlocks::put(BlockI *ptr, bool startup) -{ - unsigned myFreeList = startup? 0 : getCPUid()%numOfFreeBlockLists; - MALLOC_ITT_SYNC_RELEASING(freeBlockList+myFreeList); - freeBlockList[myFreeList].push((void **)ptr); -} - -void FreeBlocks::putList(BlockI *head, BlockI *tail) -{ - unsigned myFreeList = getCPUid()%numOfFreeBlockLists; - MALLOC_ITT_SYNC_RELEASING(freeBlockList+myFreeList); - freeBlockList[myFreeList].pushList((void**)head, (void**)tail); -} - -/* - * Big Blocks are the blocks we get from the OS or some similar place using getMemory above. - * They are placed on the freeBlockList once they are acquired. - */ -bool FreeBlocks::mallocBigBlock() -{ -/* Divide the big block into smaller bigBlocks that hold that many blocks. - * This is done since we really need a lot of blocks on the freeBlockList - * or there will be contention problems. - */ - const unsigned int blocksPerBigBlock = 16/numOfFreeBlockLists; - - void *unalignedBigBlock = (*rawAlloc)(memReqSize, /*useMapMem=*/true); - - if (!unalignedBigBlock) { - TRACEF(( "[ScalableMalloc trace] in mallocBigBlock, getMemory returns 0\n" )); - /* We can't get any more memory from the OS or executive */ - return false; - } - - void *alignedBigBlock = alignUp(unalignedBigBlock, blockSize); - void *bigBlockCeiling = (void*)((uintptr_t)unalignedBigBlock + memReqSize); - - size_t bigBlockSplitSize = blocksPerBigBlock * blockSize; - - BlockI *splitBlock = (BlockI*)alignedBigBlock; - - // distribute alignedBigBlock between all freeBlockList elements - for (unsigned currListIdx = 0; - ((uintptr_t)splitBlock + blockSize) <= (uintptr_t)bigBlockCeiling; - currListIdx = (currListIdx+1) % numOfFreeBlockLists) { - void *splitEdge = (void*)((uintptr_t)splitBlock + bigBlockSplitSize); - if( splitEdge > bigBlockCeiling) { - splitEdge = alignDown(bigBlockCeiling, blockSize); - } - ((BlockI*)splitBlock)->initialize(splitEdge); - MALLOC_ITT_SYNC_RELEASING(freeBlockList+currListIdx); - freeBlockList[currListIdx].push((void**) splitBlock); - splitBlock = (BlockI*)splitEdge; - } - - TRACEF(( "[ScalableMalloc trace] in mallocBigBlock returning 1\n" )); - return true; -} - -} } // namespaces diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/backref.cpp b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/backref.cpp deleted file mode 100644 index 7c5c944593..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/backref.cpp +++ /dev/null @@ -1,240 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include <string.h> -#include <new> /* for placement new */ -#include "tbbmalloc_internal.h" - -namespace rml { -namespace internal { - - -/********* backreferences ***********************/ -/* Each 16KB block and each large memory object header contains BackRefIdx - * that points out in some BackRefBlock which points back to this block or header. - */ -struct BackRefBlock { - BackRefBlock *nextForUse; // the next in the chain of blocks with free items - FreeObject *bumpPtr; // bump pointer moves from the end to the beginning of the block - FreeObject *freeList; - int allocatedCount; // the number of objects allocated - int myNum; // the index in the parent array - MallocMutex blockMutex; - bool addedToForUse; // this block is already added to the listForUse chain - - BackRefBlock(BackRefBlock *blockToUse, int myNum) : - nextForUse(NULL), bumpPtr((FreeObject*)((uintptr_t)blockToUse + blockSize - sizeof(void*))), - freeList(NULL), allocatedCount(0), myNum(myNum), addedToForUse(false) { - // index in BackRefMaster must fit to uint16_t - MALLOC_ASSERT(!(myNum >> 16), ASSERT_TEXT); - } - - // when BackRefMaster::findFreeBlock() calls getRawBlock, - // BackRefBlock::bytes is used implicitly - static const int bytes = blockSize; -}; - -// max number of backreference pointers in 16KB block -static const int BR_MAX_CNT = (BackRefBlock::bytes-sizeof(BackRefBlock))/sizeof(void*); - -struct BackRefMaster { -/* A 16KB block can hold up to ~2K back pointers to 16KB blocks or large objects, - * so it can address at least 32MB. The array of 64KB holds 8K pointers - * to such blocks, addressing ~256 GB. - */ - static const size_t bytes = 64*1024; - static const int dataSz; - - BackRefBlock *active; // if defined, use it for allocations - BackRefBlock *listForUse; // the chain of data blocks with free items - int lastUsed; // index of the last used block - BackRefBlock *backRefBl[1]; // the real size of the array is dataSz - - BackRefBlock *findFreeBlock(); - void addBackRefBlockToList(BackRefBlock *bl); - void addEmptyBackRefBlock(BackRefBlock *newBl); -}; - -const int BackRefMaster::dataSz - = 1+(BackRefMaster::bytes-sizeof(BackRefMaster))/sizeof(BackRefBlock*); - -static MallocMutex backRefMutex; -static BackRefMaster *backRefMaster; - -bool initBackRefMaster() -{ - // reserve space for master table and 4 leaves taking into account VirtualAlloc allocation granularity - // MapMemory is forced because the function runs during startup. - const int leaves = 4; - if (! 
(backRefMaster = (BackRefMaster*)getRawMemory(BackRefMaster::bytes+leaves*BackRefBlock::bytes, /*useMapMem=*/true))) - return false; - backRefMaster->listForUse = NULL; - for (int i=0; i<leaves; i++) { - BackRefBlock *bl = (BackRefBlock *)((uintptr_t)backRefMaster + BackRefMaster::bytes + i*BackRefBlock::bytes); - backRefMaster->lastUsed = i; - backRefMaster->addEmptyBackRefBlock(bl); - if (i) - backRefMaster->addBackRefBlockToList(bl); - else // active leaf is not needed in listForUse - backRefMaster->active = bl; - } - return true; -} - -void BackRefMaster::addBackRefBlockToList(BackRefBlock *bl) -{ - bl->nextForUse = backRefMaster->listForUse; - backRefMaster->listForUse = bl; - bl->addedToForUse = true; -} - -void BackRefMaster::addEmptyBackRefBlock(BackRefBlock *newBl) -{ - memset(newBl, 0, BackRefBlock::bytes); - new (newBl) BackRefBlock(newBl, lastUsed); - backRefBl[lastUsed] = newBl; -} - -BackRefBlock *BackRefMaster::findFreeBlock() -{ - if (active->allocatedCount < BR_MAX_CNT) - return active; - - if (listForUse) { // use released list - active = listForUse; - listForUse = listForUse->nextForUse; - MALLOC_ASSERT(active->addedToForUse, ASSERT_TEXT); - active->addedToForUse = false; - } else if (lastUsed-1 < backRefMaster->dataSz) { // allocate new data node - // TODO: this block is never released, so can prevent re-using - // of the memory it belong to in the backend, - // getRawMemory can be used instead. - BackRefBlock *newBl = - (BackRefBlock*)BlockI::getRawBlock( /*startup=*/!isMallocInitializedExt() ); - if (!newBl) return NULL; - lastUsed++; - backRefMaster->addEmptyBackRefBlock(newBl); - active = newBl; - } else // no free blocks, give up - return NULL; - return active; -} - -void *getBackRef(BackRefIdx backRefIdx) -{ - // !backRefMaster means no initialization done, so it can't be valid memory - if (!backRefMaster || backRefIdx.getMaster() > backRefMaster->lastUsed - || backRefIdx.getOffset() >= BR_MAX_CNT) - return NULL; - return *(void**)((uintptr_t)backRefMaster->backRefBl[backRefIdx.getMaster()] - + sizeof(BackRefBlock)+backRefIdx.getOffset()*sizeof(void*)); -} - -void setBackRef(BackRefIdx backRefIdx, void *newPtr) -{ - MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster->lastUsed && backRefIdx.getOffset()<BR_MAX_CNT, - ASSERT_TEXT); - *(void**)((uintptr_t)backRefMaster->backRefBl[backRefIdx.getMaster()] - + sizeof(BackRefBlock) + backRefIdx.getOffset()*sizeof(void*)) = newPtr; -} - -BackRefIdx BackRefIdx::newBackRef(bool largeObj) -{ - BackRefBlock *blockToUse; - void **toUse; - BackRefIdx res; - - do { - { // global lock taken to find a block - MallocMutex::scoped_lock lock(backRefMutex); - - MALLOC_ASSERT(backRefMaster, ASSERT_TEXT); - if (! 
(blockToUse = backRefMaster->findFreeBlock())) - return BackRefIdx(); - } - toUse = NULL; - { // the block is locked to find a reference - MallocMutex::scoped_lock lock(blockToUse->blockMutex); - - if (blockToUse->freeList) { - toUse = (void**)blockToUse->freeList; - blockToUse->freeList = blockToUse->freeList->next; - } else if (blockToUse->allocatedCount < BR_MAX_CNT) { - toUse = (void**)blockToUse->bumpPtr; - blockToUse->bumpPtr = - (FreeObject*)((uintptr_t)blockToUse->bumpPtr - sizeof(void*)); - if (blockToUse->allocatedCount == BR_MAX_CNT-1) { - MALLOC_ASSERT((uintptr_t)blockToUse->bumpPtr - < (uintptr_t)blockToUse+sizeof(BackRefBlock), - ASSERT_TEXT); - blockToUse->bumpPtr = NULL; - } - } - if (toUse) - blockToUse->allocatedCount++; - } // end of lock scope - } while (!toUse); - res.master = blockToUse->myNum; - uintptr_t offset = - ((uintptr_t)toUse - ((uintptr_t)blockToUse + sizeof(BackRefBlock)))/sizeof(void*); - // Is offset too big? - MALLOC_ASSERT(!(offset >> 15), ASSERT_TEXT); - res.offset = offset; - if (largeObj) res.largeObj = largeObj; - - return res; -} - -void removeBackRef(BackRefIdx backRefIdx) -{ - MALLOC_ASSERT(backRefIdx.getMaster()<=backRefMaster->lastUsed - && backRefIdx.getOffset()<BR_MAX_CNT, ASSERT_TEXT); - BackRefBlock *currBlock = backRefMaster->backRefBl[backRefIdx.getMaster()]; - FreeObject *freeObj = (FreeObject*)((uintptr_t)currBlock + sizeof(BackRefBlock) - + backRefIdx.getOffset()*sizeof(void*)); - { - MallocMutex::scoped_lock lock(currBlock->blockMutex); - - freeObj->next = currBlock->freeList; - currBlock->freeList = freeObj; - currBlock->allocatedCount--; - } - // TODO: do we need double-check here? - if (!currBlock->addedToForUse && currBlock!=backRefMaster->active) { - MallocMutex::scoped_lock lock(backRefMutex); - - if (!currBlock->addedToForUse && currBlock!=backRefMaster->active) - backRefMaster->addBackRefBlockToList(currBlock); - } -} - -/********* End of backreferences ***********************/ - -} // namespace internal -} // namespace rml - diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/frontend.cpp b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/frontend.cpp deleted file mode 100644 index 5c7ba3d995..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/frontend.cpp +++ /dev/null @@ -1,2057 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. 
This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - - -#include "tbbmalloc_internal.h" -#include <errno.h> - -//! Define the main synchronization method -/** It should be specified before including LifoList.h */ -#define FINE_GRAIN_LOCKS -#include "LifoList.h" - -#if USE_PTHREAD - #define TlsSetValue_func pthread_setspecific - #define TlsGetValue_func pthread_getspecific - typedef pthread_key_t tls_key_t; - #include <sched.h> - inline void do_yield() {sched_yield();} - -#elif USE_WINTHREAD - #define _WIN32_WINNT 0x0400 - #include "tbb/machine/windows_api.h" - #define TlsSetValue_func TlsSetValue - #define TlsGetValue_func TlsGetValue - typedef DWORD tls_key_t; - inline void do_yield() {SwitchToThread();} - -#else - #error Must define USE_PTHREAD or USE_WINTHREAD - -#endif - - -#define FREELIST_NONBLOCKING 1 - -void mallocThreadShutdownNotification(void* arg); - -namespace rml { -namespace internal { - -class Block; - -#if MALLOC_CHECK_RECURSION - -inline bool isMallocInitialized(); - -bool RecursiveMallocCallProtector::noRecursion() { - MALLOC_ASSERT(isMallocInitialized(), - "Recursion status can be checked only when initialization was done."); - return !mallocRecursionDetected; -} - -#endif // MALLOC_CHECK_RECURSION - -/* - * Block::objectSize value used to mark blocks allocated by startupAlloc - */ -const unsigned int startupAllocObjSizeMark = ~(unsigned int)0; - -/* - * This number of bins in the TLS that leads to blocks that we can allocate in. - */ -const uint32_t numBlockBinLimit = 31; - -/* - * The following constant is used to define the size of struct Block, the block header. - * The intent is to have the size of a Block multiple of the cache line size, this allows us to - * get good alignment at the cost of some overhead equal to the amount of padding included in the Block. - */ - -const int blockHeaderAlignment = 64; // a common size of a cache line - - -/********* The data structures and global objects **************/ - -/* - * The malloc routines themselves need to be able to occasionally malloc some space, - * in order to set up the structures used by the thread local structures. This - * routine preforms that fuctions. - */ -class BootStrapBlocks { - MallocMutex bootStrapLock; - Block *bootStrapBlock; - Block *bootStrapBlockUsed; - FreeObject *bootStrapObjectList; -public: - void *allocate(size_t size); - void free(void* ptr); -}; - -class ThreadId { - static tls_key_t Tid_key; - static intptr_t ThreadIdCount; - - unsigned int id; -public: - - static void init() { -#if USE_WINTHREAD - Tid_key = TlsAlloc(); -#else - int status = pthread_key_create( &Tid_key, NULL ); - if ( status ) { - fprintf (stderr, "The memory manager cannot create tls key during initialization; exiting \n"); - exit(1); - } -#endif /* USE_WINTHREAD */ - } - static ThreadId get() { - ThreadId result; - result.id = reinterpret_cast<intptr_t>(TlsGetValue_func(Tid_key)); - if( !result.id ) { - RecursiveMallocCallProtector scoped; - // Thread-local value is zero -> first call from this thread, - // need to initialize with next ID value (IDs start from 1) - result.id = AtomicIncrement(ThreadIdCount); // returned new value! 
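The ThreadId class above hands out per-thread IDs lazily: the first lookup from a thread atomically increments a global counter and caches the result in TLS, so IDs start from 1 and 0 means "not yet assigned". A rough standalone sketch of the same idea, pthread flavour, with C++11 std::atomic standing in for AtomicIncrement (names here are illustrative, not taken from the TBB sources):

// Lazy per-thread ID assignment via a TLS slot plus a global atomic counter.
#include <atomic>
#include <cstdint>
#include <pthread.h>

static pthread_key_t tidKey;
static std::atomic<std::uintptr_t> tidCounter{0};

static void initTidKey() { pthread_key_create(&tidKey, nullptr); }

static std::uintptr_t getTid() {
    void *stored = pthread_getspecific(tidKey);
    if (stored)                                         // fast path: already assigned
        return reinterpret_cast<std::uintptr_t>(stored);
    std::uintptr_t id = ++tidCounter;                   // IDs start from 1; 0 means "unset"
    pthread_setspecific(tidKey, reinterpret_cast<void *>(id));
    return id;
}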
- TlsSetValue_func( Tid_key, reinterpret_cast<void*>(result.id) ); - } - return result; - } - bool defined() const { return id; } - void undef() { id = 0; } - void invalid() { id = (unsigned int)-1; } - bool own() const { return id == ThreadId::get().id; } - - friend bool operator==(const ThreadId &id1, const ThreadId &id2); - friend unsigned int getThreadId(); -}; - -tls_key_t ThreadId::Tid_key; -intptr_t ThreadId::ThreadIdCount; - -bool operator==(const ThreadId &id1, const ThreadId &id2) { - return id1.id == id2.id; -} - -unsigned int getThreadId() { return ThreadId::get().id; } - -/* The 'next' field in the block header has to maintain some invariants: - * it needs to be on a 16K boundary and the first field in the block. - * Any value stored there needs to have the lower 14 bits set to 0 - * so that various assert work. This means that if you want to smash this memory - * for debugging purposes you will need to obey this invariant. - * The total size of the header needs to be a power of 2 to simplify - * the alignment requirements. For now it is a 128 byte structure. - * To avoid false sharing, the fields changed only locally are separated - * from the fields changed by foreign threads. - * Changing the size of the block header would require to change - * some bin allocation sizes, in particular "fitting" sizes (see above). - */ -class Bin; -class StartupBlock; -struct TLSData; - -class LocalBlockFields : public BlockI { -protected: - Block *next; /* This field needs to be on a 16K boundary and the first field in the block - for LIFO lists to work. */ - Block *previous; /* Use double linked list to speed up removal */ - unsigned int objectSize; - ThreadId owner; - FreeObject *bumpPtr; /* Bump pointer moves from the end to the beginning of a block */ - FreeObject *freeList; - BackRefIdx backRefIdx; - unsigned int allocatedCount; /* Number of objects allocated (obviously by the owning thread) */ - bool isFull; - - friend void *BootStrapBlocks::allocate(size_t size); - friend class FreeBlockPool; - friend class StartupBlock; - friend void BlockI::initialize(void *bumpPtr); -}; - -class Block : public LocalBlockFields { - size_t __pad_local_fields[(blockHeaderAlignment-sizeof(LocalBlockFields))/sizeof(size_t)]; - FreeObject *publicFreeList; - Block *nextPrivatizable; - size_t __pad_public_fields[(blockHeaderAlignment-2*sizeof(void*))/sizeof(size_t)]; - -public: - static Block *getEmpty(size_t size); - - inline FreeObject* allocate(); - inline FreeObject *allocateFromFreeList(); - inline bool emptyEnoughToUse(); - bool freeListNonNull() { return freeList; } - void freePublicObject(FreeObject *objectToFree); - inline void freeOwnObject(FreeObject *objectToFree); - void returnEmpty(bool poolTheBlock); - void privatizePublicFreeList(); - void restoreBumpPtr(); - void privatizeOrphaned(Bin *bin); - void shareOrphaned(const Bin *bin); - unsigned int getSize() const { return objectSize; } - const BackRefIdx *getBackRefIdx() const { return &backRefIdx; } - bool ownBlock() const { return owner.own(); } - bool isStartupAllocObject() const { return objectSize == startupAllocObjSizeMark; } - inline FreeObject *findObjectToFree(void *object) const; - bool checkFreePrecond() const { return allocatedCount>0; } - const BackRefIdx *getBackRef() const { return &backRefIdx; } - -protected: - static Block *getRaw(bool startup); - void cleanBlockHeader(); - -private: - static const float emptyEnoughRatio; /* "Reactivate" a block if this share of its objects is free. 
*/ - - inline FreeObject *allocateFromBumpPtr(); - void initEmptyBlock(size_t size); - inline FreeObject *findAllocatedObject(const void *address) const; - inline bool isProperlyPlaced(const void *object) const; - - friend class Bin; - friend void ::mallocThreadShutdownNotification(void* arg); - friend BlockI *BlockI::getRawBlock(bool startup); -}; - -const float Block::emptyEnoughRatio = 1.0 / 4.0; - -class Bin { - Block *activeBlk; - Block *mailbox; - MallocMutex mailLock; - - static TLSData* createTLS(); -public: - static inline Bin* getAllocationBin(size_t size); - - inline Block* getActiveBlock() const { return activeBlk; } - inline void setActiveBlock(Block *block); - inline Block* setPreviousBlockActive(); - Block* getPublicFreeListBlock(); - void moveBlockToBinFront(Block *block); - void processLessUsedBlock(Block *block); - - void outofTLSBin (Block* block); - void verifyTLSBin (size_t size) const; - void pushTLSBin(Block* block); - - friend void ::mallocThreadShutdownNotification(void* arg); - friend void Block::freePublicObject (FreeObject *objectToFree); -}; - -/********* End of the data structures **************/ - -/* - * There are bins for all 8 byte aligned objects less than this segregated size; 8 bins in total - */ -const uint32_t minSmallObjectIndex = 0; -const uint32_t numSmallObjectBins = 8; -const uint32_t maxSmallObjectSize = 64; - -/* - * There are 4 bins between each couple of powers of 2 [64-128-256-...] - * from maxSmallObjectSize till this size; 16 bins in total - */ -const uint32_t minSegregatedObjectIndex = minSmallObjectIndex+numSmallObjectBins; -const uint32_t numSegregatedObjectBins = 16; -const uint32_t maxSegregatedObjectSize = 1024; - -/* - * And there are 5 bins with the following allocation sizes: 1792, 2688, 3968, 5376, 8064. - * They selected to fit 9, 6, 4, 3, and 2 sizes per a block, and also are multiples of 128. - * If sizeof(Block) changes from 128, these sizes require close attention! - */ -const uint32_t minFittingIndex = minSegregatedObjectIndex+numSegregatedObjectBins; -const uint32_t numFittingBins = 5; - -const uint32_t fittingAlignment = 128; - -#define SET_FITTING_SIZE(N) ( (blockSize-sizeof(Block))/N ) & ~(fittingAlignment-1) -const uint32_t fittingSize1 = SET_FITTING_SIZE(9); -const uint32_t fittingSize2 = SET_FITTING_SIZE(6); -const uint32_t fittingSize3 = SET_FITTING_SIZE(4); -const uint32_t fittingSize4 = SET_FITTING_SIZE(3); -const uint32_t fittingSize5 = SET_FITTING_SIZE(2); -#undef SET_FITTING_SIZE - -/* - * The total number of thread-specific Block-based bins - */ -const uint32_t numBlockBins = minFittingIndex+numFittingBins; - -/* - * Objects of this size and larger are considered large objects. - */ -const uint32_t minLargeObjectSize = fittingSize5 + 1; - -/* - * When a block that is not completely free is returned for reuse by other threads - * this is where the block goes. - * - * LifoList assumes zero initialization; so below its constructors are omitted, - * to avoid linking with C++ libraries on Linux. - */ - -class OrphanedBlocks { - LifoList bins[numBlockBinLimit]; -public: - Block *get(Bin* bin, unsigned int size); - void put(Bin* bin, Block *block); -}; - -static char globalBinSpace[sizeof(LifoList)*numBlockBinLimit]; -static OrphanedBlocks *orphanedBlocks = (OrphanedBlocks*)globalBinSpace; - -/* - * Per-thread pool of 16KB blocks. Idea behind it is to not share with other - * threads memory that are likely in local cache(s) of our CPU. 
- */ -class FreeBlockPool { - Block *head; - Block *tail; - int size; - void insertBlock(Block *block); -public: - static const int POOL_HIGH_MARK = 32; - static const int POOL_LOW_MARK = 8; - - Block *getBlock(); - void returnBlock(Block *block); - void releaseAllBlocks(); -}; - -struct TLSData { - Bin bin[numBlockBinLimit]; - FreeBlockPool pool; -}; - -#if MALLOC_CHECK_RECURSION -MallocMutex RecursiveMallocCallProtector::rmc_mutex; -pthread_t RecursiveMallocCallProtector::owner_thread; -void *RecursiveMallocCallProtector::autoObjPtr; -bool RecursiveMallocCallProtector::mallocRecursionDetected; -#if __FreeBSD__ -bool RecursiveMallocCallProtector::canUsePthread; -#endif - -#endif - -/*********** Code to provide thread ID and a thread-local void pointer **********/ - -static tls_key_t TLS_pointer_key; - -static inline TLSData* getThreadMallocTLS() { - TLSData *result; - result = (TLSData *)TlsGetValue_func( TLS_pointer_key ); -// The assert below is incorrect: with lazy initialization, it fails on the first call of the function. -// MALLOC_ASSERT( result, "Memory allocator not initialized" ); - return result; -} - -static inline void setThreadMallocTLS( TLSData * newvalue ) { - RecursiveMallocCallProtector scoped; - TlsSetValue_func( TLS_pointer_key, newvalue ); -} - -/*********** End code to provide thread ID and a TLS pointer **********/ - -#if !MALLOC_DEBUG -#if __INTEL_COMPILER || _MSC_VER -#define NOINLINE(decl) __declspec(noinline) decl -#define ALWAYSINLINE(decl) __forceinline decl -#elif __GNUC__ -#define NOINLINE(decl) decl __attribute__ ((noinline)) -#define ALWAYSINLINE(decl) decl __attribute__ ((always_inline)) -#else -#define NOINLINE(decl) decl -#define ALWAYSINLINE(decl) decl -#endif - -static NOINLINE( void doInitialization() ); - -ALWAYSINLINE( bool isMallocInitialized() ); - -#undef ALWAYSINLINE -#undef NOINLINE -#endif /* !MALLOC_DEBUG */ - - -/********* Now some rough utility code to deal with indexing the size bins. **************/ - -/* - * Given a number return the highest non-zero bit in it. It is intended to work with 32-bit values only. - * Moreover, on IPF, for sake of simplicity and performance, it is narrowed to only serve for 64 to 1023. - * This is enough for current algorithm of distribution of sizes among bins. - */ -#if _WIN64 && _MSC_VER>=1400 && !__INTEL_COMPILER -extern "C" unsigned char _BitScanReverse( unsigned long* i, unsigned long w ); -#pragma intrinsic(_BitScanReverse) -#endif -static inline unsigned int highestBitPos(unsigned int n) -{ - unsigned int pos; -#if __ARCH_x86_32||__ARCH_x86_64 - -# if __linux__||__APPLE__||__FreeBSD__||__sun||__MINGW32__ - __asm__ ("bsr %1,%0" : "=r"(pos) : "r"(n)); -# elif (_WIN32 && (!_WIN64 || __INTEL_COMPILER)) - __asm - { - bsr eax, n - mov pos, eax - } -# elif _WIN64 && _MSC_VER>=1400 - _BitScanReverse((unsigned long*)&pos, (unsigned long)n); -# else -# error highestBitPos() not implemented for this platform -# endif - -#elif __ARCH_ipf || __ARCH_other - static unsigned int bsr[16] = {0,6,7,7,8,8,8,8,9,9,9,9,9,9,9,9}; - MALLOC_ASSERT( n>=64 && n<1024, ASSERT_TEXT ); - pos = bsr[ n>>6 ]; -#else -# error highestBitPos() not implemented for this platform -#endif /* __ARCH_* */ - return pos; -} - -/* - * Depending on indexRequest, for a given size return either the index into the bin - * for objects of this size, or the actual size of objects in this bin. 
- */ -template<bool indexRequest> -static unsigned int getIndexOrObjectSize (unsigned int size) -{ - if (size <= maxSmallObjectSize) { // selection from 4/8/16/24/32/40/48/56/64 - /* Index 0 holds up to 8 bytes, Index 1 16 and so forth */ - return indexRequest ? (size - 1) >> 3 : alignUp(size,8); - } - else if (size <= maxSegregatedObjectSize ) { // 80/96/112/128 / 160/192/224/256 / 320/384/448/512 / 640/768/896/1024 - unsigned int order = highestBitPos(size-1); // which group of bin sizes? - MALLOC_ASSERT( 6<=order && order<=9, ASSERT_TEXT ); - if (indexRequest) - return minSegregatedObjectIndex - (4*6) - 4 + (4*order) + ((size-1)>>(order-2)); - else { - unsigned int alignment = 128 >> (9-order); // alignment in the group - MALLOC_ASSERT( alignment==16 || alignment==32 || alignment==64 || alignment==128, ASSERT_TEXT ); - return alignUp(size,alignment); - } - } - else { - if( size <= fittingSize3 ) { - if( size <= fittingSize2 ) { - if( size <= fittingSize1 ) - return indexRequest ? minFittingIndex : fittingSize1; - else - return indexRequest ? minFittingIndex+1 : fittingSize2; - } else - return indexRequest ? minFittingIndex+2 : fittingSize3; - } else { - if( size <= fittingSize5 ) { - if( size <= fittingSize4 ) - return indexRequest ? minFittingIndex+3 : fittingSize4; - else - return indexRequest ? minFittingIndex+4 : fittingSize5; - } else { - MALLOC_ASSERT( 0,ASSERT_TEXT ); // this should not happen - return ~0U; - } - } - } -} - -static unsigned int getIndex (unsigned int size) -{ - return getIndexOrObjectSize</*indexRequest*/true>(size); -} - -static unsigned int getObjectSize (unsigned int size) -{ - return getIndexOrObjectSize</*indexRequest*/false>(size); -} - -/* - * Initialization code. - * - */ - -/* - * Forward Refs - */ - -static BootStrapBlocks bootStrapBlocks; - -void *BootStrapBlocks::allocate(size_t size) -{ - FreeObject *result; - - MALLOC_ASSERT( size == sizeof(TLSData), ASSERT_TEXT ); - - { // Lock with acquire - MallocMutex::scoped_lock scoped_cs(bootStrapLock); - - if( bootStrapObjectList) { - result = bootStrapObjectList; - bootStrapObjectList = bootStrapObjectList->next; - } else { - if (!bootStrapBlock) { - bootStrapBlock = Block::getEmpty(size); - if (!bootStrapBlock) return NULL; - } - result = bootStrapBlock->bumpPtr; - bootStrapBlock->bumpPtr = (FreeObject *)((uintptr_t)bootStrapBlock->bumpPtr - bootStrapBlock->objectSize); - if ((uintptr_t)bootStrapBlock->bumpPtr < (uintptr_t)bootStrapBlock+sizeof(Block)) { - bootStrapBlock->bumpPtr = NULL; - bootStrapBlock->next = bootStrapBlockUsed; - bootStrapBlockUsed = bootStrapBlock; - bootStrapBlock = NULL; - } - } - } // Unlock with release - - memset (result, 0, size); - return (void*)result; -} - -void BootStrapBlocks::free(void* ptr) -{ - MALLOC_ASSERT( ptr, ASSERT_TEXT ); - { // Lock with acquire - MallocMutex::scoped_lock scoped_cs(bootStrapLock); - ((FreeObject*)ptr)->next = bootStrapObjectList; - bootStrapObjectList = (FreeObject*)ptr; - } // Unlock with release -} - -#if !(FREELIST_NONBLOCKING) -static MallocMutex publicFreeListLock; // lock for changes of publicFreeList -#endif - -const uintptr_t UNUSABLE = 0x1; -inline bool isSolidPtr( void* ptr ) -{ - return (UNUSABLE|(uintptr_t)ptr)!=UNUSABLE; -} -inline bool isNotForUse( void* ptr ) -{ - return (uintptr_t)ptr==UNUSABLE; -} - -/********* End rough utility code **************/ - -/********* Thread and block related code *************/ - -void Bin::verifyTLSBin (size_t size) const -{ -#if MALLOC_DEBUG -/* The debug version verifies the TLSBin as needed */ - 
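getIndexOrObjectSize() above maps a request size to its bin: sizes up to 64 bytes round up to multiples of 8, while the segregated range up to 1024 bytes rounds up within power-of-two groups, four bins per group. A small self-contained check of that rounding, with a portable loop standing in for the bsr-based highestBitPos() (constants redefined here so the sketch compiles on its own):

// Check the small/segregated size rounding described in the code above.
#include <cassert>

static unsigned highestBitPos(unsigned n) {    // position of the highest set bit
    unsigned pos = 0;
    while (n >>= 1) ++pos;
    return pos;
}

static unsigned roundedObjectSize(unsigned size) {
    if (size <= 64)                             // 8/16/24/.../64 byte bins
        return (size + 7) & ~7u;
    unsigned order = highestBitPos(size - 1);   // 6..9 for sizes 65..1024
    unsigned alignment = 128 >> (9 - order);    // 16/32/64/128 within the group
    return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
    assert(roundedObjectSize(1)    == 8);
    assert(roundedObjectSize(65)   == 80);      // 80/96/112/128 group
    assert(roundedObjectSize(200)  == 224);     // 160/192/224/256 group
    assert(roundedObjectSize(1000) == 1024);
    return 0;
}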
Bin* tlsBin = getThreadMallocTLS()->bin; - uint32_t index = getIndex(size); - uint32_t objSize = getObjectSize(size); - - MALLOC_ASSERT( this == tlsBin+index, ASSERT_TEXT ); - - if (activeBlk) { - MALLOC_ASSERT( activeBlk->owner.own(), ASSERT_TEXT ); - MALLOC_ASSERT( activeBlk->objectSize == objSize, ASSERT_TEXT ); -#if MALLOC_DEBUG>1 - for (Block* temp = activeBlk->next; temp; temp=temp->next) { - MALLOC_ASSERT( temp!=activeBlk, ASSERT_TEXT ); - MALLOC_ASSERT( temp->owner.own(), ASSERT_TEXT ); - MALLOC_ASSERT( temp->objectSize == objSize, ASSERT_TEXT ); - MALLOC_ASSERT( temp->previous->next == temp, ASSERT_TEXT ); - if (temp->next) { - MALLOC_ASSERT( temp->next->previous == temp, ASSERT_TEXT ); - } - } - for (Block* temp = activeBlk->previous; temp; temp=temp->previous) { - MALLOC_ASSERT( temp!=activeBlk, ASSERT_TEXT ); - MALLOC_ASSERT( temp->owner.own(), ASSERT_TEXT ); - MALLOC_ASSERT( temp->objectSize == objSize, ASSERT_TEXT ); - MALLOC_ASSERT( temp->next->previous == temp, ASSERT_TEXT ); - if (temp->previous) { - MALLOC_ASSERT( temp->previous->next == temp, ASSERT_TEXT ); - } - } -#endif /* MALLOC_DEBUG>1 */ - } -#endif /* MALLOC_DEBUG */ -} - -/* - * Add a block to the start of this tls bin list. - */ -void Bin::pushTLSBin(Block* block) -{ - /* The objectSize should be defined and not a parameter - because the function is applied to partially filled blocks as well */ - unsigned int size = block->objectSize; - - MALLOC_ASSERT( block->owner == ThreadId::get(), ASSERT_TEXT ); - MALLOC_ASSERT( block->objectSize != 0, ASSERT_TEXT ); - MALLOC_ASSERT( block->next == NULL, ASSERT_TEXT ); - MALLOC_ASSERT( block->previous == NULL, ASSERT_TEXT ); - - MALLOC_ASSERT( this, ASSERT_TEXT ); - verifyTLSBin(size); - - block->next = activeBlk; - if( activeBlk ) { - block->previous = activeBlk->previous; - activeBlk->previous = block; - if( block->previous ) - block->previous->next = block; - } else { - activeBlk = block; - } - - verifyTLSBin(size); -} - -/* - * Take a block out of its tls bin (e.g. before removal). - */ -void Bin::outofTLSBin(Block* block) -{ - unsigned int size = block->objectSize; - - MALLOC_ASSERT( block->owner == ThreadId::get(), ASSERT_TEXT ); - MALLOC_ASSERT( block->objectSize != 0, ASSERT_TEXT ); - - MALLOC_ASSERT( this, ASSERT_TEXT ); - verifyTLSBin(size); - - if (block == activeBlk) { - activeBlk = block->previous? block->previous : block->next; - } - /* Delink the block */ - if (block->previous) { - MALLOC_ASSERT( block->previous->next == block, ASSERT_TEXT ); - block->previous->next = block->next; - } - if (block->next) { - MALLOC_ASSERT( block->next->previous == block, ASSERT_TEXT ); - block->next->previous = block->previous; - } - block->next = NULL; - block->previous = NULL; - - verifyTLSBin(size); -} - -TLSData* Bin::createTLS() -{ - MALLOC_ASSERT( sizeof(TLSData) >= sizeof(Bin) * numBlockBins + sizeof(FreeBlockPool), ASSERT_TEXT ); - TLSData* tls = (TLSData*) bootStrapBlocks.allocate(sizeof(TLSData)); - if ( !tls ) return NULL; - /* the block contains zeroes after bootStrapMalloc, so bins are initialized */ -#if MALLOC_DEBUG - for (uint32_t i = 0; i < numBlockBinLimit; i++) { - MALLOC_ASSERT( tls->bin[i].activeBlk == 0, ASSERT_TEXT ); - MALLOC_ASSERT( tls->bin[i].mailbox == 0, ASSERT_TEXT ); - } -#endif - setThreadMallocTLS(tls); - return tls; -} - -/* - * Return the bin for the given size. If the TLS bin structure is absent, create it. 
- */ -Bin* Bin::getAllocationBin(size_t size) -{ - TLSData* tls = getThreadMallocTLS(); - if( !tls ) - tls = createTLS(); - MALLOC_ASSERT( tls, ASSERT_TEXT ); - return tls->bin + getIndex(size); -} - -Block* Bin::getPublicFreeListBlock() -{ - Block* block; - MALLOC_ASSERT( this, ASSERT_TEXT ); - // if this method is called, active block usage must be unsuccesful - MALLOC_ASSERT( !activeBlk && !mailbox || activeBlk && activeBlk->isFull, ASSERT_TEXT ); - -// the counter should be changed STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList); - { - MallocMutex::scoped_lock scoped_cs(mailLock); - block = mailbox; - if( block ) { - MALLOC_ASSERT( block->ownBlock(), ASSERT_TEXT ); - MALLOC_ASSERT( !isNotForUse(block->nextPrivatizable), ASSERT_TEXT ); - mailbox = block->nextPrivatizable; - block->nextPrivatizable = (Block*) this; - } - } - if( block ) { - MALLOC_ASSERT( isSolidPtr(block->publicFreeList), ASSERT_TEXT ); - block->privatizePublicFreeList(); - } - return block; -} - -BlockI *BlockI::getRawBlock(bool startup) { return Block::getRaw(startup); } - -void BlockI::initialize(void *ptr) { ((LocalBlockFields*)this)->bumpPtr = (FreeObject*)ptr; } - -bool Block::emptyEnoughToUse() -{ - const float threshold = (blockSize - sizeof(Block)) * (1-emptyEnoughRatio); - - if (bumpPtr) { - /* If we are still using a bump ptr for this block it is empty enough to use. */ - STAT_increment(owner, getIndex(objectSize), examineEmptyEnough); - isFull = false; - return 1; - } - - /* allocatedCount shows how many objects in the block are in use; however it still counts - blocks freed by other threads; so prior call to privatizePublicFreeList() is recommended */ - isFull = (allocatedCount*objectSize > threshold)? true: false; -#if COLLECT_STATISTICS - if (isFull) - STAT_increment(owner, getIndex(objectSize), examineNotEmpty); - else - STAT_increment(owner, getIndex(objectSize), examineEmptyEnough); -#endif - return !isFull; -} - -/* Restore the bump pointer for an empty block that is planned to use */ -void Block::restoreBumpPtr() -{ - MALLOC_ASSERT( allocatedCount == 0, ASSERT_TEXT ); - MALLOC_ASSERT( publicFreeList == NULL, ASSERT_TEXT ); - STAT_increment(owner, getIndex(objectSize), freeRestoreBumpPtr); - bumpPtr = (FreeObject *)((uintptr_t)this + blockSize - objectSize); - freeList = NULL; - isFull = 0; -} - -void Block::freeOwnObject(FreeObject *objectToFree) -{ - objectToFree->next = freeList; - freeList = objectToFree; - allocatedCount--; - MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT ); -#if COLLECT_STATISTICS - if (getActiveBlock(getAllocationBin(block->objectSize)) != block) - STAT_increment(myTid, getIndex(block->objectSize), freeToInactiveBlock); - else - STAT_increment(myTid, getIndex(block->objectSize), freeToActiveBlock); -#endif - if (isFull) { - if (emptyEnoughToUse()) - Bin::getAllocationBin(objectSize)->moveBlockToBinFront(this); - } else { - if (allocatedCount==0 && publicFreeList==NULL) - Bin::getAllocationBin(objectSize)->processLessUsedBlock(this); - } -} - -void Block::freePublicObject (FreeObject *objectToFree) -{ - FreeObject *localPublicFreeList; - - MALLOC_ITT_SYNC_RELEASING(&publicFreeList); -#if FREELIST_NONBLOCKING - FreeObject *temp = publicFreeList; - do { - localPublicFreeList = objectToFree->next = temp; - temp = (FreeObject*)AtomicCompareExchange( - (intptr_t&)publicFreeList, - (intptr_t)objectToFree, (intptr_t)localPublicFreeList ); - // no backoff necessary because trying to make change, not waiting for a change - } 
while( temp != localPublicFreeList ); -#else - STAT_increment(getThreadId(), ThreadCommonCounters, lockPublicFreeList); - { - MallocMutex::scoped_lock scoped_cs(publicFreeListLock); - localPublicFreeList = objectToFree->next = publicFreeList; - publicFreeList = objectToFree; - } -#endif - - if( localPublicFreeList==NULL ) { - // if the block is abandoned, its nextPrivatizable pointer should be UNUSABLE - // otherwise, it should point to the bin the block belongs to. - // reading nextPrivatizable is thread-safe below, because: - // 1) the executing thread atomically got localPublicFreeList==NULL and changed it to non-NULL; - // 2) only owning thread can change it back to NULL, - // 3) but it can not be done until the block is put to the mailbox - // So the executing thread is now the only one that can change nextPrivatizable - if( !isNotForUse(nextPrivatizable) ) { - MALLOC_ASSERT( nextPrivatizable!=NULL, ASSERT_TEXT ); - MALLOC_ASSERT( owner.defined(), ASSERT_TEXT ); - Bin* theBin = (Bin*) nextPrivatizable; - MallocMutex::scoped_lock scoped_cs(theBin->mailLock); - nextPrivatizable = theBin->mailbox; - theBin->mailbox = this; - } else { - MALLOC_ASSERT( !owner.defined(), ASSERT_TEXT ); - } - } - STAT_increment(ThreadId::get(), ThreadCommonCounters, freeToOtherThread); - STAT_increment(owner, getIndex(objectSize), freeByOtherThread); -} - -void Block::privatizePublicFreeList() -{ - FreeObject *temp, *localPublicFreeList; - - MALLOC_ASSERT( owner.own(), ASSERT_TEXT ); -#if FREELIST_NONBLOCKING - temp = publicFreeList; - do { - localPublicFreeList = temp; - temp = (FreeObject*)AtomicCompareExchange( - (intptr_t&)publicFreeList, - 0, (intptr_t)localPublicFreeList); - // no backoff necessary because trying to make change, not waiting for a change - } while( temp != localPublicFreeList ); -#else - STAT_increment(owner, ThreadCommonCounters, lockPublicFreeList); - { - MallocMutex::scoped_lock scoped_cs(publicFreeListLock); - localPublicFreeList = publicFreeList; - publicFreeList = NULL; - } - temp = localPublicFreeList; -#endif - MALLOC_ITT_SYNC_ACQUIRED(&publicFreeList); - - MALLOC_ASSERT( localPublicFreeList && localPublicFreeList==temp, ASSERT_TEXT ); // there should be something in publicFreeList! 
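freePublicObject() and privatizePublicFreeList() above form a multi-producer, single-consumer handoff: foreign threads CAS freed objects onto publicFreeList, and the owning thread atomically takes the whole list back. A compact sketch of the same two operations with C++11 std::atomic in place of the library's AtomicCompareExchange (the owner's side is shown with exchange() for brevity; the original uses a CAS loop to swap in 0):

// Lock-free push by foreign threads, wholesale take-over by the owner.
#include <atomic>

struct FreeObject { FreeObject *next; };

static std::atomic<FreeObject *> publicFreeList{nullptr};

// Foreign thread: push one freed object; no backoff is needed because each
// iteration tries to make a change rather than waiting for one.
static void pushPublic(FreeObject *obj) {
    FreeObject *head = publicFreeList.load(std::memory_order_relaxed);
    do {
        obj->next = head;
    } while (!publicFreeList.compare_exchange_weak(head, obj,
                                                   std::memory_order_release,
                                                   std::memory_order_relaxed));
}

// Owning thread: atomically detach the entire list (swap the head to NULL).
static FreeObject *privatize() {
    return publicFreeList.exchange(nullptr, std::memory_order_acquire);
}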
- if( !isNotForUse(temp) ) { // return/getPartialBlock could set it to UNUSABLE - MALLOC_ASSERT( allocatedCount <= (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT ); - /* other threads did not change the counter freeing our blocks */ - allocatedCount--; - while( isSolidPtr(temp->next) ){ // the list will end with either NULL or UNUSABLE - temp = temp->next; - allocatedCount--; - } - MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT ); - /* merge with local freeList */ - temp->next = freeList; - freeList = localPublicFreeList; - STAT_increment(owner, getIndex(objectSize), allocPrivatized); - } -} - -void Block::privatizeOrphaned(Bin* bin) -{ - next = NULL; - previous = NULL; - MALLOC_ASSERT( publicFreeList!=NULL, ASSERT_TEXT ); - /* There is not a race here since no other thread owns this block */ - MALLOC_ASSERT( !owner.defined(), ASSERT_TEXT ); - owner = ThreadId::get(); - // It is safe to change nextPrivatizable, as publicFreeList is not null - MALLOC_ASSERT( isNotForUse(nextPrivatizable), ASSERT_TEXT ); - nextPrivatizable = (Block*)bin; - // the next call is required to change publicFreeList to 0 - privatizePublicFreeList(); - if( allocatedCount ) { - emptyEnoughToUse(); // check its fullness and set result->isFull - } else { - restoreBumpPtr(); - } - MALLOC_ASSERT( !isNotForUse(publicFreeList), ASSERT_TEXT ); -} - -void Block::shareOrphaned(const Bin *bin) -{ - MALLOC_ASSERT( bin, ASSERT_TEXT ); - STAT_increment(owner, index, freeBlockPublic); - // need to set publicFreeList to non-zero, so other threads - // will not change nextPrivatizable and it can be zeroed. - if ((intptr_t)nextPrivatizable==(intptr_t)bin) { - void* oldval; -#if FREELIST_NONBLOCKING - oldval = (void*)AtomicCompareExchange((intptr_t&)publicFreeList, (intptr_t)UNUSABLE, 0); -#else - STAT_increment(owner, ThreadCommonCounters, lockPublicFreeList); - { - MallocMutex::scoped_lock scoped_cs(publicFreeListLock); - if ( (oldval=publicFreeList)==NULL ) - (uintptr_t&)(publicFreeList) = UNUSABLE; - } -#endif - if ( oldval!=NULL ) { - // another thread freed an object; we need to wait until it finishes. - // I believe there is no need for exponential backoff, as the wait here is not for a lock; - // but need to yield, so the thread we wait has a chance to run. - int count = 256; - while( (intptr_t)const_cast<Block* volatile &>(nextPrivatizable)==(intptr_t)bin ) { - if (--count==0) { - do_yield(); - count = 256; - } - } - } - } else { - MALLOC_ASSERT( isSolidPtr(publicFreeList), ASSERT_TEXT ); - } - MALLOC_ASSERT( publicFreeList!=NULL, ASSERT_TEXT ); - // now it is safe to change our data - previous = NULL; - owner.undef(); - // it is caller responsibility to ensure that the list of blocks - // formed by nextPrivatizable pointers is kept consistent if required. - // if only called from thread shutdown code, it does not matter. - (uintptr_t&)(nextPrivatizable) = UNUSABLE; -} - -void Block::cleanBlockHeader() -{ - next = NULL; - previous = NULL; - freeList = NULL; - allocatedCount = 0; - isFull = 0; - - publicFreeList = NULL; -} - -void Block::initEmptyBlock(size_t size) -{ - // Having getIndex and getObjectSize called next to each other - // allows better compiler optimization as they basically share the code. 
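shareOrphaned() above waits for a racing foreign free with a bounded spin followed by a yield, deliberately avoiding a lock and exponential backoff because it waits for another thread's store, not for a lock. Roughly, the pattern is as follows (sched_yield() standing in for the do_yield() macro, std::atomic for the volatile read; illustrative only):

// Bounded spin, then give up the CPU so the thread we wait for can run.
#include <atomic>
#include <sched.h>

static void spinWaitWhile(const std::atomic<bool> &busy) {
    int count = 256;
    while (busy.load(std::memory_order_acquire)) {
        if (--count == 0) {
            sched_yield();   // periodic yield instead of exponential backoff
            count = 256;
        }
    }
}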
- unsigned int index = getIndex(size); - unsigned int objSz = getObjectSize(size); - Bin* tlsBin = getThreadMallocTLS()->bin; - - cleanBlockHeader(); - objectSize = objSz; - owner = ThreadId::get(); - // bump pointer should be prepared for first allocation - thus mode it down to objectSize - bumpPtr = (FreeObject *)((uintptr_t)this + blockSize - objectSize); - - // each block should have the address where the head of the list of "privatizable" blocks is kept - // the only exception is a block for boot strap which is initialized when TLS is yet NULL - nextPrivatizable = tlsBin? (Block*)(tlsBin + index) : NULL; - TRACEF(( "[ScalableMalloc trace] Empty block %p is initialized, owner is %d, objectSize is %d, bumpPtr is %p\n", - this, owner, objectSize, bumpPtr )); -} - -Block *OrphanedBlocks::get(Bin* bin, unsigned int size) -{ - Block *result; - MALLOC_ASSERT( bin, ASSERT_TEXT ); - unsigned int index = getIndex(size); - result = (Block *) bins[index].pop(); - if (result) { - MALLOC_ITT_SYNC_ACQUIRED(bins+index); - result->privatizeOrphaned(bin); - STAT_increment(result->owner, index, allocBlockPublic); - } - return result; -} - -void OrphanedBlocks::put(Bin* bin, Block *block) -{ - unsigned int index = getIndex(block->getSize()); - block->shareOrphaned(bin); - MALLOC_ITT_SYNC_RELEASING(bins+index); - bins[index].push((void **)block); -} - -void FreeBlockPool::insertBlock(Block *block) -{ - size++; - block->next = head; - head = block; - if (!tail) - tail = block; -} - -Block *FreeBlockPool::getBlock() -{ - Block *result = head; - if (head) { - size--; - head = head->next; - if (!head) - tail = NULL; - } - return result; -} - -void FreeBlockPool::returnBlock(Block *block) -{ - MALLOC_ASSERT( size <= POOL_HIGH_MARK, ASSERT_TEXT ); - if (size == POOL_HIGH_MARK) { - // release cold blocks and add hot one - Block *headToFree = head, - *tailToFree = tail; - for (int i=0; i<POOL_LOW_MARK-2; i++) - headToFree = headToFree->next; - tail = headToFree; - headToFree = headToFree->next; - tail->next = NULL; - size = POOL_LOW_MARK-1; - for (Block *currBl = headToFree; currBl; currBl = currBl->next) - removeBackRef(currBl->backRefIdx); - freeBlocks.putList(headToFree, tailToFree); - } - insertBlock(block); -} - -void FreeBlockPool::releaseAllBlocks() -{ - if (head) { - for (Block *currBl = head; currBl; currBl = currBl->next) - removeBackRef(currBl->backRefIdx); - freeBlocks.putList(head, tail); - } -} - -/* Return an empty uninitialized block in a non-blocking fashion. */ -Block *Block::getRaw(bool startup) -{ - Block *result = NULL; - Block *bigBlock; - - if (! (bigBlock = static_cast<Block*>(freeBlocks.get(startup)))) return NULL; - - // check alignment - MALLOC_ASSERT( isAligned( bigBlock, blockSize ), ASSERT_TEXT ); - MALLOC_ASSERT( isAligned( bigBlock->bumpPtr, blockSize ), ASSERT_TEXT ); - // block should be at least as big as blockSize; otherwise the previous block can be damaged. - MALLOC_ASSERT( (uintptr_t)bigBlock->bumpPtr >= (uintptr_t)bigBlock + blockSize, ASSERT_TEXT ); - bigBlock->bumpPtr = (FreeObject *)((uintptr_t)bigBlock->bumpPtr - blockSize); - result = (Block *)bigBlock->bumpPtr; - if ( result!=bigBlock ) { - TRACEF(( "[ScalableMalloc trace] Pushing partial rest of block back on.\n" )); - freeBlocks.put(bigBlock, startup); - } - - return result; -} - -/* Return an empty uninitialized block in a non-blocking fashion. 
*/ -Block *Block::getEmpty(size_t size) -{ - Block *result = NULL; - TLSData* tls = getThreadMallocTLS(); - if (tls) - result = tls->pool.getBlock(); - if (!result) { - BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/false); - if (backRefIdx.isInvalid() || !(result = getRaw(/*startup=*/false))) - return NULL; - setBackRef(backRefIdx, result); - result->backRefIdx = backRefIdx; - } - if (result) { - result->initEmptyBlock(size); - STAT_increment(result->owner, getIndex(result->objectSize), allocBlockNew); - } - return result; -} - -/* We have a block give it back to the malloc block manager */ -void Block::returnEmpty(bool poolTheBlock) -{ - // it is caller's responsibility to ensure no data is lost before calling this - MALLOC_ASSERT( allocatedCount==0, ASSERT_TEXT ); - MALLOC_ASSERT( publicFreeList==NULL, ASSERT_TEXT ); - MALLOC_ASSERT( !poolTheBlock || next == NULL, ASSERT_TEXT ); - MALLOC_ASSERT( !poolTheBlock || previous == NULL, ASSERT_TEXT ); - STAT_increment(owner, getIndex(objectSize), freeBlockBack); - - cleanBlockHeader(); - - nextPrivatizable = NULL; - - objectSize = 0; - owner.invalid(); - // for an empty block, bump pointer should point right after the end of the block - bumpPtr = (FreeObject *)((uintptr_t)this + blockSize); - if (poolTheBlock) { - MALLOC_ASSERT(getThreadMallocTLS(), "Is TLS still not initialized?"); - getThreadMallocTLS()->pool.returnBlock(this); - } - else { - removeBackRef(backRefIdx); - freeBlocks.put(this, /*startup=*/false); - } -} - -inline void Bin::setActiveBlock (Block *block) -{ -// MALLOC_ASSERT( bin, ASSERT_TEXT ); - MALLOC_ASSERT( block->owner.own(), ASSERT_TEXT ); - // it is the caller responsibility to keep bin consistence (i.e. ensure this block is in the bin list) - activeBlk = block; -} - -inline Block* Bin::setPreviousBlockActive() -{ - MALLOC_ASSERT( activeBlk, ASSERT_TEXT ); - Block* temp = activeBlk->previous; - if( temp ) { - MALLOC_ASSERT( temp->isFull == 0, ASSERT_TEXT ); - activeBlk = temp; - } - return temp; -} - -FreeObject *Block::findObjectToFree(void *object) const -{ - FreeObject *objectToFree; - // Due to aligned allocations, a pointer passed to scalable_free - // might differ from the address of internally allocated object. - // Small objects however should always be fine. - if (objectSize <= maxSegregatedObjectSize) - objectToFree = (FreeObject*)object; - // "Fitting size" allocations are suspicious if aligned higher than naturally - else { - if ( ! isAligned(object,2*fittingAlignment) ) - // TODO: the above check is questionable - it gives false negatives in ~50% cases, - // so might even be slower in average than unconditional use of findAllocatedObject. - // here it should be a "real" object - objectToFree = (FreeObject*)object; - else - // here object can be an aligned address, so applying additional checks - objectToFree = findAllocatedObject(object); - MALLOC_ASSERT( isAligned(objectToFree,fittingAlignment), ASSERT_TEXT ); - } - MALLOC_ASSERT( isProperlyPlaced(objectToFree), ASSERT_TEXT ); - - return objectToFree; -} - -#if MALLOC_CHECK_RECURSION - -/* - * It's a special kind of allocation that can be used when malloc is - * not available (either during startup or when malloc was already called and - * we are, say, inside pthread_setspecific's call). 
- * Block can contain objects of different sizes, - * allocations are performed by moving bump pointer and increasing of object counter, - * releasing is done via counter of objects allocated in the block - * or moving bump pointer if releasing object is on a bound. - */ - -class StartupBlock : public Block { - size_t availableSize() { - return blockSize - ((uintptr_t)bumpPtr - (uintptr_t)this); - } - static StartupBlock *getBlock(); -public: - static FreeObject *allocate(size_t size); - static size_t msize(void *ptr) { return *((size_t*)ptr - 1); } - void free(void *ptr); -}; - -static MallocMutex startupMallocLock; -static StartupBlock *firstStartupBlock; - -StartupBlock *StartupBlock::getBlock() -{ - BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/false); - if (backRefIdx.isInvalid()) return NULL; - - StartupBlock *block = (StartupBlock *)getRaw(/*startup=*/true); - if (!block) return NULL; - - block->cleanBlockHeader(); - setBackRef(backRefIdx, block); - block->backRefIdx = backRefIdx; - // use startupAllocObjSizeMark to mark objects from startup block marker - block->objectSize = startupAllocObjSizeMark; - block->bumpPtr = (FreeObject *)((uintptr_t)block + sizeof(StartupBlock)); - return block; -} - -/* TODO: Function is called when malloc nested call is detected, so simultaneous - usage from different threads are unprobable, so block pre-allocation - can be not useful, and the code might be simplified. */ -FreeObject *StartupBlock::allocate(size_t size) -{ - FreeObject *result; - StartupBlock *newBlock = NULL; - bool newBlockUnused = false; - - /* Objects must be aligned on their natural bounds, - and objects bigger than word on word's bound. */ - size = alignUp(size, sizeof(size_t)); - // We need size of an object to implement msize. 
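StartupBlock keeps each object's requested size in a machine word stored immediately in front of the object, which is what makes msize(ptr) a single read at a negative offset. A standalone sketch of that size-prefix layout, built on plain malloc purely for illustration (names are not the tbbmalloc ones):

// Size-prefix layout: one header word in front of every object.
#include <cassert>
#include <cstddef>
#include <cstdlib>

static void *prefixAlloc(std::size_t size) {
    size = (size + sizeof(std::size_t) - 1) & ~(sizeof(std::size_t) - 1); // word-align
    std::size_t *raw = static_cast<std::size_t *>(std::malloc(size + sizeof(std::size_t)));
    if (!raw) return nullptr;
    *raw = size;        // keep the object size at the negative offset
    return raw + 1;     // hand out the memory right after the header word
}

static std::size_t prefixMsize(void *ptr) {
    return *(static_cast<std::size_t *>(ptr) - 1);
}

static void prefixFree(void *ptr) {
    std::free(static_cast<std::size_t *>(ptr) - 1);
}

int main() {
    void *p = prefixAlloc(10);
    assert(p && prefixMsize(p) >= 10);   // 10 rounded up to a whole word
    prefixFree(p);
    return 0;
}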
- size_t reqSize = size + sizeof(size_t); - // speculatively allocates newBlock to later use or return it as unused - if (!firstStartupBlock || firstStartupBlock->availableSize() < reqSize) - if (!(newBlock = StartupBlock::getBlock())) - return NULL; - - { - MallocMutex::scoped_lock scoped_cs(startupMallocLock); - - if (!firstStartupBlock || firstStartupBlock->availableSize() < reqSize) { - if (!newBlock && !(newBlock = StartupBlock::getBlock())) - return NULL; - newBlock->next = (Block*)firstStartupBlock; - if (firstStartupBlock) - firstStartupBlock->previous = (Block*)newBlock; - firstStartupBlock = newBlock; - } else - newBlockUnused = true; - result = firstStartupBlock->bumpPtr; - firstStartupBlock->allocatedCount++; - firstStartupBlock->bumpPtr = - (FreeObject *)((uintptr_t)firstStartupBlock->bumpPtr + reqSize); - } - if (newBlock && newBlockUnused) - newBlock->returnEmpty(/*poolTheBlock=*/false); - - // keep object size at the negative offset - *((size_t*)result) = size; - return (FreeObject*)((size_t*)result+1); -} - -void StartupBlock::free(void *ptr) -{ - Block* blockToRelease = NULL; - { - MallocMutex::scoped_lock scoped_cs(startupMallocLock); - - MALLOC_ASSERT(firstStartupBlock, ASSERT_TEXT); - MALLOC_ASSERT(startupAllocObjSizeMark==objectSize - && allocatedCount>0, ASSERT_TEXT); - MALLOC_ASSERT((uintptr_t)ptr>=(uintptr_t)this+sizeof(StartupBlock) - && (uintptr_t)ptr+StartupBlock::msize(ptr)<=(uintptr_t)this+blockSize, - ASSERT_TEXT); - if (0 == --allocatedCount) { - if (this == firstStartupBlock) - firstStartupBlock = (StartupBlock*)firstStartupBlock->next; - if (previous) - previous->next = next; - if (next) - next->previous = previous; - blockToRelease = this; - } else if ((uintptr_t)ptr + StartupBlock::msize(ptr) == (uintptr_t)bumpPtr) { - // last object in the block released - FreeObject *newBump = (FreeObject*)((size_t*)ptr - 1); - MALLOC_ASSERT((uintptr_t)newBump>(uintptr_t)this+sizeof(StartupBlock), - ASSERT_TEXT); - bumpPtr = newBump; - } - } - if (blockToRelease) { - blockToRelease->previous = blockToRelease->next = NULL; - blockToRelease->returnEmpty(/*poolTheBlock=*/false); - } -} - -#endif /* MALLOC_CHECK_RECURSION */ - -/********* End thread related code *************/ - -/********* Library initialization *************/ - -//! Value indicating the state of initialization. -/* 0 = initialization not started. - * 1 = initialization started but not finished. - * 2 = initialization finished. - * In theory, we only need values 0 and 2. But value 1 is nonetheless - * useful for detecting errors in the double-check pattern. - */ -static intptr_t mallocInitialized; // implicitly initialized to 0 -static MallocMutex initMutex; - -inline bool isMallocInitialized() { - // Load must have acquire fence; otherwise thread taking "initialized" path - // might perform textually later loads *before* mallocInitialized becomes 2. - return 2 == FencedLoad(mallocInitialized); -} - -bool isMallocInitializedExt() { - return isMallocInitialized(); -} - -/* - * Allocator initialization routine; - * it is called lazily on the very first scalable_malloc call. 
- */ -static void initMemoryManager() -{ - TRACEF(( "[ScalableMalloc trace] sizeof(Block) is %d (expected 128); sizeof(uintptr_t) is %d\n", - sizeof(Block), sizeof(uintptr_t) )); - MALLOC_ASSERT( 2*blockHeaderAlignment == sizeof(Block), ASSERT_TEXT ); - MALLOC_ASSERT( sizeof(FreeObject) == sizeof(void*), ASSERT_TEXT ); - -// TODO: add error handling, and on error do something better than exit(1) - if (!initBackRefMaster() || !freeBlocks.bootstrap(NULL, NULL, 0)) { - fprintf (stderr, "The memory manager cannot access sufficient memory to initialize; exiting \n"); - exit(1); - } -// Create keys for thread-local storage and for thread id -#if USE_WINTHREAD - TLS_pointer_key = TlsAlloc(); -#else - int status1 = pthread_key_create( &TLS_pointer_key, mallocThreadShutdownNotification ); - if ( status1 ) { - fprintf (stderr, "The memory manager cannot create tls key during initialization; exiting \n"); - exit(1); - } -#endif /* USE_WINTHREAD */ - ThreadId::init(); -#if COLLECT_STATISTICS - initStatisticsCollection(); -#endif -} - -//! Ensures that initMemoryManager() is called once and only once. -/** Does not return until initMemoryManager() has been completed by a thread. - There is no need to call this routine if mallocInitialized==2 . */ -static void doInitialization() -{ - MallocMutex::scoped_lock lock( initMutex ); - if (mallocInitialized!=2) { - MALLOC_ASSERT( mallocInitialized==0, ASSERT_TEXT ); - mallocInitialized = 1; - RecursiveMallocCallProtector scoped; - initMemoryManager(); -#ifdef MALLOC_EXTRA_INITIALIZATION - MALLOC_EXTRA_INITIALIZATION; -#endif -#if MALLOC_CHECK_RECURSION - RecursiveMallocCallProtector::detectNaiveOverload(); -#endif - MALLOC_ASSERT( mallocInitialized==1, ASSERT_TEXT ); - // Store must have release fence, otherwise mallocInitialized==2 - // might become remotely visible before side effects of - // initMemoryManager() become remotely visible. - FencedStore( mallocInitialized, 2 ); - } - /* It can't be 0 or I would have initialized it */ - MALLOC_ASSERT( mallocInitialized==2, ASSERT_TEXT ); -} - -/********* End library initialization *************/ - -/********* The malloc show begins *************/ - - -FreeObject *Block::allocateFromFreeList() -{ - FreeObject *result; - - if (!freeList) return NULL; - - result = freeList; - MALLOC_ASSERT( result, ASSERT_TEXT ); - - freeList = result->next; - MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT ); - allocatedCount++; - STAT_increment(owner, getIndex(objectSize), allocFreeListUsed); - - return result; -} - -FreeObject *Block::allocateFromBumpPtr() -{ - FreeObject *result = bumpPtr; - if (result) { - bumpPtr = (FreeObject *) ((uintptr_t) bumpPtr - objectSize); - if ( (uintptr_t)bumpPtr < (uintptr_t)this+sizeof(Block) ) { - bumpPtr = NULL; - } - MALLOC_ASSERT( allocatedCount < (blockSize-sizeof(Block))/objectSize, ASSERT_TEXT ); - allocatedCount++; - STAT_increment(owner, getIndex(objectSize), allocBumpPtrUsed); - } - return result; -} - -inline FreeObject* Block::allocate() -{ - FreeObject *result; - - MALLOC_ASSERT( owner.own(), ASSERT_TEXT ); - - /* for better cache locality, first looking in the free list. */ - if ( (result = allocateFromFreeList()) ) { - return result; - } - MALLOC_ASSERT( !freeList, ASSERT_TEXT ); - - /* if free list is empty, try thread local bump pointer allocation. */ - if ( (result = allocateFromBumpPtr()) ) { - return result; - } - MALLOC_ASSERT( !bumpPtr, ASSERT_TEXT ); - - /* the block is considered full. 
*/ - isFull = 1; - return NULL; -} - -void Bin::moveBlockToBinFront(Block *block) -{ - /* move the block to the front of the bin */ - if (block == activeBlk) return; - outofTLSBin(block); - pushTLSBin(block); -} - -void Bin::processLessUsedBlock(Block *block) -{ - if (block != activeBlk) { - /* We are not actively using this block; return it to the general block pool */ - outofTLSBin(block); - block->returnEmpty(/*poolTheBlock=*/true); - } else { - /* all objects are free - let's restore the bump pointer */ - block->restoreBumpPtr(); - } -} - -/* - * All aligned allocations fall into one of the following categories: - * 1. if both request size and alignment are <= maxSegregatedObjectSize, - * we just align the size up, and request this amount, because for every size - * aligned to some power of 2, the allocated object is at least that aligned. - * 2. for bigger size, check if already guaranteed fittingAlignment is enough. - * 3. if size+alignment<minLargeObjectSize, we take an object of fittingSizeN and align - * its address up; given such pointer, scalable_free could find the real object. - * 4. otherwise, aligned large object is allocated. - */ -static void *allocateAligned(size_t size, size_t alignment) -{ - MALLOC_ASSERT( isPowerOfTwo(alignment), ASSERT_TEXT ); - - void *result; - if (size<=maxSegregatedObjectSize && alignment<=maxSegregatedObjectSize) - result = scalable_malloc(alignUp(size? size: sizeof(size_t), alignment)); - else if (size<minLargeObjectSize && alignment<=fittingAlignment) - result = scalable_malloc(size); - else if (size+alignment < minLargeObjectSize) { - void *unaligned = scalable_malloc(size+alignment); - if (!unaligned) return NULL; - result = alignUp(unaligned, alignment); - } else { - /* This can be the first allocation call. */ - if (!isMallocInitialized()) - doInitialization(); - // take into account only alignment that are higher then natural - result = mallocLargeObject(size, largeObjectAlignment>alignment? - largeObjectAlignment: alignment); - } - - MALLOC_ASSERT( isAligned(result, alignment), ASSERT_TEXT ); - return result; -} - -static void *reallocAligned(void *ptr, size_t size, size_t alignment = 0) -{ - void *result; - size_t copySize; - - if (isLargeObject(ptr)) { - LargeMemoryBlock* lmb = ((LargeObjectHdr *)ptr - 1)->memoryBlock; - copySize = lmb->unalignedSize-((uintptr_t)ptr-(uintptr_t)lmb); - if (size <= copySize && (0==alignment || isAligned(ptr, alignment))) { - lmb->objectSize = size; - return ptr; - } else { - copySize = lmb->objectSize; - result = alignment ? allocateAligned(size, alignment) : scalable_malloc(size); - } - } else { - Block* block = (Block *)alignDown(ptr, blockSize); - copySize = block->getSize(); - if (size <= copySize && (0==alignment || isAligned(ptr, alignment))) { - return ptr; - } else { - result = alignment ? allocateAligned(size, alignment) : scalable_malloc(size); - } - } - if (result) { - memcpy(result, ptr, copySize<size? 
copySize: size); - scalable_free(ptr); - } - return result; -} - -/* A predicate checks if an object is properly placed inside its block */ -inline bool Block::isProperlyPlaced(const void *object) const -{ - return 0 == ((uintptr_t)this + blockSize - (uintptr_t)object) % objectSize; -} - -/* Finds the real object inside the block */ -FreeObject *Block::findAllocatedObject(const void *address) const -{ - // calculate offset from the end of the block space - uintptr_t offset = (uintptr_t)this + blockSize - (uintptr_t)address; - MALLOC_ASSERT( offset<blockSize-sizeof(Block), ASSERT_TEXT ); - // find offset difference from a multiple of allocation size - offset %= objectSize; - // and move the address down to where the real object starts. - return (FreeObject*)((uintptr_t)address - (offset? objectSize-offset: 0)); -} - -/* - * Bad dereference caused by a foreign pointer is possible only here, not earlier in call chain. - * Separate function isolates SEH code, as it has bad influence on compiler optimization. - */ -static inline BackRefIdx safer_dereference (const BackRefIdx *ptr) -{ - BackRefIdx id; -#if _MSC_VER - __try { -#endif - id = *ptr; -#if _MSC_VER - } __except( GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION? - EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH ) { - id = BackRefIdx(); - } -#endif - return id; -} - -bool isLargeObject(void *object) -{ - if (!isAligned(object, largeObjectAlignment)) - return false; - LargeObjectHdr *header = (LargeObjectHdr*)object - 1; - BackRefIdx idx = safer_dereference(&header->backRefIdx); - - return idx.isLargeObject() && getBackRef(idx) == header; -} - -static inline bool isSmallObject (void *ptr) -{ - void* expected = alignDown(ptr, blockSize); - const BackRefIdx* idx = ((Block*)expected)->getBackRef(); - - return expected == getBackRef(safer_dereference(idx)); -} - -/**** Check if an object was allocated by scalable_malloc ****/ -static inline bool isRecognized (void* ptr) -{ - return isLargeObject(ptr) || isSmallObject(ptr); -} - -static inline void freeSmallObject (void *object) -{ - /* mask low bits to get the block */ - Block *block = (Block *)alignDown(object, blockSize); - MALLOC_ASSERT( block->checkFreePrecond(), ASSERT_TEXT ); - -#if MALLOC_CHECK_RECURSION - if (block->isStartupAllocObject()) { - ((StartupBlock *)block)->free(object); - return; - } -#endif - FreeObject *objectToFree = block->findObjectToFree(object); - - if (block->ownBlock()) - block->freeOwnObject(objectToFree); - else /* Slower path to add to the shared list, the allocatedCount is updated by the owner thread in malloc. */ - block->freePublicObject(objectToFree); - -} - -} // namespace internal -} // namespace rml - -using namespace rml::internal; - -/* - * When a thread is shutting down this routine should be called to remove all the thread ids - * from the malloc blocks and replace them with a NULL thread id. - * - */ -#if MALLOC_TRACE -static unsigned int threadGoingDownCount = 0; -#endif - -/* - * for pthreads, the function is set as a callback in pthread_key_create for TLS bin. - * it will be automatically called at thread exit with the key value as the argument. - * - * for Windows, it should be called directly e.g. from DllMain; the argument can be NULL - * one should include "TypeDefinitions.h" for the declaration of this function. 
-*/ -extern "C" void mallocThreadShutdownNotification(void* arg) -{ - TLSData *tls; - Block *threadBlock; - Block *threadlessBlock; - unsigned int index; - - // Check whether TLS has been initialized - if (!isMallocInitialized()) return; - - TRACEF(( "[ScalableMalloc trace] Thread id %d blocks return start %d\n", - getThreadId(), threadGoingDownCount++ )); -#ifdef USE_WINTHREAD - tls = getThreadMallocTLS(); -#else - tls = (TLSData*)arg; -#endif - if (tls) { - Bin *tlsBin = tls->bin; - tls->pool.releaseAllBlocks(); - - for (index = 0; index < numBlockBins; index++) { - if (tlsBin[index].activeBlk==NULL) - continue; - threadlessBlock = tlsBin[index].activeBlk->previous; - while (threadlessBlock) { - threadBlock = threadlessBlock->previous; - if (threadlessBlock->allocatedCount==0 && threadlessBlock->publicFreeList==NULL) { - /* we destroy the thread, so not use its block pool */ - threadlessBlock->returnEmpty(/*poolTheBlock=*/false); - } else { - orphanedBlocks->put(tlsBin+index, threadlessBlock); - } - threadlessBlock = threadBlock; - } - threadlessBlock = tlsBin[index].activeBlk; - while (threadlessBlock) { - threadBlock = threadlessBlock->next; - if (threadlessBlock->allocatedCount==0 && threadlessBlock->publicFreeList==NULL) { - /* we destroy the thread, so not use its block pool */ - threadlessBlock->returnEmpty(/*poolTheBlock=*/false); - } else { - orphanedBlocks->put(tlsBin+index, threadlessBlock); - } - threadlessBlock = threadBlock; - } - tlsBin[index].activeBlk = 0; - } - bootStrapBlocks.free(tls); - setThreadMallocTLS(NULL); - } - - TRACEF(( "[ScalableMalloc trace] Thread id %d blocks return end\n", getThreadId() )); -} - -extern "C" void mallocProcessShutdownNotification(void) -{ -#if COLLECT_STATISTICS - ThreadId nThreads = ThreadIdCount; - for( int i=1; i<=nThreads && i<MAX_THREADS; ++i ) - STAT_print(i); -#endif -} - -/********* The malloc code *************/ - -extern "C" void * scalable_malloc(size_t size) -{ - Bin* bin; - Block * mallocBlock; - FreeObject *result = NULL; - - if (!size) size = sizeof(size_t); - -#if MALLOC_CHECK_RECURSION - if (RecursiveMallocCallProtector::sameThreadActive()) { - result = size<minLargeObjectSize? StartupBlock::allocate(size) : - (FreeObject*)mallocLargeObject(size, blockSize, /*startupAlloc=*/ true); - if (!result) errno = ENOMEM; - return result; - } -#endif - - if (!isMallocInitialized()) - doInitialization(); - - /* - * Use Large Object Allocation - */ - if (size >= minLargeObjectSize) { - result = (FreeObject*)mallocLargeObject(size, largeObjectAlignment); - if (!result) errno = ENOMEM; - return result; - } - - /* - * Get an element in thread-local array corresponding to the given size; - * It keeps ptr to the active block for allocations of this size - */ - bin = Bin::getAllocationBin(size); - if ( !bin ) { - errno = ENOMEM; - return NULL; - } - - /* Get the block of you want to try to allocate in. 
*/ - mallocBlock = bin->getActiveBlock(); - - if (mallocBlock) { - do { - if( (result = mallocBlock->allocate()) ) { - return result; - } - // the previous block, if any, should be empty enough - } while( (mallocBlock = bin->setPreviousBlockActive()) ); - } - - /* - * else privatize publicly freed objects in some block and allocate from it - */ - mallocBlock = bin->getPublicFreeListBlock(); - if (mallocBlock) { - if (mallocBlock->emptyEnoughToUse()) { - bin->moveBlockToBinFront(mallocBlock); - } - MALLOC_ASSERT( mallocBlock->freeListNonNull(), ASSERT_TEXT ); - if ( (result = mallocBlock->allocateFromFreeList()) ) { - return result; - } - /* Else something strange happened, need to retry from the beginning; */ - TRACEF(( "[ScalableMalloc trace] Something is wrong: no objects in public free list; reentering.\n" )); - return scalable_malloc(size); - } - - /* - * no suitable own blocks, try to get a partial block that some other thread has discarded. - */ - mallocBlock = orphanedBlocks->get(bin, size); - while (mallocBlock) { - bin->pushTLSBin(mallocBlock); - bin->setActiveBlock(mallocBlock); // TODO: move under the below condition? - if( (result = mallocBlock->allocate()) ) { - return result; - } - mallocBlock = orphanedBlocks->get(bin, size); - } - - /* - * else try to get a new empty block - */ - mallocBlock = Block::getEmpty(size); - if (mallocBlock) { - bin->pushTLSBin(mallocBlock); - bin->setActiveBlock(mallocBlock); - if( (result = mallocBlock->allocate()) ) { - return result; - } - /* Else something strange happened, need to retry from the beginning; */ - TRACEF(( "[ScalableMalloc trace] Something is wrong: no objects in empty block; reentering.\n" )); - return scalable_malloc(size); - } - /* - * else nothing works so return NULL - */ - TRACEF(( "[ScalableMalloc trace] No memory found, returning NULL.\n" )); - errno = ENOMEM; - return NULL; -} - -/********* End the malloc code *************/ - -/********* The free code *************/ - -extern "C" void scalable_free (void *object) { -if (!object) - return; - - MALLOC_ASSERT(isRecognized(object), "Invalid pointer in scalable_free detected."); - - if (isLargeObject(object)) - freeLargeObject(object); - else - freeSmallObject(object); -} - -/* - * A variant that provides additional memory safety, by checking whether the given address - * was obtained with this allocator, and if not redirecting to the provided alternative call. - */ -extern "C" void safer_scalable_free (void *object, void (*original_free)(void*)) -{ - if (!object) - return; - - // must check 1st for large object, because small object check touches 4 pages on left, - // and it can be unaccessable - if (isLargeObject(object)) - freeLargeObject(object); - else if (isSmallObject(object)) - freeSmallObject(object); - else if (original_free) - original_free(object); -} - -/********* End the free code *************/ - -/********* Code for scalable_realloc ***********/ - -/* - * From K&R - * "realloc changes the size of the object pointed to by p to size. The contents will - * be unchanged up to the minimum of the old and the new sizes. If the new size is larger, - * the new space is uninitialized. realloc returns a pointer to the new space, or - * NULL if the request cannot be satisfied, in which case *p is unchanged." 
- * - */ -extern "C" void* scalable_realloc(void* ptr, size_t size) -{ - /* corner cases left out of reallocAligned to not deal with errno there */ - if (!ptr) { - return scalable_malloc(size); - } - if (!size) { - scalable_free(ptr); - return NULL; - } - void* tmp = reallocAligned(ptr, size, 0); - if (!tmp) errno = ENOMEM; - return tmp; -} - -/* - * A variant that provides additional memory safety, by checking whether the given address - * was obtained with this allocator, and if not redirecting to the provided alternative call. - */ -extern "C" void* safer_scalable_realloc (void* ptr, size_t sz, void* original_realloc) -{ - if (!ptr) { - return scalable_malloc(sz); - } - if (isRecognized(ptr)) { - if (!sz) { - scalable_free(ptr); - return NULL; - } - void* tmp = reallocAligned(ptr, sz, 0); - if (!tmp) errno = ENOMEM; - return tmp; - } -#if USE_WINTHREAD - else if (original_realloc && sz) { - orig_ptrs *original_ptrs = static_cast<orig_ptrs*>(original_realloc); - if ( original_ptrs->orig_msize ){ - size_t oldSize = original_ptrs->orig_msize(ptr); - void *newBuf = scalable_malloc(sz); - if (newBuf) { - memcpy(newBuf, ptr, sz<oldSize? sz : oldSize); - if ( original_ptrs->orig_free ){ - original_ptrs->orig_free( ptr ); - } - } - return newBuf; - } - } -#else - else if (original_realloc) { - typedef void* (*realloc_ptr_t)(void*,size_t); - realloc_ptr_t original_realloc_ptr; - (void *&)original_realloc_ptr = original_realloc; - return original_realloc_ptr(ptr,sz); - } -#endif - return NULL; -} - -/********* End code for scalable_realloc ***********/ - -/********* Code for scalable_calloc ***********/ - -/* - * From K&R - * calloc returns a pointer to space for an array of nobj objects, - * each of size size, or NULL if the request cannot be satisfied. - * The space is initialized to zero bytes. 
- * - */ - -extern "C" void * scalable_calloc(size_t nobj, size_t size) -{ - size_t arraySize = nobj * size; - void* result = scalable_malloc(arraySize); - if (result) - memset(result, 0, arraySize); - return result; -} - -/********* End code for scalable_calloc ***********/ - -/********* Code for aligned allocation API **********/ - -extern "C" int scalable_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - if ( !isPowerOfTwoMultiple(alignment, sizeof(void*)) ) - return EINVAL; - void *result = allocateAligned(size, alignment); - if (!result) - return ENOMEM; - *memptr = result; - return 0; -} - -extern "C" void * scalable_aligned_malloc(size_t size, size_t alignment) -{ - if (!isPowerOfTwo(alignment) || 0==size) { - errno = EINVAL; - return NULL; - } - void* tmp = allocateAligned(size, alignment); - if (!tmp) - errno = ENOMEM; - return tmp; -} - -extern "C" void * scalable_aligned_realloc(void *ptr, size_t size, size_t alignment) -{ - /* corner cases left out of reallocAligned to not deal with errno there */ - if (!isPowerOfTwo(alignment)) { - errno = EINVAL; - return NULL; - } - if (!ptr) { - return allocateAligned(size, alignment); - } - if (!size) { - scalable_free(ptr); - return NULL; - } - - void* tmp = reallocAligned(ptr, size, alignment); - if (!tmp) errno = ENOMEM; - return tmp; -} - -extern "C" void * safer_scalable_aligned_realloc(void *ptr, size_t size, size_t alignment, void* orig_function) -{ - /* corner cases left out of reallocAligned to not deal with errno there */ - if (!isPowerOfTwo(alignment)) { - errno = EINVAL; - return NULL; - } - if (!ptr) { - return allocateAligned(size, alignment); - } - if (isRecognized(ptr)) { - if (!size) { - scalable_free(ptr); - return NULL; - } - void* tmp = reallocAligned(ptr, size, alignment); - if (!tmp) errno = ENOMEM; - return tmp; - } -#if USE_WINTHREAD - else { - orig_ptrs *original_ptrs = static_cast<orig_ptrs*>(orig_function); - if (size) { - if ( original_ptrs->orig_msize ){ - size_t oldSize = original_ptrs->orig_msize(ptr); - void *newBuf = allocateAligned(size, alignment); - if (newBuf) { - memcpy(newBuf, ptr, size<oldSize? size : oldSize); - if ( original_ptrs->orig_free ){ - original_ptrs->orig_free( ptr ); - } - } - return newBuf; - }else{ - //We can't do anything with this. Just keeping old pointer - return NULL; - } - } else { - if ( original_ptrs->orig_free ){ - original_ptrs->orig_free( ptr ); - } - return NULL; - } - } -#endif - return NULL; -} - -extern "C" void scalable_aligned_free(void *ptr) -{ - scalable_free(ptr); -} - -/********* end code for aligned allocation API **********/ - -/********* Code for scalable_msize ***********/ - -/* - * Returns the size of a memory block allocated in the heap. - */ -extern "C" size_t scalable_msize(void* ptr) -{ - if (ptr) { - MALLOC_ASSERT(isRecognized(ptr), "Invalid pointer in scalable_msize detected."); - if (isLargeObject(ptr)) { - LargeMemoryBlock* lmb = ((LargeObjectHdr*)ptr - 1)->memoryBlock; - return lmb->objectSize; - } else { - Block* block = (Block *)alignDown(ptr, blockSize); -#if MALLOC_CHECK_RECURSION - size_t size = block->getSize()? block->getSize() : StartupBlock::msize(ptr); -#else - size_t size = block->getSize(); -#endif - MALLOC_ASSERT(size>0 && size<minLargeObjectSize, ASSERT_TEXT); - return size; - } - } - errno = EINVAL; - // Unlike _msize, return 0 in case of parameter error. - // Returning size_t(-1) looks more like the way to troubles. 
- return 0; -} - -/* - * A variant that provides additional memory safety, by checking whether the given address - * was obtained with this allocator, and if not redirecting to the provided alternative call. - */ -extern "C" size_t safer_scalable_msize (void *object, size_t (*original_msize)(void*)) -{ - if (object) { - // Check if the memory was allocated by scalable_malloc - if (isRecognized(object)) - return scalable_msize(object); - else if (original_msize) - return original_msize(object); - } - // object is NULL or unknown - errno = EINVAL; - return 0; -} - -/********* End code for scalable_msize ***********/ diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/large_objects.cpp b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/large_objects.cpp deleted file mode 100644 index b8b9a9dce7..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/large_objects.cpp +++ /dev/null @@ -1,272 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "tbbmalloc_internal.h" - -/********* Allocation of large objects ************/ - - -namespace rml { -namespace internal { - -static struct LargeBlockCacheStat { - uintptr_t age; - size_t cacheSize; -} loCacheStat; - - /* - * The number of bins to cache large objects. - */ -const uint32_t numLargeBlockBins = 1024; // for 1024 max cached size is near 8MB - - -class CachedBlocksList { - LargeMemoryBlock *first, - *last; - /* age of an oldest block in the list; equal to last->age, if last defined, - used for quick cheching it without acquiring the lock. */ - uintptr_t oldest; - /* currAge when something was excluded out of list because of the age, - not because of cache hit */ - uintptr_t lastCleanedAge; - /* Current threshold value for the blocks of a particular size. - Set on cache miss. */ - intptr_t ageThreshold; - - MallocMutex lock; - /* CachedBlocksList should be placed in zero-initialized memory, - ctor not needed. */ - CachedBlocksList(); -public: - inline void push(LargeMemoryBlock* ptr); - inline LargeMemoryBlock* pop(); - void releaseLastIfOld(uintptr_t currAge, size_t size); -}; - -/* - * Array of bins with lists of recently freed large objects cached for re-use. 
- */ -static char globalCachedBlockBinsSpace[sizeof(CachedBlocksList)*numLargeBlockBins]; -static CachedBlocksList* globalCachedBlockBins = (CachedBlocksList*)globalCachedBlockBinsSpace; - -/* - * Large Objects are the only objects in the system that begin - * on a 16K byte boundary since the blocks used for smaller objects - * have the Block structure at each 16K boundary. - */ -static uintptr_t cleanupCacheIfNeed(); - -void CachedBlocksList::push(LargeMemoryBlock *ptr) -{ - ptr->prev = NULL; - ptr->age = cleanupCacheIfNeed (); - - MallocMutex::scoped_lock scoped_cs(lock); - ptr->next = first; - first = ptr; - if (ptr->next) ptr->next->prev = ptr; - if (!last) { - MALLOC_ASSERT(0 == oldest, ASSERT_TEXT); - oldest = ptr->age; - last = ptr; - } -} - -LargeMemoryBlock *CachedBlocksList::pop() -{ - uintptr_t currAge = cleanupCacheIfNeed(); - LargeMemoryBlock *result=NULL; - { - MallocMutex::scoped_lock scoped_cs(lock); - if (first) { - result = first; - first = result->next; - if (first) - first->prev = NULL; - else { - last = NULL; - oldest = 0; - } - } else { - /* If cache miss occured, set ageThreshold to twice the difference - between current time and last time cache was cleaned. */ - ageThreshold = 2*(currAge - lastCleanedAge); - } - } - return result; -} - -void CachedBlocksList::releaseLastIfOld(uintptr_t currAge, size_t size) -{ - LargeMemoryBlock *toRelease = NULL; - - /* oldest may be more recent then age, that's why cast to signed type - was used. age overflow is also processed correctly. */ - if (last && (intptr_t)(currAge - oldest) > ageThreshold) { - MallocMutex::scoped_lock scoped_cs(lock); - // double check - if (last && (intptr_t)(currAge - last->age) > ageThreshold) { - do { - last = last->prev; - } while (last && (intptr_t)(currAge - last->age) > ageThreshold); - if (last) { - toRelease = last->next; - oldest = last->age; - last->next = NULL; - } else { - toRelease = first; - first = NULL; - oldest = 0; - } - MALLOC_ASSERT( toRelease, ASSERT_TEXT ); - lastCleanedAge = toRelease->age; - } - else - return; - } - while ( toRelease ) { - LargeMemoryBlock *helper = toRelease->next; - removeBackRef(toRelease->backRefIdx); - freeRawMemory(toRelease, size, toRelease->fromMapMemory); - toRelease = helper; - } -} - -static uintptr_t cleanupCacheIfNeed () -{ - /* loCacheStat.age overflow is OK, as we only want difference between - * its current value and some recent. - * - * Both malloc and free should increment loCacheStat.age, as in - * a different case multiple cached blocks would have same age, - * and accuracy of predictors suffers. - */ - uintptr_t currAge = (uintptr_t)AtomicIncrement((intptr_t&)loCacheStat.age); - - if ( 0 == currAge % cacheCleanupFreq ) { - size_t objSize; - int i; - - for (i = numLargeBlockBins-1, - objSize = (numLargeBlockBins-1)*largeBlockCacheStep+blockSize; - i >= 0; - i--, objSize-=largeBlockCacheStep) { - /* cached block size on iteration is - * i*largeBlockCacheStep+blockSize, it seems iterative - * computation of it improves performance. - */ - // release from cache blocks that are older than ageThreshold - globalCachedBlockBins[i].releaseLastIfOld(currAge, objSize); - } - } - return currAge; -} - -static LargeMemoryBlock* getCachedLargeBlock (size_t size) -{ - MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT ); - LargeMemoryBlock *lmb = NULL; - // blockSize is the minimal alignment and thus the minimal size of a large object. 
- size_t idx = (size-minLargeObjectSize)/largeBlockCacheStep; - if (idx<numLargeBlockBins) { - lmb = globalCachedBlockBins[idx].pop(); - if (lmb) { - MALLOC_ITT_SYNC_ACQUIRED(globalCachedBlockBins+idx); - STAT_increment(getThreadId(), ThreadCommonCounters, allocCachedLargeBlk); - } - } - return lmb; -} - -void* mallocLargeObject (size_t size, size_t alignment, bool startupAlloc) -{ - LargeMemoryBlock* lmb; - size_t headersSize = sizeof(LargeMemoryBlock)+sizeof(LargeObjectHdr); - size_t allocationSize = alignUp(size+headersSize+alignment, largeBlockCacheStep); - - if (startupAlloc || !(lmb = getCachedLargeBlock(allocationSize))) { - BackRefIdx backRefIdx; - - if ((backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true)).isInvalid()) - return NULL; - lmb = (LargeMemoryBlock*)getRawMemory(allocationSize, /*useMapMem=*/startupAlloc); - if (!lmb) return NULL; - lmb->fromMapMemory = startupAlloc; - lmb->backRefIdx = backRefIdx; - lmb->unalignedSize = allocationSize; - STAT_increment(getThreadId(), ThreadCommonCounters, allocNewLargeObj); - } - - void *alignedArea = (void*)alignUp((uintptr_t)lmb+headersSize, alignment); - LargeObjectHdr *header = (LargeObjectHdr*)alignedArea-1; - header->memoryBlock = lmb; - header->backRefIdx = lmb->backRefIdx; - setBackRef(header->backRefIdx, header); - - lmb->objectSize = size; - - MALLOC_ASSERT( isLargeObject(alignedArea), ASSERT_TEXT ); - return alignedArea; -} - -static bool freeLargeObjectToCache (LargeMemoryBlock* largeBlock) -{ - size_t size = largeBlock->unalignedSize; - size_t idx = (size-minLargeObjectSize)/largeBlockCacheStep; - if (idx<numLargeBlockBins) { - MALLOC_ASSERT( size%largeBlockCacheStep==0, ASSERT_TEXT ); - MALLOC_ITT_SYNC_RELEASING(globalCachedBlockBins+idx); - globalCachedBlockBins[idx].push(largeBlock); - - STAT_increment(getThreadId(), ThreadCommonCounters, cacheLargeBlk); - return true; - } - return false; -} - -void freeLargeObject (void *object) -{ - LargeObjectHdr *header = (LargeObjectHdr*)object - 1; - - // overwrite backRefIdx to simplify double free detection - header->backRefIdx = BackRefIdx(); - if (!freeLargeObjectToCache(header->memoryBlock)) { - removeBackRef(header->memoryBlock->backRefIdx); - freeRawMemory(header->memoryBlock, header->memoryBlock->unalignedSize, - /*useMapMem=*/ header->memoryBlock->fromMapMemory); - STAT_increment(getThreadId(), ThreadCommonCounters, freeLargeObj); - } -} - -/*********** End allocation of large objects **********/ - - - -} // namespace internal -} // namespace rml - diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin-tbbmalloc-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin-tbbmalloc-export.def deleted file mode 100644 index cd766d7155..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin-tbbmalloc-export.def +++ /dev/null @@ -1,70 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -{ -global: - -scalable_calloc; -scalable_free; -scalable_malloc; -scalable_realloc; -scalable_posix_memalign; -scalable_aligned_malloc; -scalable_aligned_realloc; -scalable_aligned_free; -__TBB_internal_calloc; -__TBB_internal_free; -__TBB_internal_malloc; -__TBB_internal_realloc; -__TBB_internal_posix_memalign; -scalable_msize; - -local: - -/* TBB symbols */ -*3rml8internal*; -*3tbb*; -*__TBB*; -__itt_*; -ITT_DoOneTimeInitialization; -TBB_runtime_interface_version; - -/* Intel Compiler (libirc) symbols */ -__intel_*; -_intel_*; -get_memcpy_largest_cachelinesize; -get_memcpy_largest_cache_size; -get_mem_ops_method; -init_mem_ops_method; -irc__get_msg; -irc__print; -override_mem_ops_method; -set_memcpy_largest_cachelinesize; -set_memcpy_largest_cache_size; - -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin32-proxy-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin32-proxy-export.def deleted file mode 100644 index ba22eca5bd..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin32-proxy-export.def +++ /dev/null @@ -1,59 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -{ -global: -calloc; -free; -malloc; -realloc; -posix_memalign; -memalign; -valloc; -pvalloc; -mallinfo; -mallopt; -__TBB_malloc_proxy; -__TBB_internal_find_original_malloc; -_ZdaPv; /* next ones are new/delete */ -_ZdaPvRKSt9nothrow_t; -_ZdlPv; -_ZdlPvRKSt9nothrow_t; -_Znaj; -_ZnajRKSt9nothrow_t; -_Znwj; -_ZnwjRKSt9nothrow_t; - -local: - -/* TBB symbols */ -*3rml8internal*; -*3tbb*; -*__TBB*; - -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64-proxy-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64-proxy-export.def deleted file mode 100644 index b6eb7aeaab..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64-proxy-export.def +++ /dev/null @@ -1,59 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -{ -global: -calloc; -free; -malloc; -realloc; -posix_memalign; -memalign; -valloc; -pvalloc; -mallinfo; -mallopt; -__TBB_malloc_proxy; -__TBB_internal_find_original_malloc; -_ZdaPv; /* next ones are new/delete */ -_ZdaPvRKSt9nothrow_t; -_ZdlPv; -_ZdlPvRKSt9nothrow_t; -_Znam; -_ZnamRKSt9nothrow_t; -_Znwm; -_ZnwmRKSt9nothrow_t; - -local: - -/* TBB symbols */ -*3rml8internal*; -*3tbb*; -*__TBB*; - -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64ipf-proxy-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64ipf-proxy-export.def deleted file mode 100644 index b6eb7aeaab..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/lin64ipf-proxy-export.def +++ /dev/null @@ -1,59 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -{ -global: -calloc; -free; -malloc; -realloc; -posix_memalign; -memalign; -valloc; -pvalloc; -mallinfo; -mallopt; -__TBB_malloc_proxy; -__TBB_internal_find_original_malloc; -_ZdaPv; /* next ones are new/delete */ -_ZdaPvRKSt9nothrow_t; -_ZdlPv; -_ZdlPvRKSt9nothrow_t; -_Znam; -_ZnamRKSt9nothrow_t; -_Znwm; -_ZnwmRKSt9nothrow_t; - -local: - -/* TBB symbols */ -*3rml8internal*; -*3tbb*; -*__TBB*; - -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac32-tbbmalloc-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac32-tbbmalloc-export.def deleted file mode 100644 index 295fce2909..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac32-tbbmalloc-export.def +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. -# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -# MemoryAllocator.cpp -_scalable_calloc -_scalable_free -_scalable_malloc -_scalable_realloc -_scalable_posix_memalign -_scalable_aligned_malloc -_scalable_aligned_realloc -_scalable_aligned_free -_scalable_msize diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac64-tbbmalloc-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac64-tbbmalloc-export.def deleted file mode 100644 index 295fce2909..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/mac64-tbbmalloc-export.def +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright 2005-2010 Intel Corporation. All Rights Reserved. -# -# This file is part of Threading Building Blocks. 
-# -# Threading Building Blocks is free software; you can redistribute it -# and/or modify it under the terms of the GNU General Public License -# version 2 as published by the Free Software Foundation. -# -# Threading Building Blocks is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty -# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Threading Building Blocks; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# As a special exception, you may use this file as part of a free software -# library without restriction. Specifically, if other files instantiate -# templates or use macros or inline functions from this file, or you compile -# this file and link it with other files to produce an executable, this -# file does not by itself cause the resulting executable to be covered by -# the GNU General Public License. This exception does not however -# invalidate any other reasons why the executable file might be covered by -# the GNU General Public License. - -# MemoryAllocator.cpp -_scalable_calloc -_scalable_free -_scalable_malloc -_scalable_realloc -_scalable_posix_memalign -_scalable_aligned_malloc -_scalable_aligned_realloc -_scalable_aligned_free -_scalable_msize diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.cpp b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.cpp deleted file mode 100644 index 87b03e5525..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.cpp +++ /dev/null @@ -1,472 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "proxy.h" - -#if !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) && !defined(__SUNPRO_CC) || defined(_XBOX) - #if TBB_USE_EXCEPTIONS - #error Compilation settings do not support exception handling. Please do not set TBB_USE_EXCEPTIONS macro or set it to 0. 
- #elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 0 - #endif -#elif !defined(TBB_USE_EXCEPTIONS) - #define TBB_USE_EXCEPTIONS 1 -#endif - -#if MALLOC_LD_PRELOAD - -/*** service functions and variables ***/ - -#include <unistd.h> // for sysconf -#include <dlfcn.h> - -static long memoryPageSize; - -static inline void initPageSize() -{ - memoryPageSize = sysconf(_SC_PAGESIZE); -} - -/* For the expected behaviour (i.e., finding malloc/free/etc from libc.so, - not from ld-linux.so) dlsym(RTLD_NEXT) should be called from - a LD_PRELOADed library, not another dynamic library. - So we have to put find_original_malloc here. - */ -extern "C" bool __TBB_internal_find_original_malloc(int num, const char *names[], - void *ptrs[]) -{ - for (int i=0; i<num; i++) - if (NULL == (ptrs[i] = dlsym (RTLD_NEXT, names[i]))) - return false; - - return true; -} - -/* __TBB_malloc_proxy used as a weak symbol by libtbbmalloc for: - 1) detection that the proxy library is loaded - 2) check that dlsym("malloc") found something different from our replacement malloc -*/ -extern "C" void *__TBB_malloc_proxy() __attribute__ ((alias ("malloc"))); - -#ifndef __THROW -#define __THROW -#endif - -/*** replacements for malloc and the family ***/ - -extern "C" { - -void *malloc(size_t size) __THROW -{ - return __TBB_internal_malloc(size); -} - -void * calloc(size_t num, size_t size) __THROW -{ - return __TBB_internal_calloc(num, size); -} - -void free(void *object) __THROW -{ - __TBB_internal_free(object); -} - -void * realloc(void* ptr, size_t sz) __THROW -{ - return __TBB_internal_realloc(ptr, sz); -} - -int posix_memalign(void **memptr, size_t alignment, size_t size) __THROW -{ - return __TBB_internal_posix_memalign(memptr, alignment, size); -} - -/* The older *NIX interface for aligned allocations; - it's formally substituted by posix_memalign and deprecated, - so we do not expect it to cause cyclic dependency with C RTL. */ -void * memalign(size_t alignment, size_t size) __THROW -{ - return scalable_aligned_malloc(size, alignment); -} - -/* valloc allocates memory aligned on a page boundary */ -void * valloc(size_t size) __THROW -{ - if (! memoryPageSize) initPageSize(); - - return scalable_aligned_malloc(size, memoryPageSize); -} - -/* pvalloc allocates smallest set of complete pages which can hold - the requested number of bytes. Result is aligned on page boundary. */ -void * pvalloc(size_t size) __THROW -{ - if (! 
memoryPageSize) initPageSize(); - // align size up to the page size - size = ((size-1) | (memoryPageSize-1)) + 1; - - return scalable_aligned_malloc(size, memoryPageSize); -} - -int mallopt(int /*param*/, int /*value*/) __THROW -{ - return 1; -} - -} /* extern "C" */ - -#if __linux__ -#include <malloc.h> -#include <string.h> // for memset - -extern "C" struct mallinfo mallinfo() __THROW -{ - struct mallinfo m; - memset(&m, 0, sizeof(struct mallinfo)); - - return m; -} -#endif /* __linux__ */ - -/*** replacements for global operators new and delete ***/ - -#include <new> - -void * operator new(size_t sz) throw (std::bad_alloc) { - void *res = scalable_malloc(sz); -#if TBB_USE_EXCEPTIONS - if (NULL == res) - throw std::bad_alloc(); -#endif /* TBB_USE_EXCEPTIONS */ - return res; -} -void* operator new[](size_t sz) throw (std::bad_alloc) { - void *res = scalable_malloc(sz); -#if TBB_USE_EXCEPTIONS - if (NULL == res) - throw std::bad_alloc(); -#endif /* TBB_USE_EXCEPTIONS */ - return res; -} -void operator delete(void* ptr) throw() { - scalable_free(ptr); -} -void operator delete[](void* ptr) throw() { - scalable_free(ptr); -} -void* operator new(size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void* operator new[](std::size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void operator delete(void* ptr, const std::nothrow_t&) throw() { - scalable_free(ptr); -} -void operator delete[](void* ptr, const std::nothrow_t&) throw() { - scalable_free(ptr); -} - -#endif /* MALLOC_LD_PRELOAD */ - - -#ifdef _WIN32 -#include <windows.h> - -#include <stdio.h> -#include "tbb_function_replacement.h" - -void safer_scalable_free2( void *ptr) -{ - safer_scalable_free( ptr, NULL ); -} - -// we do not support _expand(); -void* safer_expand( void *, size_t ) -{ - return NULL; -} - -#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(CRTLIB)\ -void (*orig_free_##CRTLIB)(void*); \ -void safer_scalable_free_##CRTLIB( void *ptr) \ -{ \ - safer_scalable_free( ptr, orig_free_##CRTLIB ); \ -} \ - \ -size_t (*orig_msize_##CRTLIB)(void*); \ -size_t safer_scalable_msize_##CRTLIB( void *ptr) \ -{ \ - return safer_scalable_msize( ptr, orig_msize_##CRTLIB ); \ -} \ - \ -void* safer_scalable_realloc_##CRTLIB( void *ptr, size_t size ) \ -{ \ - orig_ptrs func_ptrs = {orig_free_##CRTLIB, orig_msize_##CRTLIB}; \ - return safer_scalable_realloc( ptr, size, &func_ptrs ); \ -} \ - \ -void* safer_scalable_aligned_realloc_##CRTLIB( void *ptr, size_t size, size_t aligment ) \ -{ \ - orig_ptrs func_ptrs = {orig_free_##CRTLIB, orig_msize_##CRTLIB}; \ - return safer_scalable_aligned_realloc( ptr, size, aligment, &func_ptrs ); \ -} - -// limit is 30 bytes/60 symbols per line -const char* known_bytecodes[] = { -#if _WIN64 - "4883EC284885C974", //release free() win64 - "4883EC384885C975", //release msize() win64 - "4885C974375348", //release free() 8.0.50727.42 win64 - "48894C24084883EC28BA", //debug prologue for win64 - "4C8BC1488B0DA6E4040033", //win64 SDK - "4883EC284885C975", //release msize() 10.0.21003.1 win64 -#else - "558BEC6A018B", //debug free() & _msize() 8.0.50727.4053 win32 - "6A1868********E8", //release free() 8.0.50727.4053 win32 - "6A1C68********E8", //release _msize() 8.0.50727.4053 win32 - "8BFF558BEC6A", //debug free() & _msize() 9.0.21022.8 win32 - "8BFF558BEC83", //debug free() & _msize() 10.0.21003.1 win32 -#endif - NULL - }; - -#if _WIN64 -#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(CRT_VER)\ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "free", 
(FUNCPTR)safer_scalable_free_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "free", (FUNCPTR)safer_scalable_free_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "realloc", (FUNCPTR)safer_scalable_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "realloc", (FUNCPTR)safer_scalable_realloc_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_free", (FUNCPTR)safer_scalable_free_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_free", (FUNCPTR)safer_scalable_free_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER, 0, NULL); -#else -#define __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(CRT_VER)\ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "free", (FUNCPTR)safer_scalable_free_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "free", (FUNCPTR)safer_scalable_free_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_free_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER ## d, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ## d ); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_msize",(FUNCPTR)safer_scalable_msize_ ## CRT_VER, known_bytecodes, (FUNCPTR*)&orig_msize_ ## CRT_VER ); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "realloc", (FUNCPTR)safer_scalable_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "realloc", (FUNCPTR)safer_scalable_realloc_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_free", (FUNCPTR)safer_scalable_free_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_free", (FUNCPTR)safer_scalable_free_ ## CRT_VER, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER "d.dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER ## d, 0, NULL); \ - ReplaceFunctionWithStore( #CRT_VER ".dll", "_aligned_realloc",(FUNCPTR)safer_scalable_aligned_realloc_ ## CRT_VER, 0, NULL); -#endif - -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr70); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr71); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr80); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr90); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100d); -__TBB_ORIG_ALLOCATOR_REPLACEMENT_WRAPPER(msvcr100); - - -/*** replacements for global operators new and delete ***/ - -#include <new> - -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( push ) -#pragma warning( disable : 4290 ) -#endif - -void * operator_new(size_t sz) throw (std::bad_alloc) { - void *res = 
scalable_malloc(sz); - if (NULL == res) throw std::bad_alloc(); - return res; -} -void* operator_new_arr(size_t sz) throw (std::bad_alloc) { - void *res = scalable_malloc(sz); - if (NULL == res) throw std::bad_alloc(); - return res; -} -void operator_delete(void* ptr) throw() { - safer_scalable_free2(ptr); -} -#if _MSC_VER && !defined(__INTEL_COMPILER) -#pragma warning( pop ) -#endif - -void operator_delete_arr(void* ptr) throw() { - safer_scalable_free2(ptr); -} -void* operator_new_t(size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void* operator_new_arr_t(std::size_t sz, const std::nothrow_t&) throw() { - return scalable_malloc(sz); -} -void operator_delete_t(void* ptr, const std::nothrow_t&) throw() { - safer_scalable_free2(ptr); -} -void operator_delete_arr_t(void* ptr, const std::nothrow_t&) throw() { - safer_scalable_free2(ptr); -} - -const char* modules_to_replace[] = { - "msvcr80d.dll", - "msvcr80.dll", - "msvcr90d.dll", - "msvcr90.dll", - "msvcr100d.dll", - "msvcr100.dll", - "msvcr70d.dll", - "msvcr70.dll", - "msvcr71d.dll", - "msvcr71.dll", - }; - -/* -We need to replace following functions: -malloc -calloc -_aligned_malloc -_expand (by dummy implementation) -??2@YAPAXI@Z operator new (ia32) -??_U@YAPAXI@Z void * operator new[] (size_t size) (ia32) -??3@YAXPAX@Z operator delete (ia32) -??_V@YAXPAX@Z operator delete[] (ia32) -??2@YAPEAX_K@Z void * operator new(unsigned __int64) (intel64) -??_V@YAXPEAX@Z void * operator new[](unsigned __int64) (intel64) -??3@YAXPEAX@Z operator delete (intel64) -??_V@YAXPEAX@Z operator delete[] (intel64) -??2@YAPAXIABUnothrow_t@std@@@Z void * operator new (size_t sz, const std::nothrow_t&) throw() (optional) -??_U@YAPAXIABUnothrow_t@std@@@Z void * operator new[] (size_t sz, const std::nothrow_t&) throw() (optional) - -and these functions have runtime-specific replacement: -realloc -free -_msize -_aligned_realloc -_aligned_free -*/ - -typedef struct FRData_t { - //char *_module; - const char *_func; - FUNCPTR _fptr; - FRR_ON_ERROR _on_error; -} FRDATA; - -FRDATA routines_to_replace[] = { - { "malloc", (FUNCPTR)scalable_malloc, FRR_FAIL }, - { "calloc", (FUNCPTR)scalable_calloc, FRR_FAIL }, - { "_aligned_malloc", (FUNCPTR)scalable_aligned_malloc, FRR_FAIL }, - { "_expand", (FUNCPTR)safer_expand, FRR_IGNORE }, -#if _WIN64 - { "??2@YAPEAX_K@Z", (FUNCPTR)operator_new, FRR_FAIL }, - { "??_U@YAPEAX_K@Z", (FUNCPTR)operator_new_arr, FRR_FAIL }, - { "??3@YAXPEAX@Z", (FUNCPTR)operator_delete, FRR_FAIL }, - { "??_V@YAXPEAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL }, -#else - { "??2@YAPAXI@Z", (FUNCPTR)operator_new, FRR_FAIL }, - { "??_U@YAPAXI@Z", (FUNCPTR)operator_new_arr, FRR_FAIL }, - { "??3@YAXPAX@Z", (FUNCPTR)operator_delete, FRR_FAIL }, - { "??_V@YAXPAX@Z", (FUNCPTR)operator_delete_arr, FRR_FAIL }, -#endif - { "??2@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_t, FRR_IGNORE }, - { "??_U@YAPAXIABUnothrow_t@std@@@Z", (FUNCPTR)operator_new_arr_t, FRR_IGNORE } -}; - -#ifndef UNICODE -void ReplaceFunctionWithStore( const char*dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc ) -#else -void ReplaceFunctionWithStore( const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc ) -#endif -{ - FRR_TYPE type = ReplaceFunction( dllName, funcName, newFunc, opcodes, origFunc ); - if (type == FRR_NODLL) return; - if ( type != FRR_OK ) - { - fprintf(stderr, "Failed to replace function %s in module %s\n", - funcName, dllName); - exit(1); - } -} - -void 
doMallocReplacement() -{ - int i,j; - - // Replace functions without storing original code - int modules_to_replace_count = sizeof(modules_to_replace) / sizeof(modules_to_replace[0]); - int routines_to_replace_count = sizeof(routines_to_replace) / sizeof(routines_to_replace[0]); - for ( j=0; j<modules_to_replace_count; j++ ) - for (i = 0; i < routines_to_replace_count; i++) - { - FRR_TYPE type = ReplaceFunction( modules_to_replace[j], routines_to_replace[i]._func, routines_to_replace[i]._fptr, NULL, NULL ); - if (type == FRR_NODLL) break; - if (type != FRR_OK && routines_to_replace[i]._on_error==FRR_FAIL) - { - fprintf(stderr, "Failed to replace function %s in module %s\n", - routines_to_replace[i]._func, modules_to_replace[j]); - exit(1); - } - } - - // Replace functions and keep backup of original code (separate for each runtime) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr70) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr71) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr80) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr90) - __TBB_ORIG_ALLOCATOR_REPLACEMENT_CALL(msvcr100) -} - -extern "C" BOOL WINAPI DllMain( HINSTANCE hInst, DWORD callReason, LPVOID reserved ) -{ - - if ( callReason==DLL_PROCESS_ATTACH && reserved && hInst ) { -#if TBBMALLOC_USE_TBB_FOR_ALLOCATOR_ENV_CONTROLLED - char pinEnvVariable[50]; - if( GetEnvironmentVariable("TBBMALLOC_USE_TBB_FOR_ALLOCATOR", pinEnvVariable, 50)) - { - doMallocReplacement(); - } -#else - doMallocReplacement(); -#endif - } - - return TRUE; -} - -// Just to make the linker happy and link the DLL to the application -extern "C" __declspec(dllexport) void __TBB_malloc_proxy() -{ - -} - -#endif //_WIN32 diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.h deleted file mode 100644 index 315f628027..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/proxy.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -#ifndef _TBB_malloc_proxy_H_ -#define _TBB_malloc_proxy_H_ - -#if __linux__ -#define MALLOC_LD_PRELOAD 1 -#endif - -// MALLOC_LD_PRELOAD depends on MALLOC_CHECK_RECURSION stuff -#if __linux__ || __APPLE__ || __sun || __FreeBSD__ || MALLOC_LD_PRELOAD -#define MALLOC_CHECK_RECURSION 1 -#endif - -#include <stddef.h> - -extern "C" { - void * scalable_malloc(size_t size); - void * scalable_calloc(size_t nobj, size_t size); - void scalable_free(void *ptr); - void * scalable_realloc(void* ptr, size_t size); - void * scalable_aligned_malloc(size_t size, size_t alignment); - void * scalable_aligned_realloc(void* ptr, size_t size, size_t alignment); - int scalable_posix_memalign(void **memptr, size_t alignment, size_t size); - size_t scalable_msize(void *ptr); - void safer_scalable_free( void *ptr, void (*original_free)(void*)); - void * safer_scalable_realloc( void *ptr, size_t, void* ); - void * safer_scalable_aligned_realloc( void *ptr, size_t, size_t, void* ); - size_t safer_scalable_msize( void *ptr, size_t (*orig_msize_crt80d)(void*)); - - void * __TBB_internal_malloc(size_t size); - void * __TBB_internal_calloc(size_t num, size_t size); - void __TBB_internal_free(void *ptr); - void * __TBB_internal_realloc(void* ptr, size_t sz); - int __TBB_internal_posix_memalign(void **memptr, size_t alignment, size_t size); - - bool __TBB_internal_find_original_malloc(int num, const char *names[], void *table[]); -} // extern "C" - -// Struct with original free() and _msize() pointers -struct orig_ptrs { - void (*orig_free) (void*); - size_t (*orig_msize)(void*); -}; - -#endif /* _TBB_malloc_proxy_H_ */ diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.cpp b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.cpp deleted file mode 100644 index 02ebea6151..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.cpp +++ /dev/null @@ -1,476 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. 
-*/ - -// Works on windows only -#ifdef _WIN32 -#define _CRT_SECURE_NO_DEPRECATE 1 -#define __TBB_NO_IMPLICIT_LINKAGE 1 - -#include <windows.h> -#include <new> -#include <stdio.h> -#include "tbb_function_replacement.h" - -#include "tbb/tbb_config.h" -#include "tbb/tbb_stddef.h" -#include "../tbb/tbb_assert_impl.h" - -inline UINT_PTR Ptr2Addrint(LPVOID ptr) -{ - Int2Ptr i2p; - i2p.lpv = ptr; - return i2p.uip; -} - -inline LPVOID Addrint2Ptr(UINT_PTR ptr) -{ - Int2Ptr i2p; - i2p.uip = ptr; - return i2p.lpv; -} - -// Is the distance between addr1 and addr2 smaller than dist -inline bool IsInDistance(UINT_PTR addr1, UINT_PTR addr2, __int64 dist) -{ - __int64 diff = addr1>addr2 ? addr1-addr2 : addr2-addr1; - return diff<dist; -} - -/* - * When inserting a probe in 64 bits process the distance between the insertion - * point and the target may be bigger than 2^32. In this case we are using - * indirect jump through memory where the offset to this memory location - * is smaller than 2^32 and it contains the absolute address (8 bytes). - * - * This class is used to hold the pages used for the above trampolines. - * Since this utility will be used to replace malloc functions this implementation - * doesn't allocate memory dynamically. - * - * The struct MemoryBuffer holds the data about a page in the memory used for - * replacing functions in Intel64 where the target is too far to be replaced - * with a short jump. All the calculations of m_base and m_next are in a multiple - * of SIZE_OF_ADDRESS (which is 8 in Win64). - */ -class MemoryProvider { -private: - struct MemoryBuffer { - UINT_PTR m_base; // base address of the buffer - UINT_PTR m_next; // next free location in the buffer - DWORD m_size; // size of buffer - - // Default constructor - MemoryBuffer() : m_base(0), m_next(0), m_size(0) {} - - // Constructor - MemoryBuffer(void *base, DWORD size) - { - m_base = Ptr2Addrint(base); - m_next = m_base; - m_size = size; - } - }; - -MemoryBuffer *CreateBuffer(UINT_PTR addr) - { - // No more room in the pages database - if (m_lastBuffer - m_pages == MAX_NUM_BUFFERS) - return 0; - - void *newAddr = Addrint2Ptr(addr); - // Get information for the region which the given address belongs to - MEMORY_BASIC_INFORMATION memInfo; - if (VirtualQuery(newAddr, &memInfo, sizeof(memInfo)) != sizeof(memInfo)) - return 0; - - for(;;) { - // The new address to check is beyond the current region and aligned to allocation size - newAddr = Addrint2Ptr( (Ptr2Addrint(memInfo.BaseAddress) + memInfo.RegionSize + m_allocSize) & ~(UINT_PTR)(m_allocSize-1) ); - - // Check that the address is in the right distance. 
- // VirtualAlloc can only round the address down; so it will remain in the right distance - if (!IsInDistance(addr, Ptr2Addrint(newAddr), MAX_DISTANCE)) - break; - - if (VirtualQuery(newAddr, &memInfo, sizeof(memInfo)) != sizeof(memInfo)) - break; - - if (memInfo.State == MEM_FREE && memInfo.RegionSize >= m_allocSize) - { - // Found a free region, try to allocate a page in this region - void *newPage = VirtualAlloc(newAddr, m_allocSize, MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE); - if (!newPage) - break; - - // Add the new page to the pages database - MemoryBuffer *pBuff = new (m_lastBuffer) MemoryBuffer(newPage, m_allocSize); - ++m_lastBuffer; - return pBuff; - } - } - - // Failed to find a buffer in the distance - return 0; - } - -public: - MemoryProvider() - { - SYSTEM_INFO sysInfo; - GetSystemInfo(&sysInfo); - m_allocSize = sysInfo.dwAllocationGranularity; - m_lastBuffer = &m_pages[0]; - } - - // We can't free the pages in the destructor because the trampolines - // are using these memory locations and a replaced function might be called - // after the destructor was called. - ~MemoryProvider() - { - } - - // Return a memory location in distance less than 2^31 from input address - UINT_PTR GetLocation(UINT_PTR addr) - { - MemoryBuffer *pBuff = m_pages; - for (; pBuff<m_lastBuffer && IsInDistance(pBuff->m_next, addr, MAX_DISTANCE); ++pBuff) - { - if (pBuff->m_next < pBuff->m_base + pBuff->m_size) - { - UINT_PTR loc = pBuff->m_next; - pBuff->m_next += MAX_PROBE_SIZE; - return loc; - } - } - - pBuff = CreateBuffer(addr); - if(!pBuff) - return 0; - - UINT_PTR loc = pBuff->m_next; - pBuff->m_next += MAX_PROBE_SIZE; - return loc; - } - -private: - MemoryBuffer m_pages[MAX_NUM_BUFFERS]; - MemoryBuffer *m_lastBuffer; - DWORD m_allocSize; -}; - -static MemoryProvider memProvider; - -// Compare opcodes from dictionary (str1) and opcodes from code (str2) -// str1 might contain '*' to mask adresses -// RETURN: NULL if opcodes did not match, string lentgh of str1 on success -size_t compareStrings( const char *str1, const char *str2 ) -{ - size_t str1Lentgh = strlen(str1); - for (size_t i=0; i<str1Lentgh; i++){ - if( str1[i] != '*' && str1[i] != str2[i] ) return 0; - } - return str1Lentgh; -} - -// Check function prologue with know prologues from the dictionary -// opcodes - dictionary -// inpAddr - pointer to function prologue -// Dictionary contains opcodes for several full asm instrutions -// + one opcode byte for the next asm instruction for safe address processing -// RETURN: number of bytes for safe bytes replacement -// (matched_pattern/2-1) -UINT CheckOpcodes( const char ** opcodes, void *inpAddr ) -{ - static size_t opcodesStringsCount = 0; - static size_t maxOpcodesLength = 0; - static size_t opcodes_pointer = (size_t)opcodes; - char opcodeString[61]; - size_t i; - size_t result; - - // Get the values for static variables - // max length and number of patterns - if( !opcodesStringsCount || opcodes_pointer != (size_t)opcodes ){ - while( *(opcodes + opcodesStringsCount)!= NULL ){ - if( (i=strlen(*(opcodes + opcodesStringsCount))) > maxOpcodesLength ) - maxOpcodesLength = i; - opcodesStringsCount++; - } - opcodes_pointer = (size_t)opcodes; - __TBB_ASSERT( maxOpcodesLength < 61, "Limit is 30 opcodes/60 symbols per pattern" ); - } - - // Translate prologue opcodes to string format to compare - for( i=0; i< maxOpcodesLength/2; i++ ){ - sprintf( opcodeString + 2*i, "%.2X", *((unsigned char*)inpAddr+i) ); - } - opcodeString[maxOpcodesLength] = 0; - - // Compare translated opcodes with patterns - 
for( i=0; i< opcodesStringsCount; i++ ){ - result = compareStrings( opcodes[i],opcodeString ); - if( result ) - return (UINT)(result/2-1); - } - // TODO: to add more stuff to patterns - __TBB_ASSERT( false, "CheckOpcodes failed" ); - - // No matches found just do not store original calls - return 0; -} - -// Insert jump relative instruction to the input address -// RETURN: the size of the trampoline or 0 on failure -static DWORD InsertTrampoline32(void *inpAddr, void *targetAddr, const char ** opcodes, void** storedAddr) -{ - UINT opcodesNumber = SIZE_OF_RELJUMP; - UINT_PTR srcAddr = Ptr2Addrint(inpAddr); - UINT_PTR tgtAddr = Ptr2Addrint(targetAddr); - // Check that the target fits in 32 bits - if (!IsInDistance(srcAddr, tgtAddr, MAX_DISTANCE)) - return 0; - - UINT_PTR offset; - UINT offset32; - UCHAR *codePtr = (UCHAR *)inpAddr; - - // If requested, store original function code - if ( storedAddr ){ - opcodesNumber = CheckOpcodes( opcodes, inpAddr ); - if( opcodesNumber >= SIZE_OF_RELJUMP ){ - UINT_PTR strdAddr = memProvider.GetLocation(srcAddr); - if (!strdAddr) - return 0; - *storedAddr = Addrint2Ptr(strdAddr); - // Set 'executable' flag for original instructions in the new place - DWORD pageFlags = PAGE_EXECUTE_READWRITE; - if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0; - // Copy original instructions to the new place - memcpy(*storedAddr, codePtr, opcodesNumber); - // Set jump to the code after replacement - offset = srcAddr - strdAddr - SIZE_OF_RELJUMP; - offset32 = (UINT)((offset & 0xFFFFFFFF)); - *((UCHAR*)*storedAddr+opcodesNumber) = 0xE9; - memcpy(((UCHAR*)*storedAddr+opcodesNumber+1), &offset32, sizeof(offset32)); - }else{ - // No matches found just do not store original calls - *storedAddr = NULL; - } - } - - // The following will work correctly even if srcAddr>tgtAddr, as long as - // address difference is less than 2^31, which is guaranteed by IsInDistance. - offset = tgtAddr - srcAddr - SIZE_OF_RELJUMP; - offset32 = (UINT)(offset & 0xFFFFFFFF); - // Insert the jump to the new code - *codePtr = 0xE9; - memcpy(codePtr+1, &offset32, sizeof(offset32)); - - // Fill the rest with NOPs to correctly see disassembler of old code in debugger. 
- for( unsigned i=SIZE_OF_RELJUMP; i<opcodesNumber; i++ ){ - *(codePtr+i) = 0x90; - } - - return SIZE_OF_RELJUMP; -} - -// This function is called when the offset doesn't fit in 32 bits -// 1 Find and allocate a page in the small distance (<2^31) from input address -// 2 Put jump RIP relative indirect through the address in the close page -// 3 Put the absolute address of the target in the allocated location -// RETURN: the size of the trampoline or 0 on failure -static DWORD InsertTrampoline64(void *inpAddr, void *targetAddr, const char ** opcodes, void** storedAddr) -{ - UINT opcodesNumber = SIZE_OF_INDJUMP; - - UINT_PTR srcAddr = Ptr2Addrint(inpAddr); - UINT_PTR tgtAddr = Ptr2Addrint(targetAddr); - - // Get a location close to the source address - UINT_PTR location = memProvider.GetLocation(srcAddr); - if (!location) - return 0; - - UINT_PTR offset; - UINT offset32; - UCHAR *codePtr = (UCHAR *)inpAddr; - - // Fill the location - UINT_PTR *locPtr = (UINT_PTR *)Addrint2Ptr(location); - *locPtr = tgtAddr; - - // If requested, store original function code - if( storedAddr ){ - opcodesNumber = CheckOpcodes( opcodes, inpAddr ); - if( opcodesNumber >= SIZE_OF_INDJUMP ){ - UINT_PTR strdAddr = memProvider.GetLocation(srcAddr); - if (!strdAddr) - return 0; - *storedAddr = Addrint2Ptr(strdAddr); - // Set 'executable' flag for original instructions in the new place - DWORD pageFlags = PAGE_EXECUTE_READWRITE; - if (!VirtualProtect(*storedAddr, MAX_PROBE_SIZE, pageFlags, &pageFlags)) return 0; - // Copy original instructions to the new place - memcpy(*storedAddr, codePtr, opcodesNumber); - // Set jump to the code after replacement. It is within the distance of relative jump! - offset = srcAddr - strdAddr - SIZE_OF_RELJUMP; - offset32 = (UINT)((offset & 0xFFFFFFFF)); - *((UCHAR*)*storedAddr+opcodesNumber) = 0xE9; - memcpy(((UCHAR*)*storedAddr+opcodesNumber+1), &offset32, sizeof(offset32)); - }else{ - // No matches found just do not store original calls - *storedAddr = NULL; - } - } - - // Fill the buffer - offset = location - srcAddr - SIZE_OF_INDJUMP; - offset32 = (UINT)(offset & 0xFFFFFFFF); - *(codePtr) = 0xFF; - *(codePtr+1) = 0x25; - memcpy(codePtr+2, &offset32, sizeof(offset32)); - - // Fill the rest with NOPs to correctly see disassembler of old code in debugger. - for( unsigned i=SIZE_OF_INDJUMP; i<opcodesNumber; i++ ){ - *(codePtr+i) = 0x90; - } - - return SIZE_OF_INDJUMP; -} - -// Insert a jump instruction in the inpAddr to the targetAddr -// 1. Get the memory protection of the page containing the input address -// 2. Change the memory protection to writable -// 3. Call InsertTrampoline32 or InsertTrampoline64 -// 4. 
Restore memory protection -// RETURN: FALSE on failure, TRUE on success -static bool InsertTrampoline(void *inpAddr, void *targetAddr, const char ** opcodes, void** origFunc) -{ - DWORD probeSize; - // Change page protection to EXECUTE+WRITE - DWORD origProt = 0; - if (!VirtualProtect(inpAddr, MAX_PROBE_SIZE, PAGE_EXECUTE_WRITECOPY, &origProt)) - return FALSE; - probeSize = InsertTrampoline32(inpAddr, targetAddr, opcodes, origFunc); - if (!probeSize) - probeSize = InsertTrampoline64(inpAddr, targetAddr, opcodes, origFunc); - - // Restore original protection - VirtualProtect(inpAddr, MAX_PROBE_SIZE, origProt, &origProt); - - if (!probeSize) - return FALSE; - - FlushInstructionCache(GetCurrentProcess(), inpAddr, probeSize); - FlushInstructionCache(GetCurrentProcess(), origFunc, probeSize); - - return TRUE; -} - -// Routine to replace the functions -// TODO: replace opcodesNumber with opcodes and opcodes number to check if we replace right code. -FRR_TYPE ReplaceFunctionA(const char *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc) -{ - // Cache the results of the last search for the module - // Assume that there was no DLL unload between - static char cachedName[MAX_PATH+1]; - static HMODULE cachedHM = 0; - - if (!dllName || !*dllName) - return FRR_NODLL; - - if (!cachedHM || strncmp(dllName, cachedName, MAX_PATH) != 0) - { - // Find the module handle for the input dll - HMODULE hModule = GetModuleHandleA(dllName); - if (hModule == 0) - { - // Couldn't find the module with the input name - cachedHM = 0; - return FRR_NODLL; - } - - cachedHM = hModule; - strncpy(cachedName, dllName, MAX_PATH); - } - - FARPROC inpFunc = GetProcAddress(cachedHM, funcName); - if (inpFunc == 0) - { - // Function was not found - return FRR_NOFUNC; - } - - if (!InsertTrampoline((void*)inpFunc, (void*)newFunc, opcodes, (void**)origFunc)){ - // Failed to insert the trampoline to the target address - return FRR_FAILED; - } - - return FRR_OK; -} - -FRR_TYPE ReplaceFunctionW(const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc) -{ - // Cache the results of the last search for the module - // Assume that there was no DLL unload between - static wchar_t cachedName[MAX_PATH+1]; - static HMODULE cachedHM = 0; - - if (!dllName || !*dllName) - return FRR_NODLL; - - if (!cachedHM || wcsncmp(dllName, cachedName, MAX_PATH) != 0) - { - // Find the module handle for the input dll - HMODULE hModule = GetModuleHandleW(dllName); - if (hModule == 0) - { - // Couldn't find the module with the input name - cachedHM = 0; - return FRR_NODLL; - } - - cachedHM = hModule; - wcsncpy(cachedName, dllName, MAX_PATH); - } - - FARPROC inpFunc = GetProcAddress(cachedHM, funcName); - if (inpFunc == 0) - { - // Function was not found - return FRR_NOFUNC; - } - - if (!InsertTrampoline((void*)inpFunc, (void*)newFunc, opcodes, (void**)origFunc)){ - // Failed to insert the trampoline to the target address - return FRR_FAILED; - } - - return FRR_OK; -} - -#endif //_WIN32 diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.h deleted file mode 100644 index bf520b6668..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbb_function_replacement.h +++ /dev/null @@ -1,84 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_function_replacement_H -#define __TBB_function_replacement_H - -typedef enum { - FRR_OK, /* Succeeded in replacing the function */ - FRR_NODLL, /* The requested DLL was not found */ - FRR_NOFUNC, /* The requested function was not found */ - FRR_FAILED, /* The function replacement request failed */ -} FRR_TYPE; - -typedef enum { - FRR_FAIL, /* Required function */ - FRR_IGNORE, /* optional function */ -} FRR_ON_ERROR; - -typedef void (*FUNCPTR)(); - -#ifndef UNICODE -#define ReplaceFunction ReplaceFunctionA -#else -#define ReplaceFunction ReplaceFunctionW -#endif //UNICODE - -FRR_TYPE ReplaceFunctionA(const char *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=NULL); -FRR_TYPE ReplaceFunctionW(const wchar_t *dllName, const char *funcName, FUNCPTR newFunc, const char ** opcodes, FUNCPTR* origFunc=NULL); - -// Utilities to convert between ADDRESS and LPVOID -union Int2Ptr { - UINT_PTR uip; - LPVOID lpv; -}; - -inline UINT_PTR Ptr2Addrint(LPVOID ptr); -inline LPVOID Addrint2Ptr(UINT_PTR ptr); - -// Use this value as the maximum size the trampoline region -const unsigned MAX_PROBE_SIZE = 32; - -// The size of a jump relative instruction "e9 00 00 00 00" -const unsigned SIZE_OF_RELJUMP = 5; - -// The size of jump RIP relative indirect "ff 25 00 00 00 00" -const unsigned SIZE_OF_INDJUMP = 6; - -// The size of address we put in the location (in Intel64) -const unsigned SIZE_OF_ADDRESS = 8; - -// The max distance covered in 32 bits: 2^31 - 1 - C -// where C should not be smaller than the size of a probe. -// The latter is important to correctly handle "backward" jumps. -const __int64 MAX_DISTANCE = (((__int64)1 << 31) - 1) - MAX_PROBE_SIZE; - -// The maximum number of distinct buffers in memory -const ptrdiff_t MAX_NUM_BUFFERS = 256; - -#endif //__TBB_function_replacement_H diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp deleted file mode 100644 index fff6744621..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.cpp +++ /dev/null @@ -1,221 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. 
- - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#include "TypeDefinitions.h" // Customize.h and proxy.h get included - -#include "../tbb/itt_notify.h" // for __TBB_load_ittnotify() - -#undef UNICODE - -#if USE_PTHREAD -#include <dlfcn.h> -#elif USE_WINTHREAD -#include "tbb/machine/windows_api.h" -#endif - -#if MALLOC_CHECK_RECURSION - -#include <pthread.h> -#include <stdio.h> -#include <unistd.h> -#if __sun -#include <string.h> /* for memset */ -#include <errno.h> -#endif - -#if MALLOC_LD_PRELOAD - -extern "C" { - -void safer_scalable_free( void*, void (*)(void*) ); -void * safer_scalable_realloc( void*, size_t, void* ); - -bool __TBB_internal_find_original_malloc(int num, const char *names[], void *table[]) __attribute__ ((weak)); - -} - -#endif /* MALLOC_LD_PRELOAD */ -#endif /* MALLOC_CHECK_RECURSION */ - -namespace rml { -namespace internal { - -#if MALLOC_CHECK_RECURSION - -void* (*original_malloc_ptr)(size_t) = 0; -void (*original_free_ptr)(void*) = 0; -static void* (*original_calloc_ptr)(size_t,size_t) = 0; -static void* (*original_realloc_ptr)(void*,size_t) = 0; - -#endif /* MALLOC_CHECK_RECURSION */ - -#if DO_ITT_NOTIFY -/** Caller is responsible for ensuring this routine is called exactly once. */ -void MallocInitializeITT() { - tbb::internal::__TBB_load_ittnotify(); -} -#else -void MallocInitializeITT() {} -#endif /* DO_ITT_NOTIFY */ - -extern "C" -void ITT_DoOneTimeInitialization() { - MallocInitializeITT(); -} // required for itt_notify.cpp to work - -#if TBB_USE_DEBUG -#define DEBUG_SUFFIX "_debug" -#else -#define DEBUG_SUFFIX -#endif /* TBB_USE_DEBUG */ - -// MALLOCLIB_NAME is the name of the TBB memory allocator library. 
-#if _WIN32||_WIN64 -#define MALLOCLIB_NAME "tbbmalloc" DEBUG_SUFFIX ".dll" -#elif __APPLE__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".dylib" -#elif __linux__ -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION) -#elif __FreeBSD__ || __sun || _AIX -#define MALLOCLIB_NAME "libtbbmalloc" DEBUG_SUFFIX ".so" -#else -#error Unknown OS -#endif - -void init_tbbmalloc() { -#if MALLOC_LD_PRELOAD - if (malloc_proxy && __TBB_internal_find_original_malloc) { - const char *alloc_names[] = { "malloc", "free", "realloc", "calloc"}; - void *orig_alloc_ptrs[4]; - - if (__TBB_internal_find_original_malloc(4, alloc_names, orig_alloc_ptrs)) { - (void *&)original_malloc_ptr = orig_alloc_ptrs[0]; - (void *&)original_free_ptr = orig_alloc_ptrs[1]; - (void *&)original_realloc_ptr = orig_alloc_ptrs[2]; - (void *&)original_calloc_ptr = orig_alloc_ptrs[3]; - MALLOC_ASSERT( original_malloc_ptr!=malloc_proxy, - "standard malloc not found" ); -/* It's workaround for a bug in GNU Libc 2.9 (as it shipped with Fedora 10). - 1st call to libc's malloc should be not from threaded code. - */ - original_free_ptr(original_malloc_ptr(1024)); - original_malloc_found = 1; - } - } -#endif /* MALLOC_LD_PRELOAD */ - -#if DO_ITT_NOTIFY - MallocInitializeITT(); -#endif - -/* Preventing TBB allocator library from unloading to prevent - resource leak, as memory is not released on the library unload. -*/ -#if USE_WINTHREAD - // Prevent Windows from displaying message boxes if it fails to load library - UINT prev_mode = SetErrorMode (SEM_FAILCRITICALERRORS); - LoadLibrary(MALLOCLIB_NAME); - SetErrorMode (prev_mode); -#endif /* USE_PTHREAD */ -} - -#if !(_WIN32||_WIN64) -struct RegisterProcessShutdownNotification { - RegisterProcessShutdownNotification() { -#if USE_PTHREAD - // prevents unloading, POSIX case - dlopen(MALLOCLIB_NAME, RTLD_NOW); -#endif - } - ~RegisterProcessShutdownNotification() { - mallocProcessShutdownNotification(); - } -}; - -static RegisterProcessShutdownNotification reg; -#endif - -#if MALLOC_CHECK_RECURSION - -bool original_malloc_found; - -#if MALLOC_LD_PRELOAD - -extern "C" { - -void * __TBB_internal_malloc(size_t size) -{ - return scalable_malloc(size); -} - -void * __TBB_internal_calloc(size_t num, size_t size) -{ - return scalable_calloc(num, size); -} - -int __TBB_internal_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - return scalable_posix_memalign(memptr, alignment, size); -} - -void* __TBB_internal_realloc(void* ptr, size_t sz) -{ - return safer_scalable_realloc(ptr, sz, (void*&)original_realloc_ptr); -} - -void __TBB_internal_free(void *object) -{ - safer_scalable_free(object, original_free_ptr); -} - -} /* extern "C" */ - -#endif /* MALLOC_LD_PRELOAD */ - -#endif /* MALLOC_CHECK_RECURSION */ - -} } // namespaces - -#ifdef _WIN32 - -extern "C" BOOL WINAPI DllMain( HINSTANCE hInst, DWORD callReason, LPVOID ) -{ - - if (callReason==DLL_THREAD_DETACH) - { - mallocThreadShutdownNotification(NULL); - } - else if (callReason==DLL_PROCESS_DETACH) - { - mallocProcessShutdownNotification(); - } - return TRUE; -} - -#endif //_WIN32 - diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.rc b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.rc deleted file mode 100644 index 1aba7982dc..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc.rc +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2005-2010 Intel Corporation. All Rights Reserved. -// -// This file is part of Threading Building Blocks. 
-// -// Threading Building Blocks is free software; you can redistribute it -// and/or modify it under the terms of the GNU General Public License -// version 2 as published by the Free Software Foundation. -// -// Threading Building Blocks is distributed in the hope that it will be -// useful, but WITHOUT ANY WARRANTY; without even the implied warranty -// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with Threading Building Blocks; if not, write to the Free Software -// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -// -// As a special exception, you may use this file as part of a free software -// library without restriction. Specifically, if other files instantiate -// templates or use macros or inline functions from this file, or you compile -// this file and link it with other files to produce an executable, this -// file does not by itself cause the resulting executable to be covered by -// the GNU General Public License. This exception does not however -// invalidate any other reasons why the executable file might be covered by -// the GNU General Public License. - -// Microsoft Visual C++ generated resource script. -// -#ifdef APSTUDIO_INVOKED -#ifndef APSTUDIO_READONLY_SYMBOLS -#define _APS_NO_MFC 1 -#define _APS_NEXT_RESOURCE_VALUE 102 -#define _APS_NEXT_COMMAND_VALUE 40001 -#define _APS_NEXT_CONTROL_VALUE 1001 -#define _APS_NEXT_SYMED_VALUE 101 -#endif -#endif - -#define APSTUDIO_READONLY_SYMBOLS -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 2 resource. -// -#include <winresrc.h> -#define ENDL "\r\n" -#include "tbb/tbb_version.h" - -#define TBBMALLOC_VERNUMBERS TBB_VERSION_MAJOR, TBB_VERSION_MINOR, __TBB_VERSION_YMD -#define TBBMALLOC_VERSION __TBB_STRING(TBBMALLOC_VERNUMBERS) - -///////////////////////////////////////////////////////////////////////////// -#undef APSTUDIO_READONLY_SYMBOLS - -///////////////////////////////////////////////////////////////////////////// -// Neutral resources - -#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_NEU) -#ifdef _WIN32 -LANGUAGE LANG_NEUTRAL, SUBLANG_NEUTRAL -#pragma code_page(1252) -#endif //_WIN32 - -///////////////////////////////////////////////////////////////////////////// -// manifest integration -#ifdef TBB_MANIFEST -#include "winuser.h" -2 RT_MANIFEST tbbmanifest.exe.manifest -#endif - -///////////////////////////////////////////////////////////////////////////// -// -// Version -// - -VS_VERSION_INFO VERSIONINFO - FILEVERSION TBBMALLOC_VERNUMBERS - PRODUCTVERSION TBB_VERNUMBERS - FILEFLAGSMASK 0x17L -#ifdef _DEBUG - FILEFLAGS 0x1L -#else - FILEFLAGS 0x0L -#endif - FILEOS 0x40004L - FILETYPE 0x2L - FILESUBTYPE 0x0L -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "000004b0" - BEGIN - VALUE "CompanyName", "Intel Corporation\0" - VALUE "FileDescription", "Scalable Allocator library\0" - VALUE "FileVersion", TBBMALLOC_VERSION "\0" -//what is it? VALUE "InternalName", "tbbmalloc\0" - VALUE "LegalCopyright", "Copyright 2005-2010 Intel Corporation. 
All Rights Reserved.\0" - VALUE "LegalTrademarks", "\0" -#ifndef TBB_USE_DEBUG - VALUE "OriginalFilename", "tbbmalloc.dll\0" -#else - VALUE "OriginalFilename", "tbbmalloc_debug.dll\0" -#endif - VALUE "ProductName", "Intel(R) Threading Building Blocks for Windows\0" - VALUE "ProductVersion", TBB_VERSION "\0" - VALUE "Comments", TBB_VERSION_STRINGS "\0" - VALUE "PrivateBuild", "\0" - VALUE "SpecialBuild", "\0" - END - END - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0, 1200 - END -END - -#endif // Neutral resources -///////////////////////////////////////////////////////////////////////////// - - -#ifndef APSTUDIO_INVOKED -///////////////////////////////////////////////////////////////////////////// -// -// Generated from the TEXTINCLUDE 3 resource. -// - - -///////////////////////////////////////////////////////////////////////////// -#endif // not APSTUDIO_INVOKED - diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc_internal.h b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc_internal.h deleted file mode 100644 index ddb9c1a9ac..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/tbbmalloc_internal.h +++ /dev/null @@ -1,279 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -#ifndef __TBB_tbbmalloc_internal_H -#define __TBB_tbbmalloc_internal_H 1 - - -#include "TypeDefinitions.h" /* Also includes customization layer Customize.h */ - -#if USE_PTHREAD - // Some pthreads documentation says that <pthreads.h> must be first header. 
- #include <pthread.h> -#endif - -#include <stdio.h> -#include <stdlib.h> -#if MALLOC_CHECK_RECURSION -#include <new> /* for placement new */ -#endif - -#if __sun || __SUNPRO_CC -#define __asm__ asm -#endif - -extern "C" { - void * scalable_malloc(size_t size); - void scalable_free(void *object); - void mallocThreadShutdownNotification(void*); -} - - -/********* Various compile-time options **************/ - -#define MALLOC_TRACE 0 - -#if MALLOC_TRACE -#define TRACEF(x) printf x -#else -#define TRACEF(x) ((void)0) -#endif /* MALLOC_TRACE */ - -#define ASSERT_TEXT NULL - -#define COLLECT_STATISTICS MALLOC_DEBUG && defined(MALLOCENV_COLLECT_STATISTICS) -#include "Statistics.h" - -/********* End compile-time options **************/ - -namespace rml { -namespace internal { - -/********** Various numeric parameters controlling allocations ********/ - -/* - * blockSize - the size of a block, it must be larger than maxSegregatedObjectSize. - * - */ -const uintptr_t blockSize = 16*1024; - -/* - * Difference between object sizes in large block bins - */ -const uint32_t largeBlockCacheStep = 8*1024; - -/* - * Large blocks cache cleanup frequency. - * It should be power of 2 for the fast checking. - */ -const unsigned cacheCleanupFreq = 256; - -/* - * Alignment of large (>= minLargeObjectSize) objects. - */ -static int largeObjectAlignment = 64; // 64 is common cache line size - -/********** End of numeric parameters controlling allocations *********/ - -class BackRefIdx { // composite index to backreference array -private: - uint16_t master; // index in BackRefMaster - uint16_t largeObj:1; // is this object "large"? - uint16_t offset :15; // offset from beginning of BackRefBlock -public: - BackRefIdx() : master((uint16_t)-1) {} - bool isInvalid() const { return master == (uint16_t)-1; } - bool isLargeObject() const { return largeObj; } - uint16_t getMaster() const { return master; } - uint16_t getOffset() const { return offset; } - - // only newBackRef can modify BackRefIdx - static BackRefIdx newBackRef(bool largeObj); -}; - -struct LargeMemoryBlock { - LargeMemoryBlock *next, // ptrs in list of cached blocks - *prev; - uintptr_t age; // age of block while in cache - size_t objectSize; // the size requested by a client - size_t unalignedSize; // the size requested from getMemory - bool fromMapMemory; - BackRefIdx backRefIdx; // cached here, used copy is in LargeObjectHdr -}; - -struct LargeObjectHdr { - LargeMemoryBlock *memoryBlock; - /* Backreference points to LargeObjectHdr. - Duplicated in LargeMemoryBlock to reuse in subsequent allocations. 
*/ - BackRefIdx backRefIdx; -}; - -struct FreeObject { - FreeObject *next; -}; - -// interface class for external access to Block -class BlockI { -public: - static BlockI *getRawBlock(bool startup); - void initialize(void *bumpPtr); -}; - -class FreeBlocks { - typedef void* (*RawAlloc) (size_t size, bool useMapMem); - typedef void (*RawFree) (void *object, size_t size, bool useMapMem); - - RawAlloc rawAlloc; - RawFree rawFree; - size_t memReqSize; - - bool mallocBigBlock(); -public: - bool bootstrap(RawAlloc myAlloc, RawFree myFree, size_t myReqSize); - BlockI *get(bool startup); - void put(BlockI *block, bool startup); - void putList(BlockI *head, BlockI *tail); -}; - -extern FreeBlocks freeBlocks; - -/******* A helper class to support overriding malloc with scalable_malloc *******/ -#if MALLOC_CHECK_RECURSION - -class RecursiveMallocCallProtector { - // pointer to an automatic data of holding thread - static void *autoObjPtr; - static MallocMutex rmc_mutex; - static pthread_t owner_thread; -/* Under FreeBSD 8.0 1st call to any pthread function including pthread_self - leads to pthread initialization, that causes malloc calls. As 1st usage of - RecursiveMallocCallProtector can be before pthread initialized, pthread calls - can't be used in 1st instance of RecursiveMallocCallProtector. - RecursiveMallocCallProtector is used 1st time in checkInitialization(), - so there is a guarantee that on 2nd usage pthread is initialized. - No such situation observed with other supported OSes. - */ -#if __FreeBSD__ - static bool canUsePthread; -#else - static const bool canUsePthread = true; -#endif -/* - The variable modified in checkInitialization, - so can be read without memory barriers. - */ - static bool mallocRecursionDetected; - - MallocMutex::scoped_lock* lock_acquired; - char scoped_lock_space[sizeof(MallocMutex::scoped_lock)+1]; - - static uintptr_t absDiffPtr(void *x, void *y) { - uintptr_t xi = (uintptr_t)x, yi = (uintptr_t)y; - return xi > yi ? xi - yi : yi - xi; - } -public: - - RecursiveMallocCallProtector() : lock_acquired(NULL) { - lock_acquired = new (scoped_lock_space) MallocMutex::scoped_lock( rmc_mutex ); - if (canUsePthread) - owner_thread = pthread_self(); - autoObjPtr = &scoped_lock_space; - } - ~RecursiveMallocCallProtector() { - if (lock_acquired) { - autoObjPtr = NULL; - lock_acquired->~scoped_lock(); - } - } - static bool sameThreadActive() { - if (!autoObjPtr) // fast path - return false; - // Some thread has an active recursive call protector; check if the current one. - // Exact pthread_self based test - if (canUsePthread) { - if (pthread_equal( owner_thread, pthread_self() )) { - mallocRecursionDetected = true; - return true; - } else - return false; - } - // inexact stack size based test - const uintptr_t threadStackSz = 2*1024*1024; - int dummy; - return absDiffPtr(autoObjPtr, &dummy)<threadStackSz; - } - static bool noRecursion(); -/* The function is called on 1st scalable_malloc call to check if malloc calls - scalable_malloc (nested call must set mallocRecursionDetected). */ - static void detectNaiveOverload() { - if (!malloc_proxy) { -#if __FreeBSD__ -/* If !canUsePthread, we can't call pthread_self() before, but now pthread - is already on, so can do it. False positives here lead to silent switching - from malloc to mmap for all large allocations with bad performance impact. 
*/ - if (!canUsePthread) { - canUsePthread = true; - owner_thread = pthread_self(); - } -#endif - free(malloc(1)); - } - } -}; - -#else - -class RecursiveMallocCallProtector { -public: - RecursiveMallocCallProtector() {} - ~RecursiveMallocCallProtector() {} -}; - -#endif /* MALLOC_CHECK_RECURSION */ - -bool isMallocInitializedExt(); - -void* getRawMemory (size_t size, bool useMapMem); -void freeRawMemory (void *object, size_t size, bool useMapMem); - -extern const uint32_t minLargeObjectSize; -bool isLargeObject(void *object); -void* mallocLargeObject (size_t size, size_t alignment, bool startupAlloc = false); -void freeLargeObject (void *object); - -unsigned int getThreadId(); - -bool initBackRefMaster(); -void removeBackRef(BackRefIdx backRefIdx); -void setBackRef(BackRefIdx backRefIdx, void *newPtr); -void *getBackRef(BackRefIdx backRefIdx); - -} // namespace internal -} // namespace rml - -#endif // __TBB_tbbmalloc_internal_H diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win-gcc-tbbmalloc-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win-gcc-tbbmalloc-export.def deleted file mode 100644 index 7a7d072ec6..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win-gcc-tbbmalloc-export.def +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright 2005-2010 Intel Corporation. All Rights Reserved. - - This file is part of Threading Building Blocks. - - Threading Building Blocks is free software; you can redistribute it - and/or modify it under the terms of the GNU General Public License - version 2 as published by the Free Software Foundation. - - Threading Building Blocks is distributed in the hope that it will be - useful, but WITHOUT ANY WARRANTY; without even the implied warranty - of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with Threading Building Blocks; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - - As a special exception, you may use this file as part of a free software - library without restriction. Specifically, if other files instantiate - templates or use macros or inline functions from this file, or you compile - this file and link it with other files to produce an executable, this - file does not by itself cause the resulting executable to be covered by - the GNU General Public License. This exception does not however - invalidate any other reasons why the executable file might be covered by - the GNU General Public License. -*/ - -{ -global: -scalable_calloc; -scalable_free; -scalable_malloc; -scalable_realloc; -scalable_posix_memalign; -scalable_aligned_malloc; -scalable_aligned_realloc; -scalable_aligned_free; -safer_scalable_free; -safer_scalable_realloc; -scalable_msize; -safer_scalable_msize; -safer_scalable_aligned_realloc; -local:*; -}; diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win32-tbbmalloc-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win32-tbbmalloc-export.def deleted file mode 100644 index 798879e9ea..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win32-tbbmalloc-export.def +++ /dev/null @@ -1,42 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. 
-; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -EXPORTS - -; MemoryAllocator.cpp -scalable_calloc -scalable_free -scalable_malloc -scalable_realloc -scalable_posix_memalign -scalable_aligned_malloc -scalable_aligned_realloc -scalable_aligned_free -safer_scalable_free -safer_scalable_realloc -scalable_msize -safer_scalable_msize -safer_scalable_aligned_realloc diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win64-tbbmalloc-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win64-tbbmalloc-export.def deleted file mode 100644 index 798879e9ea..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/win64-tbbmalloc-export.def +++ /dev/null @@ -1,42 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. 
- -EXPORTS - -; MemoryAllocator.cpp -scalable_calloc -scalable_free -scalable_malloc -scalable_realloc -scalable_posix_memalign -scalable_aligned_malloc -scalable_aligned_realloc -scalable_aligned_free -safer_scalable_free -safer_scalable_realloc -scalable_msize -safer_scalable_msize -safer_scalable_aligned_realloc diff --git a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/xbox360-tbbmalloc-export.def b/deal.II/bundled/tbb30_104oss/src/tbbmalloc/xbox360-tbbmalloc-export.def deleted file mode 100644 index 86509ad355..0000000000 --- a/deal.II/bundled/tbb30_104oss/src/tbbmalloc/xbox360-tbbmalloc-export.def +++ /dev/null @@ -1,42 +0,0 @@ -; Copyright 2005-2010 Intel Corporation. All Rights Reserved. -; -; This file is part of Threading Building Blocks. -; -; Threading Building Blocks is free software; you can redistribute it -; and/or modify it under the terms of the GNU General Public License -; version 2 as published by the Free Software Foundation. -; -; Threading Building Blocks is distributed in the hope that it will be -; useful, but WITHOUT ANY WARRANTY; without even the implied warranty -; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -; GNU General Public License for more details. -; -; You should have received a copy of the GNU General Public License -; along with Threading Building Blocks; if not, write to the Free Software -; Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -; -; As a special exception, you may use this file as part of a free software -; library without restriction. Specifically, if other files instantiate -; templates or use macros or inline functions from this file, or you compile -; this file and link it with other files to produce an executable, this -; file does not by itself cause the resulting executable to be covered by -; the GNU General Public License. This exception does not however -; invalidate any other reasons why the executable file might be covered by -; the GNU General Public License. - -EXPORTS - -; MemoryAllocator.cpp -scalable_calloc @1 -scalable_free @2 -scalable_malloc @3 -scalable_realloc @4 -scalable_posix_memalign @5 -scalable_aligned_malloc @6 -scalable_aligned_realloc @7 -scalable_aligned_free @8 -safer_scalable_free @9 -safer_scalable_realloc @10 -scalable_msize @11 -safer_scalable_msize @12 -safer_scalable_aligned_realloc @13 diff --git a/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt b/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt index 69f477862c..3d5aa530b7 100644 --- a/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt +++ b/deal.II/bundled/tbb41_20130401oss/src/CMakeLists.txt @@ -12,6 +12,13 @@ ## ##### +# +# Remove -Wall and -pedantic from CMAKE_CXX_FLAGS (in directory scope) to +# avoid some annoying warnings... 
+# +STRIP_FLAG(CMAKE_CXX_FLAGS "-Wall") +STRIP_FLAG(CMAKE_CXX_FLAGS "-pedantic") + SET(CMAKE_INCLUDE_CURRENT_DIR TRUE) INCLUDE_DIRECTORIES( ${CMAKE_CURRENT_SOURCE_DIR}/rml/include @@ -20,7 +27,6 @@ INCLUDE_DIRECTORIES( IF(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/version_string.ver") FILE(WRITE "${CMAKE_CURRENT_BINARY_DIR}/version_string.ver" "#define __TBB_VERSION_STRINGS(N) \"Empty\"\n" - "#define TBB_VERSION_STRINGS(N) \"Empty\"\n" ) ENDIF() diff --git a/deal.II/bundled/tbb41_20130401oss/src/tbb/tools_api/ittnotify_config.h b/deal.II/bundled/tbb41_20130401oss/src/tbb/tools_api/ittnotify_config.h index 02077a8a7d..6829999618 100644 --- a/deal.II/bundled/tbb41_20130401oss/src/tbb/tools_api/ittnotify_config.h +++ b/deal.II/bundled/tbb41_20130401oss/src/tbb/tools_api/ittnotify_config.h @@ -264,7 +264,7 @@ INLINE int __TBB_machine_fetchadd4(volatile void* ptr, long addend) int result; __asm__ __volatile__("lock\nxaddl %0,%1" : "=r"(result),"=m"(*(long*)ptr) - : "0"(addend), "m"(*(long*)ptr) + : "0"((int)addend), "m"(*(long*)ptr) : "memory"); return result; } -- 2.39.5
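A note on the ittnotify_config.h hunk above: the "0" constraint ties the addend input to operand 0, which is the 32-bit int result register of the lock xaddl, and the change simply makes the long-to-int narrowing explicit at that point. The following is a minimal, self-contained sketch of the same fetch-and-add idiom; it is x86-only GNU-style inline assembly, and the wrapper name and the small driver are invented for illustration rather than taken from the bundled sources.

    #include <cstdio>

    // Illustrative 32-bit fetch-and-add in the style of the patched routine.
    // Operand 0 is the int result register; the "0" input constraint reuses
    // that register, so the long addend is narrowed explicitly before the asm.
    static inline int fetch_and_add4(volatile int* ptr, long addend) {
        int result;
        __asm__ __volatile__("lock\nxaddl %0,%1"
                             : "=r"(result), "=m"(*ptr)
                             : "0"((int)addend), "m"(*ptr)
                             : "memory");
        return result;   // value stored at *ptr before the addition
    }

    int main() {
        volatile int counter = 40;
        int previous = fetch_and_add4(&counter, 2);
        std::printf("previous=%d now=%d\n", previous, counter);   // previous=40 now=42
        return 0;
    }

The xadd instruction writes the old memory value into the register operand and the sum back to memory, which is why the tied register pair both supplies the addend and returns the previous value.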
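For the removed tbb_function_replacement.cpp earlier in the patch, the probe written over a replaced function is a plain 5-byte relative jump: opcode E9 followed by a 32-bit displacement counted from the end of the jump instruction, which is why the code subtracts SIZE_OF_RELJUMP when forming the offset. The sketch below reproduces only that offset arithmetic; it is an illustration with made-up addresses, not code from the bundled library.

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Assemble "E9 <rel32>": a jump from src to tgt, both given as raw addresses.
    // Unsigned wraparound keeps backward jumps correct as long as the distance
    // stays below 2^31, the same bound the removed code checks via IsInDistance.
    void make_reljump(unsigned char out[5], std::uintptr_t src, std::uintptr_t tgt) {
        const unsigned SIZE_OF_RELJUMP = 5;                       // e9 xx xx xx xx
        std::uintptr_t offset   = tgt - src - SIZE_OF_RELJUMP;
        std::uint32_t  offset32 = static_cast<std::uint32_t>(offset & 0xFFFFFFFF);
        out[0] = 0xE9;
        std::memcpy(out + 1, &offset32, sizeof(offset32));
    }

    int main() {
        unsigned char probe[5];
        make_reljump(probe, 0x401000, 0x402000);                  // hypothetical addresses
        for (unsigned char b : probe) std::printf("%02X ", b);    // prints: E9 FB 0F 00 00
        std::printf("\n");
        return 0;
    }

When the target lies farther than 2^31 bytes away on Intel64, the removed code switches to the 6-byte indirect form ff 25 <rel32>, jumping through an 8-byte absolute address that MemoryProvider places in a page within reach.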